Example #1
def prepare_data():
    from os.path import join as pjoin
    from commontool.io.io import CiftiReader, save2nifti

    hemi = 'lh'
    brain_structure = {
        'lh': 'CIFTI_STRUCTURE_CORTEX_LEFT',
        'rh': 'CIFTI_STRUCTURE_CORTEX_RIGHT'
    }
    proj_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_pattern'
    data_dir = pjoin(proj_dir, 'data/HCP')
    trg_dir = pjoin(proj_dir, 'analysis/s2_rh')
    trg_file = pjoin(trg_dir, f'curvature_{hemi}.nii.gz')
    subj_id_file = pjoin(trg_dir, 'subject_id')
    subj_id_all_file = pjoin(data_dir, 'structure/subject_id_curv')
    data_file = pjoin(
        data_dir, 'structure/S1200.All.curvature_MSMAll.32k_fs_LR.dscalar.nii')

    subj_ids = open(subj_id_file).read().splitlines()
    subj_ids_all = open(subj_id_all_file).read().splitlines()
    data = CiftiReader(data_file).get_data(brain_structure[hemi], True)

    # map each selected subject to its row in the full data matrix
    indices = [subj_ids_all.index(i) for i in subj_ids]

    # reshape to (n_vertices, 1, 1, n_subjects) for NIfTI storage
    data_new = data[indices].T[:, None, None, :]
    save2nifti(trg_file, data_new)
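The `.T[:, None, None, :]` reshape stores one surface map per NIfTI volume: vertices fill the first spatial axis and subjects the fourth. commontool is a private library, so as a rough sketch (an assumption, not the real implementation), save2nifti presumably wraps the array with nibabel along these lines:

def save2nifti(fpath, data, header=None):
    # Sketch of what commontool.io.io.save2nifti likely does; the real
    # function may differ. The identity affine is a placeholder, since
    # surface data carry no meaningful volume geometry.
    import numpy as np
    import nibabel as nib
    img = nib.Nifti2Image(np.asarray(data), np.eye(4), header=header)
    nib.save(img, fpath)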
Example #2
def rois_stats():
    import numpy as np
    import nibabel as nib
    import pickle as pkl

    from os.path import join as pjoin
    from commontool.io.io import save2nifti

    roi_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_pattern/analysis/s2_rh/zscore/HAC_ward_euclidean/100clusters/activation/ROIs/v3'
    roi_file = pjoin(roi_dir, 'rois.nii.gz')
    roi2label_file = pjoin(roi_dir, 'roi2label.csv')
    group_label_file = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_pattern/' \
                       'analysis/s2_rh/zscore/HAC_ward_euclidean/100clusters/group_labels'

    rois = nib.load(roi_file).get_fdata().squeeze().T
    group_labels_all = np.array(open(group_label_file).read().split(' '),
                                dtype=int)
    roi2label = dict()
    for line in open(roi2label_file).read().splitlines():
        k, v = line.split(',')
        roi2label[k] = int(v)

    # prepare rois information dict
    rois_info = dict()
    for roi in roi2label.keys():
        rois_info[roi] = dict()

    prob_maps = []
    for roi, label in roi2label.items():
        # get indices of groups which contain the roi
        indices = rois == label
        group_indices = np.any(indices, 1)

        # calculate the number of the valid groups
        n_group = np.sum(group_indices)
        rois_info[roi]['n_group'] = n_group

        # calculate the number of the subjects
        n_subj = 0
        group_labels = np.where(group_indices)[0] + 1
        for i in group_labels:
            n_subj += np.sum(group_labels_all == i)
        rois_info[roi]['n_subject'] = n_subj

        # calculate roi sizes for each valid group
        sizes = np.sum(indices[group_indices], 1)
        rois_info[roi]['sizes'] = sizes

        # calculate roi probability map among valid groups
        prob_map = np.mean(indices[group_indices], 0)
        prob_maps.append(prob_map)
    prob_maps = np.array(prob_maps).T[:, None, None, :]

    # save out
    pkl.dump(rois_info, open(pjoin(roi_dir, 'rois_info.pkl'), 'wb'))
    save2nifti(pjoin(roi_dir, 'prob_maps.nii.gz'), prob_maps)
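A toy check of the per-ROI bookkeeping above, with made-up labels: 3 groups, 4 vertices, ROI label 7.

import numpy as np

rois = np.array([[7, 0, 7, 0],
                 [0, 0, 0, 0],
                 [7, 7, 7, 0]])
indices = rois == 7
group_indices = np.any(indices, 1)        # groups containing the ROI
print(np.sum(group_indices))              # n_group -> 2
print(np.sum(indices[group_indices], 1))  # sizes -> [2 3]
print(np.mean(indices[group_indices], 0)) # prob map -> [1.  0.5 1.  0. ]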
Example #3
def calc_mean_map(connect_files, items):
    import os
    import numpy as np
    import nibabel as nib

    from os.path import join as pjoin
    from commontool.io.io import save2nifti

    connect_dir = os.path.dirname(connect_files)
    mean_dir = pjoin(connect_dir, 'mean')
    if not os.path.exists(mean_dir):
        os.makedirs(mean_dir)
    for item in items:
        connect_file = connect_files.format(item=item)
        data = np.atleast_2d(nib.load(connect_file).get_fdata())
        mean = np.mean(data, 0)

        out_name = os.path.basename(connect_file)
        save2nifti(pjoin(mean_dir, out_name), mean)
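A hypothetical call (the path template and item names are made up for illustration; connect_files must contain an {item} placeholder):

calc_mean_map('/path/to/connectivity/{item}.nii.gz', items=['visual', 'motor'])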
Example #4
def gen_mean_structure():
    import os
    import numpy as np
    import nibabel as nib

    from os.path import join as pjoin
    from commontool.io.io import save2nifti

    # predefine some variables
    # -----------------------
    # predefine parameters
    n_clusters = [100]
    hemi = 'rh'

    # predefine paths
    proj_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_pattern'
    clustering_dir = pjoin(proj_dir, 'analysis/s2/lh')
    n_cluster_dirs = pjoin(clustering_dir,
                           'zscore/HAC_ward_euclidean/{}clusters')
    data_file = pjoin(proj_dir, 'analysis/s2/{}/curvature.nii.gz'.format(hemi))
    # -----------------------

    # get data
    data = nib.load(data_file).get_fdata().squeeze().T

    # analyze labels
    # --------------
    for n_cluster in n_clusters:
        # get clustering labels of subjects
        n_cluster_dir = n_cluster_dirs.format(n_cluster)
        group_labels_file = pjoin(n_cluster_dir, 'group_labels')
        with open(group_labels_file) as rf:
            group_labels = np.array(rf.read().split(' '), dtype=np.uint16)

        structure_dir = pjoin(n_cluster_dir, 'structure')
        if not os.path.exists(structure_dir):
            os.makedirs(structure_dir)

        mean_data = np.zeros((0, data.shape[1]))
        for label in np.unique(group_labels):
            # get subgroup data
            subgroup_data = np.atleast_2d(data[group_labels == label])
            subgroup_data_mean = np.mean(subgroup_data, 0)
            mean_data = np.r_[mean_data, np.atleast_2d(subgroup_data_mean)]

        # output
        save2nifti(pjoin(structure_dir, f'mean_curv_{hemi}.nii.gz'),
                   mean_data.T[:, None, None, :])
        print('{}clusters: done'.format(n_cluster))
Example #5
def save_mean_maps():
    import os
    import numpy as np

    from os.path import join as pjoin
    from commontool.io.io import CiftiReader, save2nifti

    # predefine parameters
    cluster_nums = [2]
    hemi = 'lh'
    structure_name = 'curv'
    project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering'
    src_file = pjoin(
        project_dir,
        'data/HCP_1080/S1200_1080_curvature_MSMAll_32k_fs_LR.dscalar.nii')
    cluster_num_dirs = pjoin(project_dir,
                             's2_25_zscore/HAC_ward_euclidean/{}clusters')

    # map hemisphere to CIFTI structure name (defined inline here; other
    # examples in this collection use the same mapping)
    hemi2structure = {
        'lh': 'CIFTI_STRUCTURE_CORTEX_LEFT',
        'rh': 'CIFTI_STRUCTURE_CORTEX_RIGHT'
    }
    brain_structure = hemi2structure[hemi]

    # prepare data
    reader = CiftiReader(src_file)
    maps = reader.get_data(brain_structure, True)

    for cluster_num in cluster_nums:
        # get clustering labels of subjects
        cluster_num_dir = cluster_num_dirs.format(cluster_num)
        group_labels_path = pjoin(cluster_num_dir, 'group_labels')
        with open(group_labels_path) as rf:
            group_labels = np.array(rf.read().split(' '), dtype=np.uint16)

        mean_maps = np.zeros((0, maps.shape[1]))
        for label in sorted(set(group_labels)):
            subgroup_maps = maps[group_labels == label]
            subgroup_maps_mean = np.atleast_2d(np.mean(subgroup_maps, 0))
            mean_maps = np.r_[mean_maps, subgroup_maps_mean]

        # output
        out_dir = pjoin(cluster_num_dir, 'structure')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        save2nifti(
            pjoin(out_dir,
                  '{}_{}_mean_maps.nii.gz'.format(hemi, structure_name)),
            mean_maps)

        print('{}_{}clusters: done'.format(structure_name, cluster_num))
Example #6
def get_mpm(hid=1, hemi='lh'):
    """maximal probability map"""
    import numpy as np
    import nibabel as nib
    from cxy_hcp_ffa.lib.predefine import roi2label
    from magicbox.io.io import save2nifti

    # inputs
    thr = 0.25
    map_indices = [1, 2]
    idx2roi = {
        0: 'IOG-face',
        1: 'pFus-face',
        2: 'mFus-face'}
    # pjoin and split_dir come from the enclosing module
    prob_file = pjoin(split_dir, f'prob_maps_half{hid}_{hemi}.nii.gz')

    # outputs
    out_file = pjoin(split_dir, f'MPM_half{hid}_{hemi}_FFA.nii.gz')

    # prepare
    prob_maps = nib.load(prob_file).get_fdata()[..., map_indices]
    mpm_map = np.zeros(prob_maps.shape[:3])

    # calculate
    supra_thr_idx_arr = prob_maps > thr
    valid_idx_arr = np.any(supra_thr_idx_arr, 3)
    valid_arr = prob_maps[valid_idx_arr, :]
    mpm_tmp = np.argmax(valid_arr, -1)
    for i, idx in enumerate(map_indices):
        roi = idx2roi[idx]
        idx_arr = np.zeros_like(mpm_map, dtype=bool)
        idx_arr[valid_idx_arr] = mpm_tmp == i
        mpm_map[idx_arr] = roi2label[roi]

    # verification
    valid_supra_thr_idx_arr = supra_thr_idx_arr[valid_idx_arr, :]
    valid_count_vec = np.sum(valid_supra_thr_idx_arr, -1)
    valid_count_vec_uniq = np.zeros_like(valid_count_vec)
    for i in range(len(valid_count_vec)):
        valid_supra_thr_idx_vec = valid_supra_thr_idx_arr[i]
        valid_count_vec_uniq[i] = \
            len(set(valid_arr[i, valid_supra_thr_idx_vec]))
    assert np.all(valid_count_vec == valid_count_vec_uniq)

    # save
    save2nifti(out_file, mpm_map)
Example #7
def get_mpm(gh_id=11, hemi='lh'):
    """maximal probability map"""
    import numpy as np
    import nibabel as nib
    from commontool.io.io import save2nifti

    thr = 0.25
    prob_map_file = pjoin(split_dir, f'prob_maps_GH{gh_id}_{hemi}.nii.gz')
    out_file = pjoin(split_dir, f'MPM_GH{gh_id}_{hemi}.nii.gz')

    prob_maps = nib.load(prob_map_file).get_fdata()
    supra_thr_idx_arr = prob_maps > thr
    prob_maps[~supra_thr_idx_arr] = 0  # zero out sub-threshold probabilities
    mpm = np.argmax(prob_maps, 3)  # 0-based index of the winning ROI per voxel
    mpm[np.any(prob_maps, 3)] += 1  # shift to 1-based labels; empty voxels stay 0

    # save
    save2nifti(out_file, mpm)
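A self-contained toy run of the same MPM logic (synthetic probabilities, not project data): two ROIs along the last axis, threshold 0.25.

import numpy as np

prob_maps = np.array([0.1, 0.6, 0.3, 0.2, 0.1, 0.2]).reshape(1, 1, 3, 2)
thr = 0.25
prob_maps[prob_maps <= thr] = 0  # zero out sub-threshold probabilities
mpm = np.argmax(prob_maps, 3)    # 0-based winner per voxel
mpm[np.any(prob_maps, 3)] += 1   # 1-based labels; empty voxels stay 0
print(mpm.squeeze())             # -> [2 1 0]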
Example #8
def roi_stats(gh_id=11, hemi='lh'):
    import numpy as np
    import nibabel as nib
    import pickle as pkl
    from cxy_hcp_ffa.lib.predefine import roi2label
    from commontool.io.io import save2nifti

    gh_id_file = pjoin(split_dir, f'half_id_{hemi}.npy')
    roi_file = pjoin(
        proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
        f'rois_v3_{hemi}.nii.gz')
    info_trg_file = pjoin(split_dir, f'rois_info_GH{gh_id}_{hemi}.pkl')
    prob_trg_file = pjoin(split_dir, f'prob_maps_GH{gh_id}_{hemi}.nii.gz')

    gh_id_idx_vec = np.load(gh_id_file) == gh_id
    rois = nib.load(roi_file).get_fdata().squeeze().T[gh_id_idx_vec]

    # prepare rois information dict
    rois_info = dict()
    for roi in roi2label.keys():
        rois_info[roi] = dict()

    prob_maps = []
    for roi, label in roi2label.items():
        # get indices of subjects which contain the roi
        indices = rois == label
        subj_indices = np.any(indices, 1)

        # calculate the number of the valid subjects
        n_subject = np.sum(subj_indices)
        rois_info[roi]['n_subject'] = n_subject

        # calculate roi sizes for each valid subject
        sizes = np.sum(indices[subj_indices], 1)
        rois_info[roi]['sizes'] = sizes

        # calculate roi probability map among valid subjects
        prob_map = np.mean(indices[subj_indices], 0)
        prob_maps.append(prob_map)
    prob_maps = np.array(prob_maps)

    # save out
    pkl.dump(rois_info, open(info_trg_file, 'wb'))
    save2nifti(prob_trg_file, prob_maps.T[:, None, None, :])
Example #9
def compare(maps1, maps2, output_nifti, output_text, mask=None, label_names=None, p_thr=1.0):
    import numpy as np
    from scipy.stats import ttest_ind
    from commontool.io.io import save2nifti

    assert maps1.shape[1] == maps2.shape[1]
    vtx_num = maps1.shape[1]
    if mask is None:
        label_ids = list(map(str, range(vtx_num)))
        trg_vertices_list = [[vtx] for vtx in range(vtx_num)]
    else:
        assert mask.ndim == 1
        assert vtx_num == mask.shape[0]
        label_ids = [int(label_id) for label_id in np.unique(mask) if label_id != 0]
        trg_vertices_list = [np.where(mask == i)[0] for i in label_ids]
        label_ids = list(map(str, label_ids))
    if label_names is None:
        label_names = label_ids.copy()
    else:
        assert len(label_ids) == len(label_names)
    label_ids.insert(0, 'label_id')
    label_names.insert(0, 'label_name')

    ts = ['t']
    ps = ['p']
    compare_map = np.zeros(vtx_num)
    for trg_vertices in trg_vertices_list:
        trg_data1 = np.mean(np.atleast_2d(maps1[:, trg_vertices]), 1)
        trg_data2 = np.mean(np.atleast_2d(maps2[:, trg_vertices]), 1)
        t, p = ttest_ind(trg_data1, trg_data2)
        ts.append(str(t))
        ps.append(str(p))
        compare_map[trg_vertices] = t if p < p_thr else 0

    if p_thr == 1:
        line1 = ','.join(label_ids)
        line2 = ','.join(label_names)
        line3 = ','.join(ts)
        line4 = ','.join(ps)
        lines = '\n'.join([line1, line2, line3, line4])
        open(output_text, 'w').write(lines)
    save2nifti(output_nifti, compare_map)
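A hypothetical usage sketch, assuming the compare function above is in scope (synthetic maps, placeholder file names): 20 subjects per group, 100 vertices split into two labels.

import numpy as np

rng = np.random.default_rng(0)
maps1 = rng.normal(0.0, 1.0, (20, 100))  # group 1: 20 subjects x 100 vertices
maps2 = rng.normal(0.5, 1.0, (20, 100))  # group 2, shifted mean
mask = np.repeat([1, 2], 50)             # label 1: vertices 0-49; label 2: 50-99
compare(maps1, maps2, 'compare_map.nii.gz', 'compare_stats.csv',
        mask=mask, label_names=['anterior', 'posterior'])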
Example #10
                    zip(edges[:, 0], edges[:, 1], edge_data))
            elif w == 'unweighted':
                graph.add_edges_from(edges)
            else:
                raise RuntimeError("invalid method: {}".format(method))
            patches = get_patch_by_LV(graph)
        else:
            raise RuntimeError(
                "the method - {} is not supported at present!".format(method))

        for label, patch in enumerate(patches, 1):
            rFFA_patch_maps[idx, patch] = label
        patch_stat.append(str(len(patches)))
        patch_stat.extend([str(len(patch)) for patch in patches])
        rFFA_patch_stats.append(','.join(patch_stat))

        print('{}/{}'.format(idx + 1, rmaps.shape[0]))

    header = nib.Nifti2Header()
    header['descrip'] = 'FreeROI label'
    save2nifti(pjoin(patch_dir, 'lFFA_patch_maps.nii.gz'),
               lFFA_patch_maps,
               header=header)
    save2nifti(pjoin(patch_dir, 'rFFA_patch_maps.nii.gz'),
               rFFA_patch_maps,
               header=header)
    open(pjoin(patch_dir, 'lFFA_patch_stats'),
         'w+').writelines('\n'.join(lFFA_patch_stats))
    open(pjoin(patch_dir, 'rFFA_patch_stats'),
         'w+').writelines('\n'.join(rFFA_patch_stats))
Example #11
        subject_ids_selected = rf.read().splitlines()

    for item in items:
        if item == 'activation':
            maps_file = pjoin(
                project_dir,
                'data/HCP_face-avg/s2/S1200.1080.FACE-AVG_level2_zstat_hp200_s2_MSMAll.dscalar.nii'
            )
        elif item == 'curvature':
            maps_file = pjoin(
                project_dir,
                'data/HCP_face-avg/S1200.1080.curvature_MSMAll.32k_fs_LR.dscalar.nii'
            )
        else:
            raise RuntimeError("{} is not supported at present!".format(item))

        reader = CiftiReader(maps_file)
        lmaps = reader.get_data('CIFTI_STRUCTURE_CORTEX_LEFT', True)
        rmaps = reader.get_data('CIFTI_STRUCTURE_CORTEX_RIGHT', True)

        lmaps_selected = []
        rmaps_selected = []
        for subject_id in subject_ids_selected:
            subject_idx = subject_ids.index(subject_id)
            lmaps_selected.append(lmaps[subject_idx])
            rmaps_selected.append(rmaps[subject_idx])
        save2nifti(out_file.format(hemi='lh', item=item),
                   np.array(lmaps_selected))
        save2nifti(out_file.format(hemi='rh', item=item),
                   np.array(rmaps_selected))
Example #12
def gen_mean_activation():
    import os
    import numpy as np
    import nibabel as nib

    from os.path import join as pjoin
    from commontool.io.io import save2nifti, CiftiReader

    # predefine some variables
    # -----------------------
    # predefine parameters
    prob_thr = 1.65
    n_clusters = [100]
    hemi = 'rh'
    brain_structure = {
        'lh': 'CIFTI_STRUCTURE_CORTEX_LEFT',
        'rh': 'CIFTI_STRUCTURE_CORTEX_RIGHT'
    }
    stats_table_titles = [
        'label', '#subject', 'min', 'max', 'mean', 'min_roi', 'max_roi',
        'mean_roi'
    ]

    # predefine paths
    proj_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_pattern'
    clustering_dir = pjoin(proj_dir, 'analysis/s2/lh')
    n_cluster_dirs = pjoin(clustering_dir,
                           'zscore/HAC_ward_euclidean/{}clusters')
    roi_file = pjoin(proj_dir,
                     f'data/HCP/label/MMPprob_OFA_FFA_thr1_{hemi}.label')
    activ_file = pjoin(proj_dir, 'analysis/s2/activation.dscalar.nii')
    pattern_file = pjoin(proj_dir,
                         'analysis/s2/{}/zscore/roi_patterns.npy'.format(hemi))
    # -----------------------

    # get data
    roi = nib.freesurfer.read_label(roi_file)
    # activ = nib.load(activ_file).get_data().squeeze().T
    activ = CiftiReader(activ_file).get_data(brain_structure[hemi], True)
    patterns = np.load(pattern_file)

    # analyze labels
    # --------------
    for n_cluster in n_clusters:
        # get clustering labels of subjects
        n_cluster_dir = n_cluster_dirs.format(n_cluster)
        group_labels_file = pjoin(n_cluster_dir, 'group_labels')
        with open(group_labels_file) as rf:
            group_labels = np.array(rf.read().split(' '), dtype=np.uint16)

        activation_dir = pjoin(n_cluster_dir, 'activation')
        if not os.path.exists(activation_dir):
            os.makedirs(activation_dir)

        stats_table_content = dict()
        for title in stats_table_titles:
            # initialize statistics table content
            stats_table_content[title] = []

        mean_activ = np.zeros((0, activ.shape[1]))
        mean_std_activ = np.zeros((0, activ.shape[1]))
        prob_activ = np.zeros((0, activ.shape[1]))
        mean_patterns = np.zeros((0, activ.shape[1]))
        mean_std_patterns = np.zeros((0, activ.shape[1]))
        for label in sorted(set(group_labels)):
            # get subgroup data
            subgroup_activ = np.atleast_2d(activ[group_labels == label])
            subgroup_activ_mean = np.mean(subgroup_activ, 0)
            subgroup_activ_prob = np.mean(subgroup_activ > prob_thr, 0)
            subgroup_activ_mean_std = subgroup_activ_mean / np.std(
                subgroup_activ, 0)
            subgroup_activ_mean_std[np.isnan(subgroup_activ_mean_std)] = 0
            subgroup_roi_activ_mean = subgroup_activ_mean[roi]

            mean_activ = np.r_[mean_activ, np.atleast_2d(subgroup_activ_mean)]
            mean_std_activ = np.r_[mean_std_activ,
                                   np.atleast_2d(subgroup_activ_mean_std)]
            prob_activ = np.r_[prob_activ, np.atleast_2d(subgroup_activ_prob)]

            stats_table_content['label'].append(str(label))
            stats_table_content['#subject'].append(str(
                subgroup_activ.shape[0]))
            stats_table_content['min'].append(str(np.min(subgroup_activ_mean)))
            stats_table_content['max'].append(str(np.max(subgroup_activ_mean)))
            stats_table_content['mean'].append(
                str(np.mean(subgroup_activ_mean)))
            stats_table_content['min_roi'].append(
                str(np.min(subgroup_roi_activ_mean)))
            stats_table_content['max_roi'].append(
                str(np.max(subgroup_roi_activ_mean)))
            stats_table_content['mean_roi'].append(
                str(np.mean(subgroup_roi_activ_mean)))

            # get mean patterns
            subgroup_patterns = patterns[group_labels == label]
            subgroup_patterns_mean = np.mean(subgroup_patterns, 0)
            subgroup_patterns_mean_std = subgroup_patterns_mean / np.std(
                subgroup_patterns, 0)
            subgroup_patterns_mean = subgroup_patterns_mean[None, :]
            subgroup_patterns_mean_std = subgroup_patterns_mean_std[None, :]

            pattern_mean_map = np.ones(
                (1, activ.shape[1])) * np.min(subgroup_patterns_mean)
            pattern_mean_map[:, roi] = subgroup_patterns_mean
            mean_patterns = np.r_[mean_patterns, pattern_mean_map]

            pattern_mean_std_map = np.ones(
                (1, activ.shape[1])) * np.min(subgroup_patterns_mean_std)
            pattern_mean_std_map[:, roi] = subgroup_patterns_mean_std
            mean_std_patterns = np.r_[mean_std_patterns, pattern_mean_std_map]

            # save2nifti(pjoin(activation_dir, f'activ_g{label}_{hemi}.nii.gz'), subgroup_activ.T[:, None, None, :])

        # output activ
        save2nifti(pjoin(activation_dir, f'mean_activ_{hemi}.nii.gz'),
                   mean_activ.T[:, None, None, :])
        save2nifti(pjoin(activation_dir, f'mean_std_activ_{hemi}.nii.gz'),
                   mean_std_activ.T[:, None, None, :])
        save2nifti(
            pjoin(activation_dir, f'prob{prob_thr}_activ_{hemi}.nii.gz'),
            prob_activ.T[:, None, None, :])
        save2nifti(pjoin(activation_dir, f'mean_patterns_{hemi}.nii.gz'),
                   mean_patterns.T[:, None, None, :])
        save2nifti(pjoin(activation_dir, f'mean_std_patterns_{hemi}.nii.gz'),
                   mean_std_patterns.T[:, None, None, :])

        # output statistics
        with open(pjoin(activation_dir, f'statistics_{hemi}.csv'), 'w') as f:
            f.write(','.join(stats_table_titles) + '\n')
            lines = []
            for title in stats_table_titles:
                lines.append(stats_table_content[title])
            for line in zip(*lines):
                f.write(','.join(line) + '\n')

        print('{}clusters: done'.format(n_cluster))
Example #13
    project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering'
    acti_file = pjoin(
        project_dir,
        'data/HCP_face-avg/s2/S1200.1080.FACE-AVG_level2_zstat_hp200_s2_MSMAll.dscalar.nii'
    )
    patch_dir = pjoin(project_dir, 'data/HCP_face-avg/s2/patches_15/crg')
    patch_file = pjoin(patch_dir, 'rFFA_patch_maps_lt15.nii.gz')
    max_maps_file = pjoin(patch_dir, 'rFFA_max_maps_lt15.nii.gz')
    prob_max_map_file = pjoin(patch_dir, 'rFFA_prob_max_map_lt15.nii.gz')
    brain_structure = 'CIFTI_STRUCTURE_CORTEX_RIGHT'

    acti_maps = CiftiReader(acti_file).get_data(brain_structure, True)
    patch_maps = nib.load(patch_file).get_fdata()
    max_maps = np.zeros_like(patch_maps)
    for row in range(acti_maps.shape[0]):
        labels = np.unique(patch_maps[row])
        for label in labels:
            if label == 0:
                continue
            acti_map_tmp = acti_maps[row].copy()
            not_label_indices = np.logical_not(patch_maps[row] == label)
            acti_map_tmp[not_label_indices] = -np.inf
            max_idx = np.argmax(acti_map_tmp)
            max_maps[row, max_idx] = label
    prob_max_map = np.mean(max_maps > 0, 0)

    header = nib.Nifti2Header()
    header['descrip'] = 'FreeROI label'
    save2nifti(max_maps_file, max_maps, header=header)
    save2nifti(prob_max_map_file, prob_max_map)
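The per-patch peak search above reduces to masking everything outside the patch with -inf and taking argmax; a toy run with made-up values:

import numpy as np

acti = np.array([0.2, 0.9, 0.5, 0.1, 0.8, 0.3])
patch = np.array([1, 1, 1, 0, 2, 2])
max_map = np.zeros_like(patch)
for label in np.unique(patch):
    if label == 0:
        continue
    tmp = acti.copy()
    tmp[patch != label] = -np.inf    # hide vertices outside this patch
    max_map[np.argmax(tmp)] = label  # mark the patch's activation peak
print(max_map)  # -> [0 1 0 0 2 0]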
Example #14
        max_num_map = np.argmax(num_maps, 0) + 1
        max_prob_map = np.argmax(prob_maps, 0) + 1
        top_prob_ROIs = (prob_maps > prob_thr).astype(np.int8)
        top_acti_ROIs = np.zeros_like(mean_maps)
        for row, mean_map_FFA in enumerate(mean_maps[:, FFA_vertices]):
            col_val = list(zip(FFA_vertices, mean_map_FFA))
            col_val_sorted = sorted(col_val, key=lambda x: x[1], reverse=True)
            col_val_top = col_val_sorted[:int(
                len(FFA_vertices) * top_acti_percent)]
            for col, val in col_val_top:
                top_acti_ROIs[row, col] = 1

        # output maps
        header = nib.Nifti2Header()
        header['descrip'] = 'FreeROI label'
        save2nifti(pjoin(activation_dir, '{}_mean_maps.nii.gz'.format(hemi)),
                   mean_maps)
        save2nifti(
            pjoin(activation_dir,
                  '{}_prob_maps_z{}.nii.gz'.format(hemi, acti_thr)), prob_maps)
        # save2nifti(pjoin(activation_dir, 'max_num_map_z{}.nii.gz'.format(acti_thr)), max_num_map)
        # save2nifti(pjoin(activation_dir, 'max_prob_map_z{}.nii.gz'.format(acti_thr)), max_prob_map)
        # save2nifti(pjoin(activation_dir, 'top_prob_ROIs_z{}_p{}.nii.gz'.format(acti_thr, prob_thr)), top_prob_ROIs)
        # save2nifti(pjoin(activation_dir, '{}_top_acti_ROIs_percent{}.nii.gz'.format(hemi, int(top_acti_percent*100))),
        #            top_acti_ROIs, header=header)
        save2nifti(
            pjoin(activation_dir, '{}_pattern_mean_maps.nii.gz'.format(hemi)),
            pattern_mean_maps)

        # output statistics
        with open(pjoin(activation_dir, '{}_statistics.csv'.format(hemi)),
                  'w+') as f:
Example #15
        acti_maps = CiftiReader(acti_maps_file).get_data('CIFTI_STRUCTURE_CORTEX_RIGHT', True)
        mask = nib.load(mask_files.format(hemi[0])).get_fdata().ravel()
    else:
        raise RuntimeError("hemi error!")

    ROIs = [np.where(mask == i)[0] for i in np.unique(mask) if i != 0]
    intra_subgroup_dissimilarity = None
    inter_subgroup_dissimilarity = None
    for roi in ROIs:
        patterns = get_roi_pattern(acti_maps, roi, True)
        X = leave_one_out_representation(patterns, group_labels, metric)
        row_num, col_num = X.shape
        intra_tmp = []
        inter_tmp = []
        for row in range(row_num):
            for col in range(col_num):
                if row == col:
                    intra_tmp.extend(X[row, col])
                else:
                    inter_tmp.extend(X[row, col])
        if intra_subgroup_dissimilarity is None:
            intra_subgroup_dissimilarity = np.zeros((len(intra_tmp), acti_maps.shape[1]))
        if inter_subgroup_dissimilarity is None:
            inter_subgroup_dissimilarity = np.zeros((len(inter_tmp), acti_maps.shape[1]))  # len(inter_tmp), not len(intra_tmp)
        intra_subgroup_dissimilarity[:, roi] = np.atleast_2d(np.array(intra_tmp)).T
        inter_subgroup_dissimilarity[:, roi] = np.atleast_2d(np.array(inter_tmp)).T

    repre_dir = pjoin(cluster_num_dir, 'representation')
    save2nifti(pjoin(repre_dir, '{}_intra_subgroup_dissimilarity.nii.gz'.format(hemi)), intra_subgroup_dissimilarity)
    save2nifti(pjoin(repre_dir, '{}_inter_subgroup_dissimilarity.nii.gz'.format(hemi)), inter_subgroup_dissimilarity)
Example #16
    compare_dict = CsvReader(compare_file).to_dict(1)
    valid_idx_mat = np.array(compare_dict['p']) != 'nan'
    if mask_file is not None:
        mask_vertices = nib.freesurfer.read_label(mask_file)
        mask_idx_mat = np.zeros_like(valid_idx_mat, dtype=bool)
        mask_idx_mat[mask_vertices] = True
        valid_idx_mat = np.logical_and(valid_idx_mat, mask_idx_mat)

    compare_data = np.zeros((3, maps.shape[1]))
    ps_uncorrected = np.array([
        float(p) for idx, p in enumerate(compare_dict['p'])
        if valid_idx_mat[idx]
    ])
    reject, ps_corrected, alpha_sidak, alpha_bonf = multipletests(
        ps_uncorrected, 0.05, 'fdr_bh')
    ts = [
        float(t) for idx, t in enumerate(compare_dict['t'])
        if valid_idx_mat[idx]
    ]
    compare_data[0, valid_idx_mat] = ts
    compare_data[1, valid_idx_mat] = -ps_uncorrected
    compare_data[2, valid_idx_mat] = -ps_corrected
    compare_data[0, np.logical_not(valid_idx_mat)] = np.min(ts)
    compare_data[1, np.logical_not(valid_idx_mat)] = np.min(-ps_uncorrected)
    compare_data[2, np.logical_not(valid_idx_mat)] = np.min(-ps_corrected)
    save2nifti(
        pjoin(compare_dir, '{}_g1_vs_g2_posterior_masked.nii.gz'.format(hemi)),
        compare_data)
    # ---compare2nifti end---
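The `multipletests(ps_uncorrected, 0.05, 'fdr_bh')` call is statsmodels' multiple-comparison routine; a minimal standalone illustration with toy p-values:

import numpy as np
from statsmodels.stats.multitest import multipletests

pvals = np.array([0.001, 0.01, 0.03, 0.2])
reject, p_corr, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
print(reject)  # which tests survive Benjamini-Hochberg FDR
print(p_corr)  # the adjusted p-values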
Example #17
def save2mean_map(src_maps, group_labels, output_name):
    import numpy as np
    from commontool.io.io import save2nifti

    mean_maps = []
    for label in np.unique(group_labels):
        sub_maps = np.atleast_2d(src_maps[group_labels == label])
        mean_maps.append(np.mean(sub_maps, 0))
    save2nifti(output_name, np.array(mean_maps))
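A hypothetical usage with synthetic data (names made up): average six maps within two groups, yielding a 2 x 100 output.

import numpy as np

src_maps = np.random.rand(6, 100)            # 6 subjects x 100 vertices
group_labels = np.array([1, 1, 1, 2, 2, 2])  # two subgroups
save2mean_map(src_maps, group_labels, 'mean_maps.nii.gz')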
Example #18
    roi = nib.freesurfer.read_label(roi_file)
    activ_roi = activ[:, roi]

    corrs = 1 - cdist(activ_roi, activ_roi, 'correlation')
    top_corrs_stats = {'mean': [], 'min': [], 'max': []}
    top_activ_means = []
    for row in corrs:
        top_indices = np.argsort(-row)[:n_top]
        top_corrs = row[top_indices]
        top_corrs_stats['mean'].append(str(np.mean(top_corrs)))
        top_corrs_stats['min'].append(str(np.min(top_corrs)))
        top_corrs_stats['max'].append(str(np.max(top_corrs)))
        top_activ_means.append(np.mean(activ[top_indices], 0))

    top_corrs_stats_file = pjoin(trg_dir, f'top{n_top}_corrs_stats.csv')
    with open(top_corrs_stats_file, 'w') as wf:
        text = [','.join(i) for i in zip(*list(top_corrs_stats.values()))]
        text.insert(0, ','.join(top_corrs_stats.keys()))
        wf.write('\n'.join(text))

    top_activ_means = np.array(top_activ_means).T[:, None, None, :]
    top_activ_means_file = pjoin(trg_dir, f'top{n_top}_activ_means.nii.gz')
    save2nifti(top_activ_means_file, top_activ_means)

    top_activ_means_roi = np.ones_like(top_activ_means) * np.min(
        top_activ_means[roi], 0)
    top_activ_means_roi[roi] = top_activ_means[roi]
    top_activ_means_roi_file = pjoin(trg_dir,
                                     f'top{n_top}_activ_means_roi.nii.gz')
    save2nifti(top_activ_means_roi_file, top_activ_means_roi)
Example #19
    log_list = []
    connect_dict = dict()
    for result in results:
        value = result.get()
        if isinstance(value, list):
            log_list.extend(value)
        elif isinstance(value, dict):
            for k, v in value.items():
                if connect_dict.get(k) is None:
                    connect_dict[k] = [v]
                else:
                    connect_dict[k].append(v)
        else:
            raise RuntimeError("invalid return")
    for k, v in connect_dict.items():
        if 'subject' in k:
            subject_out = '\n'.join(v)
            open(pjoin(project_dir, 'subject{}_id'.format(k[1])),
                 'w+').writelines(subject_out)
        else:
            out_file = out_files.format(hemi2=k[0],
                                        label2=k[1],
                                        roi_label=k[2],
                                        hemi1=k[3],
                                        label1=k[4])
            save2nifti(out_file, np.array(v))
    log_out = '\n'.join(log_list)
    open(pjoin(project_dir, 'tseries_connectivity_log'),
         'w+').writelines(log_out)
Example #20
    subject_ids_file = pjoin(project_dir, 'data/HCP_face-avg/s2/subject_id')
    with open(subject_ids_file) as rf:
        subject_ids = rf.read().splitlines()

    patch_maps = nib.load(patch_file).get_fdata()
    patch_maps_filtered = np.zeros_like(patch_maps)
    patch_stats = []
    label_new = 0
    for row in range(patch_maps.shape[0]):
        labels = np.unique(patch_maps[row])
        patch_stat = [subject_ids[row]]
        patch_sizes = []
        for label in labels:
            if label == 0:
                continue
            vertices = np.where(patch_maps[row] == label)[0]
            size = len(vertices)
            if size > size_min:
                label_new += 1
                patch_sizes.append(str(size))
                patch_maps_filtered[row, vertices] = label_new
        patch_stat.append(str(label_new))
        patch_stat.extend(patch_sizes)
        patch_stats.append(','.join(patch_stat))
        label_new = 0

    header = nib.Nifti2Header()
    header['descrip'] = 'FreeROI label'
    save2nifti(patch_file_filtered, patch_maps_filtered, header=header)
    open(stat_file, 'w+').writelines('\n'.join(patch_stats))
Example #21
    #             'HCP_S1200_GroupAvg_v1/S1200.{}.white_MSMAll.32k_fs_LR.surf.gii'
    geo_files = None
    # mask_file = pjoin(project_dir, 'data/HCP_face-avg/s2/patches_15/crg2.3/{}FFA_patch_maps_lt15.nii.gz')
    mask_file = None
    # -----------------------
    print('Finish: predefine some variates')

    if mask_file is not None:
        lh_mask = nib.load(mask_file.format('l')).get_fdata() != 0
        rh_mask = nib.load(mask_file.format('r')).get_fdata() != 0
    else:
        lh_mask = None
        rh_mask = None
    if geo_files is None:
        lh_faces = None
        rh_faces = None
    else:
        raise NotImplementedError

    lmaps = nib.load(maps_file.format('lh')).get_fdata()
    rmaps = nib.load(maps_file.format('rh')).get_fdata()
    lFFA_vertices = nib.freesurfer.read_label(FFA_label_files.format('l'))
    rFFA_vertices = nib.freesurfer.read_label(FFA_label_files.format('r'))
    lFFA_patterns = get_roi_pattern(lmaps, lFFA_vertices, zscore, thr, bin,
                                    size_min, lh_faces, lh_mask)
    rFFA_patterns = get_roi_pattern(rmaps, rFFA_vertices, zscore, thr, bin,
                                    size_min, rh_faces, rh_mask)

    save2nifti(pjoin(analysis_dir, 'lFFA_patterns.nii.gz'), lFFA_patterns)
    save2nifti(pjoin(analysis_dir, 'rFFA_patterns.nii.gz'), rFFA_patterns)
Example #22
            f.writelines(regroup_ids)

    max_num_map = np.argmax(nums_maps, 0) + 1
    max_prob_map = np.argmax(prob_maps, 0) + 1
    top_prob_ROIs = (prob_maps > prob_thr).astype(np.int8)
    top_acti_ROIs = np.zeros_like(mean_maps)
    for row, mean_map_rFFA in enumerate(mean_maps[:, rFFA_vertices]):
        col_val = list(zip(rFFA_vertices, mean_map_rFFA))
        col_val_sorted = sorted(col_val, key=lambda x: x[1], reverse=True)
        col_val_top = col_val_sorted[:int(
            len(rFFA_vertices) * top_acti_percent)]
        for col, val in col_val_top:
            top_acti_ROIs[row, col] = 1

    # output data
    save2nifti(pjoin(result_dir, 'mean_maps.nii.gz'), mean_maps)
    save2nifti(pjoin(result_dir, 'prob_maps_z{}.nii.gz'.format(acti_thr)),
               prob_maps)
    # save2nifti(pjoin(result_dir, 'max_num_map_z{}.nii.gz'.format(acti_thr)), max_num_map)
    # save2nifti(pjoin(result_dir, 'max_prob_map_z{}.nii.gz'.format(acti_thr)), max_prob_map)
    # save2nifti(pjoin(result_dir, 'top_prob_ROIs_z{}_p{}.nii.gz'.format(acti_thr, prob_thr)), top_prob_ROIs)
    save2nifti(
        pjoin(result_dir,
              'top_acti_ROIs_percent{}.nii.gz'.format(top_acti_percent * 100)),
        top_acti_ROIs)

    # output statistics
    with open(pjoin(result_dir, 'statistics.csv'), 'w+') as f:
        f.write(','.join(stats_table_titles) + '\n')
        lines = []
        for title in stats_table_titles: