Example #1
def calc_map_corr(data_file1,
                  data_file2,
                  atlas_name,
                  roi_name,
                  out_file,
                  map_names1=None,
                  map_names2=None,
                  index=False):
    """
    Compute the correlations between the data1 maps and the data2 maps within
    the ROI of the specified atlas.
    In the saved CSV file, the index is map_names1 and the columns are map_names2.

    Args:
        data_file1 (str): end with .dscalar.nii
            shape=(n_map1, LR_count_32k)
        data_file2 (str): end with .dscalar.nii
            shape=(n_map2, LR_count_32k)
        atlas_name (str):
        roi_name (str):
        out_file (str):
        map_names1 (list, optional): Defaults to None.
            If None, use the map names in data_file1.
        map_names2 (list, optional): Defaults to None.
            If None, use the map names in data_file2.
        index (bool, optional): Defaults to False.
            Whether to save the DataFrame index.
    """
    # prepare
    reader1 = CiftiReader(data_file1)
    reader2 = CiftiReader(data_file2)
    data_maps1 = reader1.get_data()
    data_maps2 = reader2.get_data()
    atlas = Atlas(atlas_name)
    assert atlas.maps.shape == (1, LR_count_32k)
    roi_idx_map = atlas.maps[0] == atlas.roi2label[roi_name]
    data_maps1 = data_maps1[:, roi_idx_map]
    data_maps2 = data_maps2[:, roi_idx_map]

    if map_names1 is None:
        map_names1 = reader1.map_names()
    else:
        assert len(map_names1) == data_maps1.shape[0]

    if map_names2 is None:
        map_names2 = reader2.map_names()
    else:
        assert len(map_names2) == data_maps2.shape[0]

    # calculate
    corrs = 1 - cdist(data_maps1, data_maps2, 'correlation')

    # save
    df = pd.DataFrame(corrs, index=map_names1, columns=map_names2)
    df.to_csv(out_file, index=index)
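
A minimal usage sketch for calc_map_corr; the file names, atlas name and ROI name below are hypothetical placeholders, and the module-level imports used by the function (CiftiReader, Atlas, LR_count_32k, cdist, pd) are assumed to be available as in the original project:

# Hypothetical call: correlate every map in maps1 with every map in maps2
# within one ROI and write the resulting correlation matrix to a CSV file.
calc_map_corr(
    data_file1='maps1.dscalar.nii',
    data_file2='maps2.dscalar.nii',
    atlas_name='HCP-MMP',      # placeholder atlas name
    roi_name='R_FFC',          # placeholder ROI name
    out_file='maps1_vs_maps2_corr.csv',
    index=True)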
Example #2
def extract_net_parcel_info():
    """
    List each network's number and name, together with the numbers and names
    of the MMP parcels it contains.
    """
    from magicbox.io.io import CiftiReader

    # inputs
    net_file = pjoin(work_dir, 'networks.dlabel.nii')

    # outputs
    out_file = pjoin(work_dir, 'net_parcel_info.txt')

    # prepare
    reader = CiftiReader(net_file)
    net_names = reader.map_names()
    lbl_tables = reader.label_tables()

    # calculate & save
    with open(out_file, 'w') as wf:
        for net_idx, net_name in enumerate(net_names):
            wf.write(f'>>>{net_idx+1}-{net_name}\n')
            lbl_tab = lbl_tables[net_idx]
            for lbl_k, lbl_v in lbl_tab.items():
                if lbl_k == 0:
                    continue
                wf.write(f'{lbl_k}-{lbl_v.label[:-4]}\n')
            wf.write('<<<\n')
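
Based on the write loop above, each network produces one block in net_parcel_info.txt: a '>>>' line with the network's 1-based index and name, one 'key-name' line per parcel (label key 0, the background, is skipped, and the last four characters of each parcel label are dropped), and a closing '<<<' line. A hypothetical run, with illustrative network and parcel names:

extract_net_parcel_info()
# net_parcel_info.txt then contains blocks shaped like:
# >>>1-Visual
# 1-L_V1
# 2-L_V2
# <<<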
Example #3
def calc_pattern_corr_between_twins(meas_name='thickness'):
    """
    Calculate spatial pattern correlation between each twin pair.
    """
    import pandas as pd
    import nibabel as nib
    from magicbox.io.io import CiftiReader
    from scipy.stats import pearsonr

    twins_id_file = pjoin(work_dir, 'twins_id_1080.csv')
    meas2file = {
        'thickness':
        '/nfs/p1/public_dataset/datasets/hcp/DATA/'
        'HCP_S1200_GroupAvg_v1/HCP_S1200_GroupAvg_v1/'
        'S1200.All.thickness_MSMAll.32k_fs_LR.dscalar.nii',
        'myelin':
        '/nfs/p1/public_dataset/datasets/hcp/DATA/'
        'HCP_S1200_GroupAvg_v1/HCP_S1200_GroupAvg_v1/'
        'S1200.All.MyelinMap_BC_MSMAll.32k_fs_LR.dscalar.nii',
        'activ':
        pjoin(proj_dir, 'analysis/s2/activation.dscalar.nii')
    }
    hemis = ('lh', 'rh')
    hemi2stru = {
        'lh': 'CIFTI_STRUCTURE_CORTEX_LEFT',
        'rh': 'CIFTI_STRUCTURE_CORTEX_RIGHT'
    }
    mpm_file = pjoin(
        proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
        'MPM_v3_{hemi}_0.25.nii.gz')
    roi2label = {'IOG-face': 1, 'pFus-face': 2, 'mFus-face': 3}
    zyg2label = {'MZ': 1, 'DZ': 2}
    out_file = pjoin(work_dir, f'twins_pattern-corr_{meas_name}.csv')

    df = pd.read_csv(twins_id_file)
    meas_reader = CiftiReader(meas2file[meas_name])
    meas_ids = [int(name.split('_')[0]) for name in meas_reader.map_names()]
    twin1_indices = [meas_ids.index(i) for i in df['twin1']]
    twin2_indices = [meas_ids.index(i) for i in df['twin2']]

    out_df = pd.DataFrame()
    out_df['zygosity'] = df['zygosity']
    out_df['zyg'] = [zyg2label[zyg] for zyg in df['zygosity']]
    for hemi in hemis:
        meas1 = meas_reader.get_data(hemi2stru[hemi], True)[twin1_indices]
        meas2 = meas_reader.get_data(hemi2stru[hemi], True)[twin2_indices]
        # use get_fdata(); the older get_data() accessor is deprecated in nibabel
        mpm = nib.load(mpm_file.format(hemi=hemi)).get_fdata().squeeze()
        for roi, lbl in roi2label.items():
            idx_vec = mpm == lbl
            out_df[f"{hemi}_{roi.split('-')[0]}"] = [
                pearsonr(i[idx_vec], j[idx_vec])[0]
                for i, j in zip(meas1, meas2)
            ]
    out_df.to_csv(out_file, index=False)
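
A hypothetical usage of calc_pattern_corr_between_twins, assuming work_dir, pjoin and the HCP group-average files referenced above are available as in the original project:

import pandas as pd

# Compute twin-pair pattern correlations for the myelin maps and summarize
# the per-pair ROI correlations by zygosity (MZ vs. DZ).
calc_pattern_corr_between_twins(meas_name='myelin')
corr_df = pd.read_csv(pjoin(work_dir, 'twins_pattern-corr_myelin.csv'))
print(corr_df.groupby('zygosity').mean())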
Example #4
def zscore_data(data_file, out_file):
    """
    Z-score each subject's data across the whole brain.

    Args:
        data_file (str): .dscalar.nii
        out_file (str): .dscalar.nii
    """
    reader = CiftiReader(data_file)
    data = reader.get_data()
    data = zscore(data, 1)
    save2cifti(out_file, data, reader.brain_models(), reader.map_names())
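
Here zscore(data, 1) is presumably scipy.stats.zscore applied along axis 1, so each row (one subject's whole-brain map) is standardized across grayordinates. A minimal NumPy sketch of the same operation on a toy array:

import numpy as np

data = np.random.rand(3, 100)  # toy array: 3 subjects x 100 grayordinates
z = (data - data.mean(axis=1, keepdims=True)) / data.std(axis=1, keepdims=True)
# each row of z now has mean 0 and unit standard deviation, matching zscore(data, 1)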
Example #5
def check_1096_sid():
    """
    Check that the 1096 subject IDs in HCPY_SubjInfo.csv, and their order,
    match the S1200 1096 myelin file and the S1200 1096 thickness file.
    """
    df = pd.read_csv(dataset_name2info['HCPY'])
    subj_ids = df['subID'].to_list()

    fpaths = (s1200_1096_myelin, s1200_1096_thickness)
    for fpath in fpaths:
        reader = CiftiReader(fpath)
        tmp = [int(i.split('_')[0]) for i in reader.map_names()]
        assert tmp == subj_ids
Example #6
def mask_maps(data_file, atlas_name, roi_name, out_file):
    """
    Set all values of the data maps outside the ROI of the specified atlas to NaN.

    Args:
        data_file (str): end with .dscalar.nii
            shape=(n_map, LR_count_32k)
        atlas_name (str):
        roi_name (str):
        out_file (str):
    """
    # prepare
    reader = CiftiReader(data_file)
    data = reader.get_data()
    atlas = Atlas(atlas_name)
    assert atlas.maps.shape == (1, LR_count_32k)
    roi_idx_map = atlas.maps[0] == atlas.roi2label[roi_name]

    # calculate
    data[:, ~roi_idx_map] = np.nan

    # save
    save2cifti(out_file, data, reader.brain_models(), reader.map_names())
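
A hypothetical call of mask_maps; the file names, atlas name and ROI name are placeholders:

# Keep only the values inside one ROI and set everything else to NaN.
mask_maps(
    data_file='activation.dscalar.nii',        # placeholder input file
    atlas_name='HCP-MMP',                      # placeholder atlas name
    roi_name='R_FFC',                          # placeholder ROI name
    out_file='activation_R_FFC.dscalar.nii')   # placeholder output file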