Example #1
def test_merge_nifti2cifti():
    import numpy as np
    import nibabel as nib
    from commontool.io.io import CiftiReader, save2cifti

    # load the left and right hemisphere NIfTI maps and flatten them to 1-D vertex arrays
    # (get_fdata() replaces nibabel's deprecated get_data())
    data_l = nib.load(r'E:\tmp\l_vcAtlas_refine2.nii.gz').get_fdata().ravel()
    data_r = nib.load(r'E:\tmp\r_vcAtlas_refine2.nii.gz').get_fdata().ravel()

    cifti_reader = CiftiReader(
        r'E:\useful_things\data\HCP\HCP_S1200_GroupAvg_v1'
        r'\HCP_S1200_997_tfMRI_FACE-AVG_level2_cohensd_hp200_s4_MSMAll.dscalar.nii'
    )
    # keep only the vertices covered by each hemisphere's brain model
    # (a sketch of the inverse mapping follows this example)
    bm_lr = cifti_reader.brain_models(
        ['CIFTI_STRUCTURE_CORTEX_LEFT', 'CIFTI_STRUCTURE_CORTEX_RIGHT'])
    idx2vtx_l = list(bm_lr[0].vertex_indices)
    idx2vtx_r = list(bm_lr[1].vertex_indices)
    cifti_data_l = data_l[idx2vtx_l]
    cifti_data_r = data_r[idx2vtx_r]

    # concatenate the left and right hemisphere data into a single CIFTI row
    cifti_data = np.c_[np.atleast_2d(cifti_data_l),
                       np.atleast_2d(cifti_data_r)]

    save2cifti(r'E:\tmp\vcAtlas_refine.dscalar.nii', cifti_data, bm_lr,
               ['surface vcAtlas'])
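A hedged, standalone sketch (not part of the original example) of the inverse mapping: scattering CIFTI columns back onto a full surface map. The sizes and index arrays below are synthetic stand-ins; in the example they would come from list(bm_lr[0].vertex_indices) and cifti_data_l.

import numpy as np

n_surf_vtx = 32492                            # vertex count of one 32k_fs_LR hemisphere
idx2vtx = np.array([0, 5, 7, 11])             # stand-in for list(bm_lr[0].vertex_indices)
cifti_cols = np.array([1.0, 2.0, 3.0, 4.0])   # stand-in for cifti_data_l
full_map = np.zeros(n_surf_vtx)
full_map[idx2vtx] = cifti_cols                # CIFTI column i lands on surface vertex idx2vtx[i]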
Example #2
def cifti_io():
    import numpy as np
    from commontool.io.io import CiftiReader, save2cifti

    reader1 = CiftiReader(r'E:\useful_things\data\HCP\HCP_S1200_GroupAvg_v1\HCP_S1200_GroupAvg_v1'
                          r'\HCP_S1200_997_tfMRI_ALLTASKS_level2_cohensd_hp200_s4_MSMAll.dscalar.nii')

    # write map 19 (the FACE-AVG contrast) out as a single-map dscalar file
    save2cifti(r'E:\tmp\HCP_S1200_997_tfMRI_FACE-AVG_level2_cohensd_hp200_s4_MSMAll.dscalar.nii',
               np.atleast_2d(reader1.get_data()[19]),
               reader1.brain_models(), reader1.map_names([19]), reader1.volume, reader1.label_tables([19]))

    # read the new file back and check that the left-cortex data round-trips
    reader2 = CiftiReader(r'E:\tmp\HCP_S1200_997_tfMRI_FACE-AVG_level2_cohensd_hp200_s4_MSMAll.dscalar.nii')
    data1 = reader1.get_data('CIFTI_STRUCTURE_CORTEX_LEFT', zeroize=True)[19]
    print(data1)
    data2 = reader2.get_data('CIFTI_STRUCTURE_CORTEX_LEFT', zeroize=True)[0]
    print(data2)
    print(np.max(data1 - data2), np.min(data1 - data2))
Example #3
    reader = None
    for fpath in src_paths:
        if not os.path.exists(fpath):
            message = 'Path-{0} does not exist!\n'.format(fpath)
            print(message, end='')
            log_file.writelines(message)
            continue

        reader = CiftiReader(fpath)
        # Without .copy(), each element appended to merged_data would be a view into the
        # full data array held by the reader, which keeps that whole array alive in memory.
        # Since only one row per file is needed, .copy() makes the element an independent
        # array, so the reader's full data can be garbage-collected once the reader is
        # replaced (see the short sketch after this example).
        merged_data.append(reader.get_data()[column - 1].copy())
        map_names.append(reader.map_names()[column - 1])
        print('Merged:', fpath)
    if reader is None:
        message = "Can't find any valid source path\n"
        print(message, end='')
        log_file.writelines(message)
    else:
        message = 'Start save2cifti\n'
        print(message, end='')
        log_file.writelines(message)
        save2cifti(out_path, np.array(merged_data), reader.brain_models(),
                   map_names)

    log_file.write('done')
    log_file.close()
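A minimal standalone sketch (not from the original script) of the point made in the comment above: basic NumPy indexing returns a view whose .base keeps the full array alive, whereas .copy() returns an independent buffer, so the parent array can be freed.

import numpy as np

big = np.zeros((1000, 91282))   # stands in for the full data array held by a reader
row_view = big[0]               # a view: row_view.base is big, so big stays referenced
row_copy = big[0].copy()        # an independent array with its own buffer

print(row_view.base is big)     # True  -> keeping row_view keeps the whole array in memory
print(row_copy.base is None)    # True  -> big can be collected once no reference remains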
Example #4
if __name__ == '__main__':
    import numpy as np

    from os.path import join as pjoin
    from commontool.io.io import CiftiReader, save2cifti

    project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering'
    test_par = pjoin(project_dir, 'data/HCP/tseries_test_dir')
    series_LR_files = pjoin(test_par, '{subject}/tfMRI_WM_LR_Atlas_MSMAll.dtseries.nii')
    series_RL_files = pjoin(test_par, '{subject}/tfMRI_WM_RL_Atlas_MSMAll.dtseries.nii')
    mean_signal_maps_out = pjoin(test_par, 'tfMRI_WM_Mean_BOLD_Signal_MSMAll_new.dscalar.nii')

    subject_ids = open(pjoin(test_par, 'subject_id')).read().splitlines()

    mean_signal_maps = []
    for subject in subject_ids:
        series_LR_file = series_LR_files.format(subject=subject)
        series_RL_file = series_RL_files.format(subject=subject)
        reader_LR = CiftiReader(series_LR_file)
        reader_RL = CiftiReader(series_RL_file)
        series_LR = reader_LR.get_data()
        series_RL = reader_RL.get_data()
        series = np.r_[series_LR, series_RL]
        mean_signal = np.mean(series, 0)
        mean_signal_maps.append(mean_signal)

        print('Finish:', subject)

    save2cifti(mean_signal_maps_out, np.array(mean_signal_maps), reader_RL.brain_models(), subject_ids)
Example #5
    from os.path import join as pjoin
    from commontool.io.io import CiftiReader, save2cifti

    # predefine some variables
    project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering'

    reader = CiftiReader(
        '/nfs/p1/public_dataset/datasets/hcp/DATA/HCP_S1200_GroupAvg_v1/'
        'HCP_S1200_GroupAvg_v1/S1200.All.MyelinMap_BC_MSMAll.32k_fs_LR.dscalar.nii'
    )
    data = reader.get_data()
    names = reader.map_names()

    subj_id_file = pjoin(project_dir, 'data/HCP_face-avg/s2/subject_id')
    with open(subj_id_file) as rf:
        subj_ids = rf.read().splitlines()
    names_1080 = [_ + '_MyelinMap' for _ in subj_ids]

    indices = []
    for name in names_1080:
        if name in names:
            indices.append(names.index(name))

    data_1080 = data[indices]

    # output
    save2cifti(
        pjoin(
            project_dir,
            'data/HCP_face-avg/S1200_1080_MyelinMap_BC_MSMAll_32k_fs_LR.dscalar.nii'
        ), data_1080, reader.brain_models(), names_1080)
Example #6
    group_labels_file = pjoin(cluster_num_dir, 'group_labels')
    mfs_dir = pjoin(cluster_num_dir, 'mfs')
    if not os.path.exists(mfs_dir):
        os.makedirs(mfs_dir)

    curv_reader = CiftiReader(curv_file)
    aparc_reader = CiftiReader(aparc_file)
    sulc_mask = curv_reader.get_data() < 0
    fusiform_mask = np.logical_or(aparc_reader.get_data() == 21, aparc_reader.get_data() == 96)

    with open(group_labels_file) as rf:
        group_labels = np.array(rf.read().split(' '), dtype=np.uint16)

    mfs_prob_maps = []
    fusiform_prob_maps = []
    map_names = []
    for label in sorted(set(group_labels)):
        indices = group_labels == label
        subgroup_mfs_mask = np.logical_and(sulc_mask[indices], fusiform_mask[indices])
        subgroup_mfs_prob = np.mean(subgroup_mfs_mask, 0)
        subgroup_fusiform_prob = np.mean(fusiform_mask[indices], 0)

        mfs_prob_maps.append(subgroup_mfs_prob)
        fusiform_prob_maps.append(subgroup_fusiform_prob)
        map_names.append('label{}'.format(label))

    save2cifti(pjoin(mfs_dir, 'MFS_prob_maps.dscalar.nii'), np.array(mfs_prob_maps), curv_reader.brain_models(),
               map_names)
    save2cifti(pjoin(mfs_dir, 'fusiform_prob_maps.dscalar.nii'), np.array(fusiform_prob_maps),
               curv_reader.brain_models(), map_names)
Example #7
    with open(subject_id_file) as rf:
        subject_ids = rf.read().splitlines()

    maps_new_dict = OrderedDict()
    map_names_new_dict = OrderedDict()
    for k in interested_copes.keys():
        maps_new_dict[k] = []
        map_names_new_dict[k] = []
    brain_models = None
    for subject_id in subject_ids:
        src_file = src_files.format(subject_id)
        reader = CiftiReader(src_file)
        maps = reader.get_data()
        map_names = reader.map_names()
        if brain_models is None:
            brain_models = reader.brain_models()

        # make sure we got the right copes
        for k, v in interested_copes.items():
            if v not in map_names[k]:
                raise RuntimeError("subject-{0}'s cope{1} is not {2}".format(
                    subject_id, k, v))

        for k in interested_copes.keys():
            maps_new_dict[k].append(maps[k].copy())
            map_names_new_dict[k].append(map_names[k])
            print('Finished: merge {0}_cope{1}'.format(subject_id, k))

    for k in interested_copes.keys():
        save2cifti(trg_file.format(k + 1, interested_copes[k]),
                   np.array(maps_new_dict[k]), brain_models,
                   map_names_new_dict[k])