Example 1
def run(args):
    """Run it"""
    verbose(1, "Loading %d result files" % len(args.data))

    filetype_in = guess_backend(args.data[0])

    if filetype_in == 'nifti':
        dss = [fmri_dataset(f) for f in args.data]
    elif filetype_in == 'hdf5':
        dss = [h5load(f) for f in args.data]
    data = np.asarray([d.samples[args.isample] for d in dss])

    if args.mask:
        filetype_mask = guess_backend(args.mask)
        if filetype_mask == 'nifti':
            mask = nib.load(args.mask).get_data()
        elif filetype_mask == 'hdf5':
            mask = h5load(args.mask).samples
        out_of_mask = mask == 0
    else:
        # just take where no voxel had a value
        out_of_mask = np.sum(data != 0, axis=0) == 0

    t, p = ttest_1samp(data,
                       popmean=args.chance_level,
                       axis=0,
                       alternative=args.alternative)

    if args.stat == 'z':
        if args.alternative == 'two-sided':
            s = stats.norm.isf(p / 2)
        else:
            s = stats.norm.isf(p)
        # take the sign of the original t
        s = np.abs(s) * np.sign(t)
    elif args.stat == 'p':
        s = p
    elif args.stat == 't':
        s = t
    else:
        raise ValueError('WTF you gave me? have no clue about %r' %
                         (args.stat, ))

    if s.shape != out_of_mask.shape:
        try:
            out_of_mask = out_of_mask.reshape(s.shape)
        except ValueError:
            raise ValueError('Cannot use mask of shape {0} with '
                             'data of shape {1}'.format(
                                 out_of_mask.shape, s.shape))
    s[out_of_mask] = 0

    verbose(1, "Saving to %s" % args.output)
    filetype_out = guess_backend(args.output)
    if filetype_out == 'nifti':
        map2nifti(dss[0], data=s).to_filename(args.output)
    else:  # filetype_out is hdf5
        s = Dataset(np.atleast_2d(s), fa=dss[0].fa, a=dss[0].a)
        h5save(args.output, s)
    return s
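As a usage sketch (not part of the original source): run() consumes an argparse-style namespace, so a minimal driver might look like the following, assuming the module-level imports above are in scope; every file name and value below is a placeholder.

from argparse import Namespace

# Hypothetical invocation of run(); all values are placeholders.
args = Namespace(
    data=['sub01_acc.nii.gz', 'sub02_acc.nii.gz'],  # one result file per subject
    isample=0,                  # row of .samples to extract from each dataset
    mask='group_mask.nii.gz',   # or None to fall back to the all-zero heuristic
    chance_level=0.5,           # popmean for the one-sample t-test
    alternative='greater',      # 'two-sided', 'greater', or 'less'
    stat='z',                   # 'z', 'p', or 't'
    output='group_zmap.nii.gz')
s = run(args)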
Example 2
def test_nifti_dataset_from3_d():
    """Test NiftiDataset based on 3D volume(s)
    """
    tssrc = os.path.join(pymvpa_dataroot, "bold.nii.gz")
    masrc = os.path.join(pymvpa_dataroot, "mask.nii.gz")

    # Test loading of 3D volumes
    # by default we are enforcing 4D, testing here with the demo 3d mask
    ds = fmri_dataset(masrc, mask=masrc, targets=1)
    assert_equal(len(ds), 1)

    import nibabel

    plain_data = nibabel.load(masrc).get_data()
    # Lets check if mapping back works as well
    assert_array_equal(plain_data, map2nifti(ds).get_data().reshape(plain_data.shape))

    # test loading from a list of filenames

    # for now we should fail if trying to load a mix of 4D and 3D volumes
    assert_raises(ValueError, fmri_dataset, (masrc, tssrc), mask=masrc, targets=1)

    # Lets prepare some custom NiftiImage
    dsfull = fmri_dataset(tssrc, mask=masrc, targets=1)
    ds_selected = dsfull[3]
    nifti_selected = map2nifti(ds_selected)

    # Load dataset from a mix of 3D volumes
    # (given by filenames and NiftiImages)
    labels = [123, 2, 123]
    ds2 = fmri_dataset((masrc, masrc, nifti_selected), mask=masrc, targets=labels)
    assert_equal(ds2.nsamples, 3)
    assert_array_equal(ds2.samples[0], ds2.samples[1])
    assert_array_equal(ds2.samples[2], dsfull.samples[3])
    assert_array_equal(ds2.targets, labels)
Example 3
def test_nifti_mapper(filename):
    """Basic testing of map2Nifti
    """
    skip_if_no_external('scipy')

    import nibabel
    data = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                             'example4d.nii.gz'),
                        targets=[1, 2])

    # test mapping of ndarray
    vol = map2nifti(data, np.ones((294912, ), dtype='int16'))
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24))
    assert_true((vol.get_data() == 1).all())
    # test mapping of the dataset
    vol = map2nifti(data)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24, 2))
    ok_(isinstance(vol, data.a.imgtype))

    # test providing custom imgtypes
    vol = map2nifti(data, imgtype=nibabel.Nifti1Pair)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    ok_(isinstance(vol, nibabel.Nifti1Pair))

    # Lets generate a dataset using an alternative format (MINC)
    # and see if type persists
    volminc = nibabel.MincImage(vol.get_data(), vol.get_affine(),
                                vol.get_header())
    ok_(isinstance(volminc, nibabel.MincImage))
    dsminc = fmri_dataset(volminc, targets=1)
    ok_(dsminc.a.imgtype is nibabel.MincImage)
    ok_(isinstance(dsminc.a.imghdr, nibabel.minc.MincImage.header_class))

    # Lets test if we could save/load now into Analyze volume/dataset
    if externals.versions['nibabel'] < '1.1.0':
        raise SkipTest(
            'nibabel prior 1.1.0 had an issue with types comprehension')
    volanal = map2nifti(
        dsminc, imgtype=nibabel.AnalyzeImage)  # MINC has no 'save' capability
    ok_(isinstance(volanal, nibabel.AnalyzeImage))
    volanal.to_filename(filename)
    dsanal = fmri_dataset(filename, targets=1)
    # this one is tricky since it might become Spm2AnalyzeImage
    ok_('AnalyzeImage' in str(dsanal.a.imgtype))
    ok_('AnalyzeHeader' in str(dsanal.a.imghdr.__class__))
    volanal_ = map2nifti(dsanal)
    ok_(isinstance(volanal_, dsanal.a.imgtype))  # type got preserved
Example 4
def test_er_nifti_dataset():
    # setup data sources
    tssrc = os.path.join(pymvpa_dataroot, u"bold.nii.gz")
    evsrc = os.path.join(pymvpa_dataroot, "fslev3.txt")
    masrc = os.path.join(pymvpa_dataroot, "mask.nii.gz")
    evs = FslEV3(evsrc).to_events()
    # load timeseries
    ds_orig = fmri_dataset(tssrc)
    # segment into events
    ds = eventrelated_dataset(ds_orig, evs, time_attr="time_coords")

    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s,
    # hence we should get round(9.0 / 2.5) * np.prod((1, 20, 40)) == 3200 features
    assert_equal(ds.nfeatures, 3200)
    assert_equal(len(ds), len(evs))
    # the voxel indices are reflattened after boxcaring, but still 3D
    assert_equal(ds.fa.voxel_indices.shape, (ds.nfeatures, 3))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:800], ds.fa.voxel_indices[800:1600])
    # each feature got an event offset value
    assert_array_equal(ds.fa.event_offsetidx, np.repeat([0, 1, 2, 3], 800))
    # check for all event attributes
    assert_true("onset" in ds.sa)
    assert_true("duration" in ds.sa)
    assert_true("features" in ds.sa)
    # check samples
    origsamples = _load_anyimg(tssrc)[0]
    for i, onset in enumerate([value2idx(e["onset"], ds_orig.sa.time_coords, "floor") for e in evs]):
        assert_array_equal(ds.samples[i], origsamples[onset : onset + 4].ravel())
        assert_array_equal(ds.sa.time_indices[i], np.arange(onset, onset + 4))
        assert_array_equal(ds.sa.time_coords[i], np.arange(onset, onset + 4) * 2.5)
        for evattr in [a for a in ds.sa if a.count("event_attrs") and not a.count("event_attrs_event")]:
            assert_array_equal(evs[i]["_".join(evattr.split("_")[2:])], ds.sa[evattr].value[i])
    # check offset: only the last one exactly matches the tr
    assert_array_equal(ds.sa.orig_offset, [1, 1, 0])

    # map back into voxel space, should ignore additional features
    nim = map2nifti(ds)
    # origsamples has t,x,y,z
    assert_equal(nim.get_shape(), origsamples.shape[1:] + (len(ds) * 4,))
    # check shape of a single sample
    nim = map2nifti(ds, ds.samples[0])
    # pynifti image has [t,]z,y,x
    assert_equal(nim.get_shape(), (40, 20, 1, 4))

    # and now with masking
    ds = fmri_dataset(tssrc, mask=masrc)
    ds = eventrelated_dataset(ds, evs, time_attr="time_coords")
    nnonzero = len(_load_anyimg(masrc)[0].nonzero()[0])
    assert_equal(nnonzero, 530)
    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s,
    # hence we should get round(9.0 / 2.5) == 4 samples per boxcar,
    # i.e. 4 * 530 features within the mask
    assert_equal(ds.nfeatures, 4 * 530)
    assert_equal(len(ds), len(evs))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:nnonzero], ds.fa.voxel_indices[nnonzero : 2 * nnonzero])
Example 7
def main(subject, study_dir, mask, stat, res_name, items='ac',
         suffix='_stim_fix2', feature_mask=None, radius=3, n_perm=1000,
         n_proc=None):

    from mvpa2.datasets.mri import map2nifti
    from mvpa2.mappers.zscore import zscore
    from mvpa2.measures.searchlight import sphere_searchlight
    from nireact import mvpa

    # lookup subject directory
    sp = su.SubjPath(subject, study_dir)

    # load task information
    vols = task.disp_vols(sp.path('behav', 'log'))

    # unpack the items option to get item type code
    item_names = 'abc'
    item_comp = [item_names.index(name) + 1 for name in items]

    # get post runs, A and C items only
    include = ((vols.run >= 5) & np.isin(vols.item_type, item_comp) &
               (vols.correct == 1))
    post = vols.loc[include, :]

    # load beta series
    ds = mvpa.load_disp_beta(sp, suffix, mask, feature_mask, verbose=1)

    # define measure and contrasts to write out
    contrasts = ['pos', 'block_inter', 'inter_block']
    m = mvpa.TriadVector(post, item_comp, stat, contrasts, n_perm)

    # zscore
    ds.sa['run'] = vols.run.values
    zscore(ds, chunks_attr='run')

    # searchlight
    print('Running searchlight...')
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(ds[include])

    # save results
    nifti_include = map2nifti(ds, sl_map[-1])
    for i, contrast in enumerate(contrasts):
        res_dir = sp.path('rsa', f'{res_name}_{contrast}')
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)

        nifti = map2nifti(ds, sl_map[i])
        nifti.to_filename(su.impath(res_dir, 'zstat'))

        nifti_include.to_filename(su.impath(res_dir, 'included'))
Example 8
def main(subject,
         study_dir,
         mask,
         feature_mask,
         res_dir,
         radius=3,
         n_proc=None):
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.datasets.mri import map2nifti

    # load functional data
    subject_dir = os.path.join(study_dir, f'tesser_{subject}')
    ds = mvpa.load_struct_timeseries(study_dir,
                                     subject,
                                     mask,
                                     feature_mask=feature_mask,
                                     verbose=1,
                                     zscore_run=True)

    # load events data, split by object within structure learning block
    vols = rsa.load_vol_info(study_dir, subject)
    events = vols.query('sequence_type > 0').copy()
    n_item = events['trial_type'].nunique()
    events['trial_type'] = (events['trial_type'] +
                            (events['sequence_type'] - 1) * n_item)

    # set up BRSA model
    model = brsa.GBRSA()
    n_ev = 21 * 2
    n_vol = ds.shape[0]
    mat, nuisance, scan_onsets = rsa.create_brsa_matrix(
        subject_dir, events, n_vol)
    m = mvpa.ItemBRSA(model, n_ev, mat, nuisance, scan_onsets)

    # run searchlight
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(ds)

    # save included voxels map
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    nifti_include = map2nifti(ds, sl_map[-1])
    include_file = os.path.join(res_dir, 'included.nii.gz')
    nifti_include.to_filename(include_file)

    # save pairwise correlations as a timeseries
    filepath = os.path.join(res_dir, 'stat.nii.gz')
    nifti = map2nifti(ds, sl_map[:-1])
    nifti.to_filename(filepath)
def split_mask(mask_image, workdir):
    """
    Split a mask with multiple ROIs into separate files for each ROI.

    Saves them in 'workdir/roi_masks' and returns a list of paths for
    each produced mask file.
    """
    ms = fmri_dataset(mask_image)

    maskpath = os.path.join(workdir, 'roi_masks')

    if not os.path.exists(maskpath):
        os.makedirs(maskpath)

    # list of paths for each roi mask
    maskfiles = []

    # save a separate mask for each roi to file in workdir
    for roivalue in range(1, int(ms.samples[0].max()) + 1):
        msc = ms.copy()
        msc.samples[0][msc.samples[0] != roivalue] = False
        image = map2nifti(msc)
        maskfile = os.path.join(maskpath, 'roi{}_mask.nii.gz'.format(roivalue))
        image.to_filename(maskfile)
        maskfiles.append(maskfile)

    return maskfiles
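A quick usage sketch, assuming a hypothetical atlas image whose voxel values are integer ROI labels starting at 1; both paths are placeholders.

# Split a hypothetical multi-ROI atlas into one mask file per ROI value.
roi_files = split_mask('atlas_rois.nii.gz', '/tmp/work')
# roi_files -> ['/tmp/work/roi_masks/roi1_mask.nii.gz', ...]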
Example 10
def test_searchlight_cross_decoding(path, subjects, conf_file, type, **kwargs):

    conf = read_configuration(path, conf_file, type)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]

    debug.active += ["SLC"]

    ds_merged = get_merged_ds(path, subjects, conf_file, type, **kwargs)

    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, NFoldPartitioner(attr='task'))

    maps = []

    for ds in ds_merged:

        ds.targets[ds.targets == 'point'] = 'face'
        ds.targets[ds.targets == 'saccade'] = 'place'

        sl = sphere_searchlight(cv, radius, space='voxel_indices')

        sl_map = sl(ds)

        sl_map.samples *= -1
        sl_map.samples += 1

        nif = map2nifti(sl_map, imghdr=ds.a.imghdr)

        maps.append(nif)

    datetime = get_time()
    analysis = 'cross_searchlight'
    mask = conf['mask_area']
    task = type

    parent_dir = os.path.join(path, '0_results',
                              datetime + '_' + analysis + '_' + mask + '_' + task)
    os.makedirs(parent_dir)

    for s, map_ in zip(subjects, maps):
        results_dir = os.path.join(parent_dir, s)
        os.makedirs(results_dir)

        fname = s + '_radius_' + str(radius) + '_searchlight_map.nii.gz'
        map_.to_filename(os.path.join(results_dir, fname))

    return maps
Example 11
def test_nifti_dataset_from3_d():
    """Test NiftiDataset based on 3D volume(s)
    """
    tssrc = os.path.join(pymvpa_dataroot, 'bold.nii.gz')
    masrc = os.path.join(pymvpa_dataroot, 'mask.nii.gz')

    # Test loading of 3D volumes
    # by default we are enforcing 4D, testing here with the demo 3d mask
    ds = fmri_dataset(masrc, mask=masrc, targets=1)
    assert_equal(len(ds), 1)

    import nibabel
    plain_data = nibabel.load(masrc).get_data()
    # Lets check if mapping back works as well
    assert_array_equal(plain_data,
                       map2nifti(ds).get_data().reshape(plain_data.shape))

    # test loading from a list of filenames

    # for now we should fail if trying to load a mix of 4D and 3D volumes
    # TODO: nope -- it should work and we should test here if correctly
    dsfull_plusone = fmri_dataset((masrc, tssrc), mask=masrc, targets=1)

    # Lets prepare some custom NiftiImage
    dsfull = fmri_dataset(tssrc, mask=masrc, targets=1)
    assert_equal(len(dsfull) + 1, len(dsfull_plusone))
    assert_equal(dsfull.nfeatures, dsfull_plusone.nfeatures)
    # skip 3d mask in 0th sample

    assert_array_equal(dsfull.samples, dsfull_plusone[1:].samples)
    ds_selected = dsfull[3]
    nifti_selected = map2nifti(ds_selected)

    # Load dataset from a mix of 3D volumes
    # (given by filenames and NiftiImages)
    labels = [123, 2, 123]
    ds2 = fmri_dataset((masrc, masrc, nifti_selected),
                       mask=masrc,
                       targets=labels)
    assert_equal(ds2.nsamples, 3)
    assert_array_equal(ds2.samples[0], ds2.samples[1])
    assert_array_equal(ds2.samples[2], dsfull.samples[3])
    assert_array_equal(ds2.targets, labels)
Example 12
def perm_hist(subj):
    conf = AnalysisConfiguration()
    data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
    sub_dir = _opj(data_dir, conf.study_name, 'sub{:0>3d}'.format(subj))
    directory = _opj(data_dir, 'LP/sub{:0>3d}/results/'.format(subj))
    print conf.dir_name()
    for pair in conf.conditions_to_compare:
        # print _opj(directory, conf.dir_name(),
        #            '{}*{}{}*.p'.format(conf.mask_name, pair[0], pair[1]))
        files = sorted(glob(_opj(directory, conf.dir_name(),
                                 '{}*{}{}*.p'.format(conf.mask_name,
                                                     pair[0], pair[1]))))
        plt.figure()
        plt.subplot(211)
        plt.title('sub{:0>3d}-{}{}'.format(subj, pair[0], pair[1]))
        print pair, " ", len(files)
        all_maps = []
        for f in files[:-1]:
            f_h = file(f, 'r')
            m = pickle.load(f_h)
            all_maps.append(m)
            if 'perm' in f:
                color = 'black'
                line_width = 1
            else:
                color = 'crimson'
                line_width = 2
            plt.hist(np.transpose(m), bins=20, histtype='step',
                     color=[color], lw=line_width)
        perms = vstack(all_maps)
        real_f = files[-1]
        f_h = file(real_f, 'r')
        real_map = pickle.load(f_h)
        color = 'crimson'
        line_width = 2
        plt.hist(np.transpose(real_map), bins=20, histtype='step',
                 color=[color], lw=line_width)
        percentiles = np.zeros((1, len(real_map.samples[0])))
        for i, vox in enumerate(real_map.samples[0]):
            percentiles[0, i] = percentileofscore(perms[:, i].samples.flat, vox)
        plt.subplot(212)
        print len(percentiles[0])
        plt.hist(percentiles[0], bins=20, histtype='step')
        real_map.samples = percentiles
        nii = real_f.replace("_sl_map.p", "-acc.nii.gz")
        nii_file = nib.load(nii)
        perc_results = map2nifti(real_map, imghdr=nii_file.header)
        perc_nii_filename = real_f.replace(
            "_sl_map.p", "-percentiles_sub{:0>3d}.nii.gz".format(subj))
        perc_results.to_filename(perc_nii_filename)
        thr_prc_filename = perc_nii_filename.replace(".nii.gz", "_p0.01.nii.gz")
        thr = fsl.maths.Threshold(in_file=perc_nii_filename, thresh=100,
                                  out_file=thr_prc_filename)
        thr.run()
        mni_thr_filename = thr_prc_filename.replace(".nii.gz", "_mni.nii.gz")
        apply_warp(sub_dir, thr_prc_filename, mni_thr_filename)

    plt.show()
    # plt.savefig('/tmp/sub{:0>3d}_{}{}'.format(subj, pair[0], pair[1]))
    raw_input()
Example 15
    def searchlight(self, ds, cvte):

        sl = sphere_searchlight(cvte,
                                radius=self._radius,
                                space='voxel_indices')
        sl_map = sl(ds)
        sl_map.samples *= -1
        sl_map.samples += 1

        map_ = map2nifti(sl_map, imghdr=ds.a.imghdr)
        map_ = ni.Nifti1Image(map_.get_data(), affine=ds.a.imgaffine)

        self.maps.append(map_)

        return map_
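The pair of in-place operations above (samples *= -1, then samples += 1) computes 1 - x, a common PyMVPA idiom for converting a searchlight error map into an accuracy map (or the reverse). A standalone illustration on a plain NumPy array:

import numpy as np

err = np.array([0.25, 0.5, 0.1])  # made-up searchlight error values
acc = err.copy()
acc *= -1
acc += 1                          # acc is now 1 - err
assert np.allclose(acc, 1 - err)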
Example 16
    def _forward_dataset(self, ds):

        if self._neighbors is None and ds.samples.max() == ds.samples.min():
            raise RuntimeError("The dataset is homogeneous, so the neighbors "
                               "that provide the spatial constraint cannot be None.")
        img = map2nifti(ds)
        mds = ds.copy(deep=False)
        mds.samples = ds.samples.copy()

        # chunks_attr is not defined in this snippet; presumably it is set
        # elsewhere in the class (e.g. as an attribute or module default)
        for c in mds.sa[chunks_attr].unique:
            slicer = np.where(mds.sa[chunks_attr].value == c)[0]
            mds.samples[slicer] = self._graph(mds.samples[slicer], np.shape(img))
        mds.a['DsType'] = 'graph'
        mds.a['header'] = img.get_header()
        mds.a['dims'] = np.shape(img)
        return mds
Example 17
def test_regress_fmri_dataset(tempfile=None, testfile=None):
    if not externals.exists('nibabel'):
        raise SkipTest("can't test without nibabel")

    # verify that we have actual load
    if not (exists(testfile) and exists(realpath(testfile))):
        raise SkipTest("File %s seems to be missing -- 'git annex get .' "
                       "to fetch all test files first" % testfile)
    # Still might be a direct mode, or windows -- so lets check the size
    if os.stat(testfile).st_size < 1000:
        raise SkipTest("File %s seems to be small/empty -- 'git annex get .' "
                       "to fetch all test files first" % testfile)

    from mvpa2.datasets.mri import map2nifti

    ds = h5load(testfile)  # load previously generated dataset
    # rudimentary checks that data was loaded correctly
    assert_equal(np.sum(ds), 11444)
    assert_equal(sorted(ds.sa.keys()),
                 ['chunks', 'targets', 'time_coords', 'time_indices'])
    assert_equal(sorted(ds.fa.keys()), ['voxel_indices'])

    # verify that map2nifti works whenever the version of nibabel on the system
    # is greater than or equal to the one it was saved with:
    if externals.versions['nibabel'] >= ds.a.versions['nibabel']:
        # test that we can get str of the niftihdr:
        # to avoid such issues as https://github.com/PyMVPA/PyMVPA/issues/278
        hdr_str = str(ds.a.imghdr)
        assert(hdr_str != "")
        ds_ni = map2nifti(ds)
        # verify that we can store generated nifti to a file
        ds_ni.to_filename(tempfile)
        assert(os.path.exists(tempfile))
    else:
        raise SkipTest(
            "Our version of nibabel %s is older than the one file %s was saved "
            "with: %s" % (externals.versions['nibabel'],
                          testfile,
                          ds.a.versions['nibabel']))
def do_searchlight(glm_dataset, radius, output_basename, with_null_prob=False,
                   clf=LinearCSVMC(space='condition')):
    if glob(output_basename + "*"):
        print "sl already ran"
        return
    splt = ChainNode([NFoldPartitioner(),
                      Balancer(attr='condition', count=1, limit='partitions',
                               apply_selection=True)],
                     space='partitions')
    #splt = NFoldPartitioner()
    cv = CrossValidation(clf, splt,
                         errorfx=mean_match_accuracy,
                         enable_ca=['stats'], postproc=mean_sample())
    distr_est = []
    if with_null_prob:
        permutator = AttributePermutator('condition', count=100,
                                         limit='chunks')
        distr_est = MCNullDist(permutator, tail='left',
                               enable_ca=['dist_samples'])
        """
        repeater   = Repeater(count=100)
        permutator = AttributePermutator('condition', limit={'partitions': 1}, count=1)
        null_cv = CrossValidation(clf, ChainNode([splt, permutator],space=splt.get_space()),
                      postproc=mean_sample())
        null_sl = sphere_searchlight(null_cv, radius=radius, space='voxel_indices',
                         enable_ca=['roi_sizes'])
        distr_est = MCNullDist(repeater,tail='left', measure=null_sl,
                       enable_ca=['dist_samples'])

        sl = sphere_searchlight(cv, radius=radius, space='voxel_indices',
                                null_dist=distr_est,
                                enable_ca=['roi_sizes', 'roi_feature_ids']
                                # ,result_fx = _fill_in_scattered_results # average across all spheres
                                )
        """
    else:
        kwa = {'voxel_indices': KNNNeighbourhood(radius, glm_dataset.fa['voxel_indices'])}
        qe = IndexQueryEngine(**kwa)
        # init the searchlight with the queryengine
        sl = Searchlight(cv, queryengine=qe, roi_ids=None,
                       enable_ca=['roi_sizes', 'roi_feature_ids']
                       # ,results_fx = _fill_in_scattered_results # average across all spheres
                          )
        # sl = sphere_searchlight(cv, radius=radius, space='voxel_indices',
        #                         result_fx=_fill_in_scattered_results)  # average across all spheres
    # ds = glm_dataset.copy(deep=False,
    #                       sa=['condition', 'chunks'],
    #                       fa=['voxel_indices'],
    #                       a=['mapper'])
    from datetime import datetime
    print "starting sl {}".format(datetime.now())
    sl_map = sl(glm_dataset)
    print "finished sl {}".format(datetime.now())
    import pickle
    pickle.dump(sl_map, open("{}_sl_map.p".format(output_basename), "wb"))
#    pickle.dump(sl.ca.roi_feature_ids, open("{}_sl_feature_ids.p".format(output_basename), "wb"))
#    print len(sl.ca.roi_feature_ids[0])
    acc_results = map2nifti(sl_map,
                           imghdr=glm_dataset.a.imghdr)
    acc_nii_filename = '{}-acc.nii.gz'.format(output_basename)
    acc_results.to_filename(acc_nii_filename)
    sl_map.samples *= -1
    sl_map.samples += 1
    niftiresults = map2nifti(sl_map,
                             imghdr=glm_dataset.a.imghdr)
    niftiresults.to_filename('{}-err.nii.gz'.format(output_basename))
    # TODO: check p value map
    if with_null_prob:
        nullt_results = map2nifti(sl_map, data=sl.ca.null_t,
                                  imghdr=glm_dataset.a.imghdr)
        nullt_results.to_filename('{}-t.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map, data=sl.ca.null_prob,
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-prob.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map, data=distr_est.cdf(sl_map.samples),
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-cdf.nii.gz'.format(output_basename))
    return sl_map
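A sketch of driving do_searchlight() above, assuming a beta/GLM dataset that carries the 'condition' and 'chunks' sample attributes the partitioner and classifier rely on; the file names and design layout below are placeholders, not part of the original source.

import numpy as np
from mvpa2.datasets.mri import fmri_dataset

# Hypothetical inputs: a beta-series image and brain mask from your own pipeline.
glm_dataset = fmri_dataset('betas.nii.gz', mask='brain_mask.nii.gz')
n = len(glm_dataset)
# Assumed design: alternating conditions across four roughly equal runs.
glm_dataset.sa['condition'] = np.tile(['A', 'B'], n)[:n]
glm_dataset.sa['chunks'] = np.repeat(np.arange(4), int(np.ceil(n / 4.0)))[:n]

sl_map = do_searchlight(glm_dataset, radius=3, output_basename='sub01_sl')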
Example 20
    perm_count.append(len(sds) - 1)

print 'Merge data', time.asctime()
orig_ds = Dataset(orig, sa=dict(subj=subj_ids), fa=sds.fa, a=sds.a)
perm_ds = Dataset(np.vstack(perms),
                  sa=dict(subj=np.repeat(subj_ids, perm_count)),
                  fa=sds.fa,
                  a=sds.a)
# some magic to drop the memory demand
del orig
del perms

print 'Train thresholder', time.asctime()
thr = GroupClusterThreshold(n_bootstrap=10000,
                            chunk_attr='subj',
                            n_blocks=100,
                            feature_thresh_prob=0.001,
                            n_proc=1,
                            fwe_rate=0.05)
thr.train(perm_ds)

print 'Threshold', time.asctime()
res = thr(orig_ds)
h5save('grpavg_stats.hdf5', res, compression=9)

print 'Store results', time.asctime()
nb.save(map2nifti(res, res.samples), 'avg_acc.nii.gz')
nb.save(map2nifti(res, res.fa.clusters_fwe_thresh),
        'fwecorrected_clusters.nii.gz')
nb.save(map2nifti(res, res.fa.featurewise_thresh), 'featurewise_thresh.nii.gz')
    'background' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/anat.nii.gz'),
    'background_mask' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/mask_brain.nii.gz'),
    'overlay_mask' : os.path.join(sessionPath,'analyze/structural/lc2ms_deskulled.hdr'),
    'do_stretch_colors' : False,
    'cmap_bg' : 'gray',
    'cmap_overlay' : 'autumn', # pl.cm.autumn
    'interactive' : cfg.getboolean('examples', 'interactive', True),
    }

for radius in [3]:
    # tell which one we are doing
    print 'Running searchlight with radius: %i ...' % (radius)

    sl = sphere_searchlight(foldwiseCvedAnovaSelectedSMLR, radius=radius, space='voxel_indices',
                            center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
    sl_map.samples *= -1
    sl_map.samples += 1

    niftiresults = map2nifti(sl_map, imghdr=dataset.a.imghdr)
    niftiresults.to_filename(os.path.join(sessionPath,'analyze/functional/%s-grey-searchlight.nii') % classificationName)
    print 'Best performing sphere error:', np.min(sl_map.samples)

Example 22
def runsub(sub, thisContrast, r, dstype="raw", roi="grayMatter", filterLen=49, filterOrd=3, write=False):

    if dstype == "raw":
        outdir = "PyMVPA"
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # savitsky golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # gallant group zscores before regression.

        # zscore w.r.t. rest trials
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr="chunks")
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(
            rds,
            events,
            time_attr="time_coords",
            condition_attr=("trial_type", "chunks"),
            design_kwargs={"add_regs": mc_params[sub], "hrf_model": "canonical"},
            return_model=True,
        )

        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != "0"]
    else:
        outdir = os.path.join("LSS", dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa["targets"] = fds.sa[thisContrast]
        zscore(fds, chunks_attr="chunks")

    fds = lmvpa.sortds(fds)
    print "searchlights"
    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight

    cvSL = sphere_searchlight(cv, radius=r)

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    lidx = fds.chunks < fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2]
    pidx = fds.chunks >= fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2]

    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset

        map2nifti(fds, dataset.vstack([lres, pres])).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + thisContrast + "_cvsl.nii.gz")
        )

    del lres, pres, cvSL

    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_P2L.nii.gz")
        )
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_L2P.nii.gz")
        )
Example 23
def runsub(sub,
           thisContrast,
           filterLen,
           filterOrd,
           thisContrastStr,
           roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # savitsky golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # gallant group zscores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # kay method: leave out a model run, use it to fit an HRF for each voxel
    # huth method: essentially use FIR
    # mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub],
                                     contrasts)  # adding features

    # we can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds,
                                     events,
                                     time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={
                                         'hrf_model': 'canonical',
                                         'drift_model': 'blank'
                                     },
                                     regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    des = lmvpa.make_parammat(desX)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, lres, lceil = bsr.bootstrap_linear(rds[lidx],
                                             ldes,
                                             part_attr='chunks',
                                             mode='test')
    pwts, pres, pceil = bsr.bootstrap_linear(rds[pidx],
                                             pdes,
                                             part_attr='chunks',
                                             mode='test')

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    print 'language ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    from mvpa2.base import dataset
    map2nifti(thisDS, dataset.vstack([lres, pres])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_corr.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_betas.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, cres, cceil = bsr.bootstrap_linear(crossSet,
                                             des,
                                             part_attr='chunks',
                                             mode='test')
    print 'cross: ' + str(np.mean(cres))

    map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_univar.nii.gz'))
    map2nifti(thisDS, cres[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_univar.nii.gz'))

    map2nifti(thisDS, cwts[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_betas.nii.gz'))
    map2nifti(thisDS, cwts[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_betas.nii.gz'))

    map2nifti(thisDS, cceil[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_ceiling.nii.gz'))
    map2nifti(thisDS, cceil[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_ceiling.nii.gz'))
Example 25
        # get onsets
        spec = get_onsets_famface(
            os.path.join(data_basedir, sub, 'model/model001/onsets', run),
            amplitudes)

        # increment amplitude for unfam
        amplitudes[1] += 0.5

        # create second specification for contrast that does not increase
        # in different roi
        straightspec = copy.deepcopy(spec)
        straightspec[0]['amplitude'] = 8
        straightspec[1]['amplitude'] = 1
        for cond in straightspec:
            cond['roivalue'] = 24
            spec.append(cond)

        # transform mask to subject space
        mask_subjspace = mask2subjspace_real(maskpath, anat, boldfile,
                                             mni2anat_hd5, affine_matrix,
                                             workdir)

        # add signal
        with_signal = add_signal_custom(noise, mask_subjspace, spec)

        # save data
        image = map2nifti(with_signal)
        image.to_filename(
            os.path.join(data_basedir, sub, 'BOLD', run, 'sim.nii.gz'))
Example 26
def main(subject,
         study_dir,
         mask,
         feature_mask,
         models,
         category,
         res_name,
         suffix='_stim_fix2',
         radius=3,
         n_perm=1000,
         n_proc=None):
    from mvpa2.mappers.zscore import zscore
    from mvpa2.mappers.fx import mean_group_sample
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.datasets.mri import map2nifti
    from wikisim import mvpa

    # load subject data
    sp = su.SubjPath(subject, study_dir)
    vols = task.prex_vols(sp.path('behav', 'log'))

    # load fmri data
    ds = mvpa.load_prex_beta(sp,
                             suffix,
                             mask,
                             feature_mask=feature_mask,
                             verbose=1)

    # zscore
    ds.sa['run'] = vols.run.values
    zscore(ds, chunks_attr='run')

    # average over item presentations
    ds.sa['itemno'] = vols.itemno.to_numpy()
    m = mean_group_sample(['itemno'])
    dsm = ds.get_mapped(m)

    # get items of interest
    if category == 'face':
        cond = [1, 2]
    elif category == 'scene':
        cond = [3, 4]
    else:
        raise ValueError(f'Invalid category code: {category}')
    include = vols.groupby('itemno').first()['cond'].isin(cond)

    # get models of interest
    model_dir = os.path.join(study_dir, 'batch', 'models3')
    model_names = models.split('-')
    model_rdms_dict = model.load_category_rdms(model_dir, category,
                                               model_names)
    model_rdms = [model_rdms_dict[name] for name in model_names]

    # set up searchlight
    m = mvpa.ItemPartialRSA(model_rdms, n_perm)
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(dsm[include])

    nifti_include = map2nifti(ds, sl_map[-1])
    for i, name in enumerate(model_names):
        # save zstat map
        res_dir = sp.path('rsa', f'{res_name}_{name}')
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        filepath = os.path.join(res_dir, 'zstat.nii.gz')
        nifti = map2nifti(ds, sl_map[i])
        nifti.to_filename(filepath)

        # save mask of included voxels
        include_file = os.path.join(res_dir, 'included.nii.gz')
        nifti_include.to_filename(include_file)
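A hedged invocation sketch for main() above; the subject ID, study directory, mask names, and model string are placeholders (per the code, models is split on '-', and category must be 'face' or 'scene').

# Hypothetical call; adjust every argument to your own study layout.
main('sub-01', '/data/wikisim', mask='b_gray', feature_mask='gray_dil',
     models='w2v-tfidf', category='face', res_name='prsa_face')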
Example 27
if __name__ == "__main__":
    # datadir = '../data'
    targnii = 'prob-face-object.nii.gz'
    masknii = 'prob-face-object.nii.gz'
    mask = nib.load(masknii)
    mask = mask.get_data()
    # ds = bpfmri_dataset(pjoin(datadir, targnii))
    ds = bpfmri_dataset(targnii)
    nbs = neighbor.volneighbors(mask, 3, 26)
    nbs = nbs.compute_offsets()
    map_ = GraphMapper(nbs)
    nds = map_(ds)
    # assert_array_equal(data, map2nifti(nds).get_data()[..., 0])
    result = map2nifti(nds)
Example 28
def spectral_seg(hfilename, outf):
    '''
    Spectral clustering...
    '''
    tmpset = Dataset([])
    # pdb.set_trace()
    print "hdf name:", hfilename
    st = time.time()
    ### 1. load connectivity profile of seed mask voxels
    conn = h5load(hfilename)
    tmpset.a = conn.a
    print "connection matrix shape:"
    print conn.shape
    ### 2. feature selection
    mask = create_mask(conn.samples, 5)
    conn_m = conn.samples[mask]
    map_ = conn_m.T
    print "masked conn matrix:"
    print map_.shape, map_.max(), map_.min()

    ### 3. average the connection profiles
    temp = np.zeros(map_.shape)
    voxel = np.array(conn.fa.values())
    v = voxel[0]
    v = v.tolist()

    shape = [256, 256, 256]

    i = 0
    for coor in v:
        mean_f = map_[i]
        # print mean_f.shape
        # plt.plot(mean_f)
        # plt.show()

        neigh = get_neighbors(coor, 2, shape)
        # print "neigh:", neigh

        count = 1
        for n in neigh:
            if n in v:
                mean_f = (mean_f * count + map_[v.index(n)]) / (count + 1)
                count += 1

        temp[i] = mean_f
        i += 1
    # sys.exit(0)
    map_ = temp
    print "averaged the connection matrix"

    ### 4. spatial distance
    spacedist = ds.cdist(v, v, 'euclidean')
    # print spacedist

    ### 5. correlation matrix
    corr = np.corrcoef(map_)
    corr = np.abs(corr)

    ### 6. mix the similarity matrices
    corr = 0.7 * corr + 0.3 / (spacedist + 1)
    # plt.imshow(corr, interpolation='nearest', cmap=cm.jet)
    # cb = plt.colorbar()
    # pl.xticks(())
    # pl.yticks(())
    # pl.show()
    print "mixed the correlation and spatial matrices"

    # sys.exit(0)
    ### 7. spectral segmentation
    print "do segmentation"
    cnum = 3
    near = 100
    sc = SpectralClustering(cnum, 'arpack', None, 100, 1, 'precomputed',
                            near, None, True)
    sc.fit_predict(corr)

    tmpset.samples = sc.labels_ + 1
    print "Number of voxels: ", sc.labels_.size
    print "Number of clusters: ", np.unique(sc.labels_).size
    print "Elapsed time: ", time.time() - st

    ### 8. save the segmentation result
    print "save the result to xxx_parcel.nii.gz"
    result = map2nifti(tmpset)
    result.to_filename(outf)
    print ".....Segment end........"

    return True
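A minimal sketch of driving spectral_seg(), assuming a connectivity-profile Dataset saved earlier with h5save; both file names are placeholders.

# The HDF5 file must hold the seed-voxel connectivity Dataset expected above.
spectral_seg('seed_conn.hdf5', 'seed_parcel.nii.gz')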
# orig results
ds = h5load(_opj('results', 'sub%.3i_2.0mm_hrf_sl_orig.hdf5' % subj))
# load permutations and merge with orig results
data = np.vstack([ds.samples[0]] + [
    np.load(fname) for fname in sorted(
        glob(_opj('results', 'sub%.3i_2.0mm_hrf_sl_perm*.npy' % subj)))
])
#data = ds.samples

# write out as NIfTI
tdir = mkdtemp()
print tdir
orig_fname = _opj(tdir, 'data_in_orig.nii.gz')
group_fname = _opj(tdir, 'data_in_group.nii.gz')
nb.save(map2nifti(ds, data, imghdr=subjtmpl.get_header()), orig_fname)

# project into group space
ref_fname = "BASEDIR/templates/grpbold7Tp1/brain.nii.gz"
warp_fname = "BASEDIR/sub%.3i/templates/bold7Tp1/in_grpbold7Tp1/subj2tmpl_warp.nii.gz" % subj
subprocess.check_call([
    "applywarp",
    "--in=%s" % orig_fname,
    "--out=%s" % group_fname,
    "--ref=%s" % ref_fname,
    "--warp=%s" % warp_fname
])

# and back into a dataset
# group intersection brain mask
mask_fname = 'BASEDIR/templates/grpbold7Tp1/qa/subjbold7Tp1_to_grpbold7Tp1/brain_mask_intersection.nii.gz'
Example 30
def do_searchlight(glm_dataset, radius, output_basename, with_null_prob=False):
    clf = LinearCSVMC(space='condition')
    # clf = RbfCSVMC(C=5.0)
    splt = NFoldPartitioner()
    cv = CrossValidation(clf,
                         splt,
                         errorfx=mean_match_accuracy,
                         enable_ca=['stats'],
                         postproc=mean_sample())
    distr_est = []
    if with_null_prob:
        permutator = AttributePermutator('condition',
                                         count=100,
                                         limit='chunks')
        distr_est = MCNullDist(permutator,
                               tail='left',
                               enable_ca=['dist_samples'])
        """
		repeater   = Repeater(count=100)
		permutator = AttributePermutator('condition', limit={'partitions': 1}, count=1) 
		null_cv = CrossValidation(clf, ChainNode([splt, permutator],space=splt.get_space()),
					  postproc=mean_sample())
		null_sl = sphere_searchlight(null_cv, radius=radius, space='voxel_indices',
					     enable_ca=['roi_sizes'])
		distr_est = MCNullDist(repeater,tail='left', measure=null_sl,
				       enable_ca=['dist_samples'])
		"""
        sl = sphere_searchlight(cv,
                                radius=radius,
                                space='voxel_indices',
                                null_dist=distr_est,
                                enable_ca=['roi_sizes', 'roi_feature_ids'])
    else:

        sl = sphere_searchlight(cv,
                                radius=radius,
                                space='voxel_indices',
                                enable_ca=['roi_sizes', 'roi_feature_ids'])
    #ds = glm_dataset.copy(deep=False,
    #		       sa=['condition','chunks'],
    #		       fa=['voxel_indices'],
    #		       a=['mapper'])
    #debug.active += ["SLC"]
    sl_map = sl(glm_dataset)
    errresults = map2nifti(sl_map, imghdr=glm_dataset.a.imghdr)
    errresults.to_filename('{}-acc.nii.gz'.format(output_basename))
    sl_map.samples *= -1
    sl_map.samples += 1
    niftiresults = map2nifti(sl_map, imghdr=glm_dataset.a.imghdr)
    niftiresults.to_filename('{}-err.nii.gz'.format(output_basename))
    #TODO: save p value map
    if with_null_prob:
        nullt_results = map2nifti(sl_map,
                                  data=sl.ca.null_t,
                                  imghdr=glm_dataset.a.imghdr)
        nullt_results.to_filename('{}-t.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map,
                                     data=sl.ca.null_prob,
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-prob.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map,
                                     data=distr_est.cdf(sl_map.samples),
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-cdf.nii.gz'.format(output_basename))
Example 31
def runsub(sub, thisContrast, thisContrastStr, testContrast,
           filterLen, filterOrd, write=False, debug=False,
           alphas=1, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # gallant group zscores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                     regr_attrs=None)
    # 'add_regs': mc_params[sub]

    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None, add_regs=mc_params[sub], drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    covarmat = None
    mus = None
    lwts, _, lres, lceil = bsr.bootstrap_ridge(ds=rds[lidx], des=ldes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[0]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    print 'language: ' + str(np.mean(lres))

    pwts, _, pres, pceil = bsr.bootstrap_ridge(ds=rds[pidx], des=pdes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[1]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    if write:
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_wts.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_ceiling.nii.gz'))

    for t in testContrast:
        tstr = '+'.join(t)
        lcorr = lmvpa.testmodel(wts=lwts, des=ldes, ds=rds[lidx], tc=cp.copy(t), use_corr=True)
        pcorr = lmvpa.testmodel(wts=pwts, des=pdes, ds=rds[pidx], tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, dataset.vstack([lcorr, pcorr])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_test_corrs.nii.gz'))

    del lres, pres, lwts, pwts, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    # cwts, cres, cceil = bsr.ridge(rds[pidx], pdes, mu0=mus, cov0=covarmat,
    #                                             part_attr='chunks', mode='test', alphas=alphas[0], single_alpha=True,
    #                                             normalpha=False, corrmin=.2, singcutoff=1e-10, joined=None,
    #                                             use_corr=True)
    cwts, _, cres, cceil = bsr.bootstrap_ridge(ds=crossSet, des=des, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[2]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    for t in testContrast:
        tstr = '+'.join(t)
        ccorr = lmvpa.testmodel(wts=cwts, des=des, ds=crossSet, tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, ccorr[0]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_P2L_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
            map2nifti(thisDS, ccorr[1]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_L2P_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                     '_P2L_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))

        map2nifti(thisDS, cwts[cwts.chunks==1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))
        map2nifti(thisDS, cwts[cwts.chunks==2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
    del cres, cwts, cceil
Example No. 32
# orig results
ds = h5load(_opj('results', 'sub%.3i_2.0mm_hrf_sl_orig.hdf5' % subj))
# load permutations and merge with orig results
data = np.vstack(
    [ds.samples[0]] +
    [np.load(fname)
     for fname in sorted(
         glob(_opj('results', 'sub%.3i_2.0mm_hrf_sl_perm*.npy' % subj)))])
#data = ds.samples

# write out as NIfTI
tdir = mkdtemp()
print tdir
orig_fname = _opj(tdir, 'data_in_orig.nii.gz')
group_fname = _opj(tdir, 'data_in_group.nii.gz')
nb.save(map2nifti(ds, data, imghdr=subjtmpl.get_header()), orig_fname)

# project into group space
ref_fname = "BASEDIR/templates/grpbold7Tp1/brain.nii.gz"
warp_fname = "BASEDIR/sub%.3i/templates/bold7Tp1/in_grpbold7Tp1/subj2tmpl_warp.nii.gz" % subj
subprocess.check_call(
    ["applywarp",
     "--in=%s" % orig_fname,
     "--out=%s" % group_fname,
     "--ref=%s" % ref_fname,
     "--warp=%s" % warp_fname])

# and back into a dataset
# group intersection brain mask
mask_fname = 'BASEDIR/templates/grpbold7Tp1/qa/subjbold7Tp1_to_grpbold7Tp1/brain_mask_intersection.nii.gz'
ds = fmri_dataset(group_fname, mask=mask_fname)
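The example above round-trips data through NIfTI so FSL's applywarp can move it into group space. A compact sketch of that round trip, assuming PyMVPA and nibabel are available (filenames are placeholders):

# Sketch of the dataset -> NIfTI -> dataset round trip above; the warp step
# (FSL applywarp) happens between the two calls, as in the subprocess call.
import nibabel as nb
from mvpa2.datasets.mri import fmri_dataset, map2nifti

def roundtrip(ds, data, orig_fname, group_fname, mask_fname):
    nb.save(map2nifti(ds, data), orig_fname)  # write subject-space volume
    # ... warp orig_fname -> group_fname with applywarp here ...
    return fmri_dataset(group_fname, mask=mask_fname)  # reload under group mask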
Example No. 34
group1 = range(1, 15) + range(29, 35)
group2 = range(15, 29) + range(35, 41)
group_all = group1 + group2
gp11 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
gp12 = [
    11, 12, 13, 14, 29, 30, 31, 32, 33, 34, 25, 26, 27, 28, 35, 36, 37, 38, 39,
    40
]
gp21 = [
    11, 12, 13, 14, 29, 30, 31, 32, 33, 34, 15, 16, 17, 18, 19, 20, 21, 22, 23,
    24
]
gp22 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 25, 26, 27, 28, 35, 36, 37, 38, 39, 40]

gptest = range(1, 2)
# modify this line to choose the data to output
output_gp_name = 'gptest'
output_gp = gptest

greeneye_movie_all = np.empty((len(output_gp), 1), dtype=object)
for i, subj in enumerate(output_gp):

    datapath = '/jukebox/ramadge/RAW_DATA/green_eye/niftis/'
    # getting first run data
    bold_fname = os.path.join(
        datapath, 'subj{}_trans_filtered_func_data.nii'.format(subj))
    data_tmp = fmri_dataset(bold_fname, mask=mask_fname)
    nifti = map2nifti(data_tmp)

    subj_data = data_tmp.samples.T
    subj_data = subj_data[:, start[subj - 1]:end[subj - 1]]
    greeneye_movie_all[i, 0] = subj_data
Example No. 35
def main():
    '''
    Spectral clustering...
    '''
    st = time.time()
    tmpset = Dataset([])
    # hfilename = "/nfs/j3/userhome/dangxiaobin/workingdir/cutROI/%s/fdt_matrix2_targets_sc.T.hdf5"%(id)
    hfilename = 'fdt_matrix2.T.hdf5'
    print hfilename
    #load connectivity profile of seed mask voxels
    conn = open_conn_mat(hfilename)
    tmpset.a = conn.a
    print conn.shape, conn.a
    #remove some features
    mask = create_mask(conn.samples, 0.5, 1)
    # print mask,mask.shape
    conn_m = mask_feature(conn.samples, mask)
    # print  conn_m
    map = conn_m.T
    print "map:"
    print map.shape, map.max(), map.min()

    voxel = np.array(conn.fa.values())
    print voxel[0]
    v = voxel[0]
    spacedist = ds.cdist(v, v, 'euclidean')
    print spacedist
    """
    similar_mat = create_similarity_mat(map,conn.fa,0.1,2)
    X = np.array(similar_mat)
    print "similarity matrix: shape:",X.shape
    print X
    """

    corr = np.corrcoef(map)
    corr = np.abs(corr)
    corr = 0.1 * corr + 0.9 / (spacedist + 1)

    print "Elaspsed time: ", time.time() - st
    print corr.shape, corr
    plt.imshow(corr, interpolation='nearest', cmap=cm.jet)
    cb = plt.colorbar()
    pl.xticks(())
    pl.yticks(())
    pl.show()

    cnum = 3
    near = 100
    sc = SpectralClustering(cnum, 'arpack', None, 100, 1, 'precomputed', near,
                            None, True)
    #sc.fit(map)
    sc.fit_predict(corr)
    '''
    cnum = 3
    near = 100
    sc = SpectralClustering(cnum,'arpack',None,100,1,'nearest_neighbors',near,None,True)
    sc.fit(map)
   # sc.fit_predict(X)
   # param = sc.get_params(deep=True)
    '''
    tmpset.samples = sc.labels_ + 1
    # print sc.affinity_matrix_
    #print list(sc.labels_)
    print "Elaspsed time: ", time.time() - st
    print "Number of voxels: ", sc.labels_.size
    print "Number  of clusters: ", np.unique(sc.labels_).size

    result = map2nifti(tmpset)
    result.to_filename("fg_parcel_S0006.nii.gz")
    print ".....The end........"
Example No. 37
def test_er_nifti_dataset():
    # setup data sources
    tssrc = os.path.join(pymvpa_dataroot, u'bold.nii.gz')
    evsrc = os.path.join(pymvpa_dataroot, 'fslev3.txt')
    masrc = os.path.join(pymvpa_dataroot, 'mask.nii.gz')
    evs = FslEV3(evsrc).to_events()
    # load timeseries
    ds_orig = fmri_dataset(tssrc)
    # segment into events
    ds = eventrelated_dataset(ds_orig, evs, time_attr='time_coords')

    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s
    # hence we should get round(9.0/2.5) * np.prod((1,20,40)) == 3200 features
    assert_equal(ds.nfeatures, 3200)
    assert_equal(len(ds), len(evs))
    # the voxel indices are re-flattened after boxcaring, but still 3D
    assert_equal(ds.fa.voxel_indices.shape, (ds.nfeatures, 3))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:800],
                       ds.fa.voxel_indices[800:1600])
    # each feature got an event offset value
    assert_array_equal(ds.fa.event_offsetidx, np.repeat([0, 1, 2, 3], 800))
    # check for all event attributes
    assert_true('onset' in ds.sa)
    assert_true('duration' in ds.sa)
    assert_true('features' in ds.sa)
    # check samples
    origsamples = _load_anyimg(tssrc)[0]
    for i, onset in \
        enumerate([value2idx(e['onset'], ds_orig.sa.time_coords, 'floor')
                        for e in evs]):
        assert_array_equal(ds.samples[i], origsamples[onset:onset + 4].ravel())
        assert_array_equal(ds.sa.time_indices[i], np.arange(onset, onset + 4))
        assert_array_equal(ds.sa.time_coords[i],
                           np.arange(onset, onset + 4) * 2.5)
        for evattr in [
                a for a in ds.sa
                if a.count("event_attrs") and not a.count('event_attrs_event')
        ]:
            assert_array_equal(evs[i]['_'.join(evattr.split('_')[2:])],
                               ds.sa[evattr].value[i])
    # check offset: only the last one exactly matches the tr
    assert_array_equal(ds.sa.orig_offset, [1, 1, 0])

    # map back into voxel space, should ignore additional features
    nim = map2nifti(ds)
    # origsamples has t,x,y,z
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = nim.shape
    else:
        vol_shape = nim.get_shape()
    assert_equal(vol_shape, origsamples.shape[1:] + (len(ds) * 4, ))
    # check shape of a single sample
    nim = map2nifti(ds, ds.samples[0])
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = nim.shape
    else:
        vol_shape = nim.get_shape()
    # pynifti image has [t,]z,y,x
    assert_equal(vol_shape, (40, 20, 1, 4))

    # and now with masking
    ds = fmri_dataset(tssrc, mask=masrc)
    ds = eventrelated_dataset(ds, evs, time_attr='time_coords')
    nnonzero = len(_load_anyimg(masrc)[0].nonzero()[0])
    assert_equal(nnonzero, 530)
    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s
    # hence we should get round(9.0/2.5) * 530 == 2120 features
    assert_equal(ds.nfeatures, 4 * 530)
    assert_equal(len(ds), len(evs))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:nnonzero],
                       ds.fa.voxel_indices[nnonzero:2 * nnonzero])
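The feature-count comments above can be checked directly; a self-contained sanity check of the arithmetic (4 volumes per 9 s boxcar at TR = 2.5 s, 800 voxels per unmasked volume):

# Worked check of the boxcar arithmetic in the comments above.
import numpy as np
boxlen = int(round(9.0 / 2.5))    # 4 volumes per event
nvox = int(np.prod((1, 20, 40)))  # 800 voxels per unmasked volume
assert boxlen * nvox == 3200      # matches the unmasked nfeatures
assert boxlen * 530 == 2120       # matches the masked nfeatures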
Example No. 38
def runsub(sub,
           thisContrast,
           thisContrastStr,
           filterLen,
           filterOrd,
           paramEst,
           chunklen,
           alphas=np.logspace(0, 3, 20),
           debug=False,
           write=False,
           roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # gallant group zscores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    # rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub],
                                     contrasts)  # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds,
                                     events,
                                     time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={
                                         'hrf_model': 'canonical',
                                         'drift_model': 'blank'
                                     },
                                     regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value,
                               paradigm=None,
                               add_regs=mc_params[sub],
                               drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]
    nchunks = int(len(thisDS) * paramEst / chunklen)
    nboots = 50
    covarmat = None
    mus = None
    lwts, lalphas, lres, lceil = bsr.bootstrap_ridge(rds[lidx],
                                                     ldes,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     cov0=covarmat,
                                                     mu0=mus,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     plot=debug,
                                                     use_corr=True)

    pwts, palphas, pres, pceil = bsr.bootstrap_ridge(rds[pidx],
                                                     pdes,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     plot=debug,
                                                     use_corr=True)
    print 'language: ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    # need to change outstring
    if write:
        from mvpa2.base import dataset
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_weights.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lalphas, palphas])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_alphas.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_ceiling.nii.gz'))

    del lres, pres, lwts, pwts, lalphas, palphas, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, calphas, cres, cceil = bsr.bootstrap_ridge(crossSet,
                                                     des,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     use_corr=True)
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_corr.nii.gz'))

        map2nifti(thisDS, cwts[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_weights.nii.gz'))
        map2nifti(thisDS, cwts[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_weights.nii.gz'))

        map2nifti(thisDS, calphas[calphas.chunks == 1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_alphas.nii.gz'))
        map2nifti(thisDS, calphas[calphas.chunks == 2]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_alphas.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_ceiling.nii.gz'))
    del cres, cwts, calphas, cceil
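`bsr.bootstrap_ridge` is a project-specific helper, so for intuition only, here is the standard closed-form ridge solution that its alpha search is built around (this is not that helper's implementation):

# For intuition only: closed-form ridge regression for a single alpha.
import numpy as np

def ridge_weights(X, Y, alpha):
    """Solve (X'X + alpha*I) W = X'Y for all response columns at once."""
    n_feat = X.shape[1]
    return np.linalg.solve(X.T.dot(X) + alpha * np.eye(n_feat), X.T.dot(Y))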
Example No. 39
def to_nifti(dumpy, ds, args):
    from mvpa2.datasets.mri import map2nifti
    # TODO allow overriding the nifti header
    nimg = map2nifti(ds, dumpy)
    nimg.to_filename(args.output)
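A hypothetical call, since `to_nifti()` only reads `args.output`; `ds` is assumed to have been loaded with `fmri_dataset()`:

# Hypothetical usage; the argparse Namespace stands in for the parsed
# command-line arguments the helper normally receives.
from argparse import Namespace
args = Namespace(output='map.nii.gz')
to_nifti(ds.samples[0], ds, args)  # write the first sample back to NIfTI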
Example No. 40
    'interactive':
    cfg.getboolean('examples', 'interactive', True),
}

for radius in [3]:
    # tell which one we are doing
    print 'Running searchlight with radius: %i ...' % (radius)

    sl = sphere_searchlight(foldwiseCvedAnovaSelectedSMLR,
                            radius=radius,
                            space='voxel_indices',
                            center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
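    # convert accuracy to error in place: err = 1 - acc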
    sl_map.samples *= -1
    sl_map.samples += 1

    niftiresults = map2nifti(sl_map, imghdr=dataset.a.imghdr)
    niftiresults.to_filename(
        os.path.join(
            sessionPath,
            'analyze/functional/searchlight/{0}-grey-searchlight{1}-{2}.nii'.
            format(classificationName, boldDelay, stimulusWidth)))
    print 'Best performing sphere error:', np.min(sl_map.samples)
Example No. 41
def pybetaseries(fsfdir,
                 methods=['lsall', 'lsone'],
                 time_res=0.1,
                 modeldir=None,
                 outdir=None,
                 designdir=None,
                 design_fsf_file='design.fsf',
                 design_mat_file='design.mat',
                 data_file=None,
                 mask_file=None,
                 extract_evs=None,
                 collapse_other_conditions=True):
    """Compute beta-series regression on a feat directory

    Required arguments:

    fsfdir: full path of a feat directory

    Optional arguments:

    method: list of methods to be used, can include:
    'lsone': single-trial iterative least squares estimation from Turner & Ashby
    'lsall': standard beta-series regression from Rissman et al.

    time_res: time resolution of the model used to generate the convolved design matrix

    outdir: where to store the results
    designdir: location of design_mat_file (e.g. design.mat). if None -- the same as fsfdir
    collapse_other_conditions: collapse all other conditions into a single regressor for
        the lsone model.  Jeanette's analyses suggest that it's better than leaving
        them separate.
    data_file: allows to override path of the 4D datafile instead of specified in design.fsf
        'feat_files(1)'
    """

    known_methods = ['lsall', 'lsone']
    assert set(methods).issubset(set(known_methods)), \
           "Unknown method(s): %s" % (set(methods).difference(set(known_methods)))

    if not os.path.exists(fsfdir):
        print 'ERROR: %s does not exist!' % fsfdir
        #return

    if not fsfdir.endswith('/'):
        fsfdir = ''.join([fsfdir, '/'])
    if modeldir is None:
        modeldir = fsfdir

    # load design using pymvpa tools

    fsffile = pjoin(fsfdir, design_fsf_file)
    desmatfile = pjoin(modeldir, design_mat_file)

    verbose(1, "Loading design")
    design = read_fsl_design(fsffile)

    desmat = FslGLMDesign(desmatfile)

    ntp, nevs = desmat.mat.shape

    TR = design['fmri(tr)']
    # yoh: theoretically it should be identical to the one read from
    # the nifti file, but in this sample data those manage to differ:
    # bold_mcf_brain.nii.gz        int16  [ 64,  64,  30, 182] 3.12x3.12x5.00x1.00   sform
    # filtered_func_data.nii.gz   float32 [ 64,  64,  30, 182] 3.12x3.12x5.00x2.00   sform
    #assert(abs(data.a.imghdr.get_zooms()[-1] - TR) < 0.001)
    # it is the filtered_func_data.nii.gz  which was used for analysis,
    # and it differs from bold_mcf_brain.nii.gz ... 

    # exclude events that occur within two TRs of the end of the run, due to the
    # inability to accurately estimate the response to them.

    max_evtime = TR*ntp - 2
    # TODO: filter out here the trials jumping outside

    good_evs = []
    nuisance_evs = []
    # yoh: ev_td marks temporal derivatives (of good EVs or of nuisance -- all)
    #      replacing with deriv_evs for consistency
    withderiv_evs = []
    # ev_td = N.zeros(design['fmri(evs_real)'])

    good_ons = []

    if outdir is None:
        outdir = pjoin(fsfdir, 'betaseries')

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # create smoothing kernel for design
    cutoff = design['fmri(paradigm_hp)']/TR
    verbose(1, "Creating smoothing kernel based on the original analysis cutoff %.2f"
               % cutoff)
    # yoh: Verify that the kernel is correct since it looks
    # quite ...
    F = get_smoothing_kernel(cutoff, ntp)

    verbose(1, "Determining non-motion conditions")
    # loop through and find the good (non-motion) conditions
    # NB: this assumes that the name of the motion EV includes "motpar"
    # ala the openfmri convention.
    # TO DO:  add ability to manually specify motion regressors (currently assumes
    # that any EV that includes "motpar" in its name is a motion regressor)
    evctr = 0

    for ev in range(1, design['fmri(evs_orig)']+1):
        # filter out motion parameters
        evtitle = design['fmri(evtitle%d)' % ev]
        verbose(2, "Loading EV %s" % evtitle)
        if not evtitle.startswith('mot'):
            good_evs.append(evctr)
            evctr += 1
            if design['fmri(deriv_yn%d)' % ev] == 1:
                withderiv_evs.append(evctr-1)
                # skip temporal derivative
                evctr += 1
            ev_events = FslEV3(pjoin(fsfdir, design['fmri(custom%d)' % ev]))
            good_ons.append(ev_events)
        else:
            nuisance_evs.append(evctr)
            evctr += 1
            if design['fmri(deriv_yn%d)' % ev] == 1:
                # skip temporal derivative
                withderiv_evs.append(evctr)
                nuisance_evs.append(evctr)
                evctr += 1

    # load data
    verbose(1, "Loading data")

    maskimg = pjoin(fsfdir, mask_file or 'mask.nii.gz')
    # yoh: TODO design['feat_files'] is not the one "of interest" since it is
    # the input file, while we would like to operate on pre-processed version
    # which is usually stored as filtered_func_data.nii.gz
    data_file_fullname = complete_filename(
        pjoin(fsfdir, data_file or "filtered_func_data.nii.gz"))
    data = fmri_dataset(data_file_fullname, mask=maskimg)
    assert(len(data) == ntp)

    for method in methods:
        verbose(1, 'Estimating %(method)s model...' % locals())

        if method == 'lsone':
            all_conds, glm_res_full = extract_lsone(
                        data, TR, time_res,
                        spm_hrf, F,
                        good_ons,
                        good_evs, nuisance_evs, withderiv_evs,
                        desmat,
                        extract_evs=extract_evs,
                        collapse_other_conditions=collapse_other_conditions)
        elif method == 'lsall':
            all_conds, glm_res_full = extract_lsall(
                        data, TR, time_res,
                        spm_hrf, F,
                        good_ons,
                        good_evs,
                        desmat,
                        extract_evs=extract_evs,
                        )
        else:
            raise ValueError(method)

        all_conds = N.asanyarray(all_conds)   # assure array here
        # map the data into images and save to betaseries directory
        for e in range(1, len(good_evs)+1):
            ni = map2nifti(data, data=glm_res_full[N.where(all_conds==e)[0], :])
            ni.to_filename(pjoin(outdir, 'ev%d_%s.nii.gz' % (e, method)))
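The write-out loop above selects the GLM rows belonging to one EV and maps them back through the data's mask. The same step as a small helper (a sketch, mirroring the loop body):

# Sketch of the per-EV write-out: pick the rows of the GLM result that
# belong to one EV and map them back into volume space.
import numpy as N
from mvpa2.datasets.mri import map2nifti

def save_ev_betaseries(data, glm_res_full, all_conds, ev, fname):
    rows = N.where(N.asanyarray(all_conds) == ev)[0]
    map2nifti(data, data=glm_res_full[rows, :]).to_filename(fname)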
Example No. 43
def runsub(sub, thisContrast, filterLen, filterOrd, thisContrastStr, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # gallant group zscores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts) # adding features

    # we can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                 design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                 regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    des = lmvpa.make_parammat(desX)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, lres, lceil = bsr.bootstrap_linear(rds[lidx], ldes, part_attr='chunks', mode='test')
    pwts, pres, pceil = bsr.bootstrap_linear(rds[pidx], pdes, part_attr='chunks', mode='test')

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    print 'language: ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    from mvpa2.base import dataset
    map2nifti(thisDS, dataset.vstack([lres, pres])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_corr.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_betas.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, cres, cceil = bsr.bootstrap_linear(crossSet, des, part_attr='chunks', mode='test')
    print 'cross: ' + str(np.mean(cres))

    map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_univar.nii.gz'))
    map2nifti(thisDS, cres[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_univar.nii.gz'))

    map2nifti(thisDS, cwts[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_betas.nii.gz'))
    map2nifti(thisDS, cwts[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_betas.nii.gz'))

    map2nifti(thisDS, cceil[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ceiling.nii.gz'))
    map2nifti(thisDS, cceil[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ceiling.nii.gz'))
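All of the write-outs in this example share one pattern: stack the per-split result datasets and map them through the source dataset in a single call. A minimal sketch (every dataset in `maps` must share the feature space of `ds`):

# Sketch of the stacked write-out pattern used throughout this example.
from mvpa2.base import dataset
from mvpa2.datasets.mri import map2nifti

def save_stacked(ds, maps, fname):
    map2nifti(ds, dataset.vstack(maps)).to_filename(fname)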
Example No. 45
def runsub(sub, thisContrast, r, dstype='raw', roi='grayMatter', filterLen=49, filterOrd=3, write=False):

    if dstype == 'raw':
        outdir='PyMVPA'
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # Savitzky-Golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # gallant group zscores before regression.

        # zscore w.r.t. rest trials
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr='chunks')
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(rds, events, time_attr='time_coords',
                                      condition_attr=('trial_type', 'chunks'),
                                      design_kwargs={'add_regs': mc_params[sub], 'hrf_model': 'canonical'},
                                      return_model=True)

        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != '0']
    else:
        outdir=os.path.join('LSS', dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa['targets'] = fds.sa[thisContrast]
        zscore(fds, chunks_attr='chunks')

    fds = lmvpa.sortds(fds)
    print "searchlights"
    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight
    cvSL = sphere_searchlight(cv, radius=r)


    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    lidx = fds.chunks < fds.sa['chunks'].unique[len(fds.sa['chunks'].unique)/2]
    pidx = fds.chunks >= fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]

    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset
        map2nifti(fds, dataset.vstack([lres, pres])).\
            to_filename(os.path.join(
                        paths[0], 'Maps', outdir,
                        sub + '_' + roi + '_' + thisContrast + '_cvsl.nii.gz'))

    del lres, pres, cvSL

    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_P2L.nii.gz'))
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_L2P.nii.gz'))
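The chunk relabeling above is what turns an ordinary NFold cross-validation into cross-decoding: with only two chunk values, the partitioner yields exactly two folds, train-on-one/test-on-the-other. A hedged sketch of that setup (`fds`, `lidx`, `pidx` as computed in `runsub()`):

# Sketch of the cross-decoding setup: relabel chunks so NFoldPartitioner
# produces exactly two folds, one per condition set.
from mvpa2.clfs.svm import LinearNuSVMC
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.measures.base import CrossValidation

def cross_decode(fds, lidx, pidx):
    cross = fds.copy()
    cross.chunks[lidx] = 1  # e.g. language runs
    cross.chunks[pidx] = 2  # e.g. picture runs
    cv = CrossValidation(LinearNuSVMC(), NFoldPartitioner())
    return cv(cross)  # two folds: train on 1 / test on 2, and vice versa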