Example #1
def loadrundata(p, s, r, m=None, c=None):
    # inputs:
    # p: paths list
    # s: string representing subject ('LMVPA001')
    # r: run ID ('Run1')
    from os.path import join as pjoin
    from mvpa2.datasets import eventrelated as er
    from mvpa2.datasets.mri import fmri_dataset
    from mvpa2.datasets.sources import bids as bids


    # bfn = pjoin(p[0], 'data', s, 'func', 'extra', s+'_'+r+'_mc.nii.gz')
    # motion corrected and coregistered
    bfn = pjoin(p[0], 'data', s, 'func', s + '_' + r + '.nii.gz')
    if m is not None:
        m = pjoin(p[0], 'data', s, 'masks', s+'_'+m+'.nii.gz')
        d = fmri_dataset(bfn, chunks=int(r.split('n')[1]), mask=m)
    else:
        d = fmri_dataset(bfn, chunks=int(r.split('n')[1]))
    # This line-- should be different if we're doing GLM, etc.
    efn = pjoin(p[0], 'data', s, 'func', s + '_' + r + '.tsv')
    fe = bids.load_events(efn)
    if c is None:
        tmpe = events2dict(fe)
        c = tmpe.keys()
    if isinstance(c, basestring):
        # must be a list/tuple/array for the logic below
        c = [c]
    for ci in c:
        e = adjustevents(fe, ci)
        d = er.assign_conditionlabels(d, e, noinfolabel='rest', label_attr=ci)
    return d
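
A minimal usage sketch for the loader above, assuming the directory layout described in its comments; the project root and mask name are hypothetical placeholders:

# hypothetical invocation; p[0] is the project root the function expects
paths = ['/project/root']
ds = loadrundata(paths, 'LMVPA001', 'Run1', m='grayMatter')
print(ds.shape)  # (n_volumes, n_voxels); chunks are set to the run number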
Example #2
def test_nifti_dataset_from3_d():
    """Test NiftiDataset based on 3D volume(s)
    """
    tssrc = os.path.join(pymvpa_dataroot, "bold.nii.gz")
    masrc = os.path.join(pymvpa_dataroot, "mask.nii.gz")

    # Test loading of 3D volumes
    # by default we are enforcing 4D, testing here with the demo 3d mask
    ds = fmri_dataset(masrc, mask=masrc, targets=1)
    assert_equal(len(ds), 1)

    import nibabel

    plain_data = nibabel.load(masrc).get_data()
    # Let's check if mapping back works as well
    assert_array_equal(plain_data, map2nifti(ds).get_data().reshape(plain_data.shape))

    # test loading from a list of filenames

    # for now we should fail if trying to load a mix of 4D and 3D volumes
    assert_raises(ValueError, fmri_dataset, (masrc, tssrc), mask=masrc, targets=1)

    # Let's prepare some custom NiftiImage
    dsfull = fmri_dataset(tssrc, mask=masrc, targets=1)
    ds_selected = dsfull[3]
    nifti_selected = map2nifti(ds_selected)

    # Load dataset from a mix of 3D volumes
    # (given by filenames and NiftiImages)
    labels = [123, 2, 123]
    ds2 = fmri_dataset((masrc, masrc, nifti_selected), mask=masrc, targets=labels)
    assert_equal(ds2.nsamples, 3)
    assert_array_equal(ds2.samples[0], ds2.samples[1])
    assert_array_equal(ds2.samples[2], dsfull.samples[3])
    assert_array_equal(ds2.targets, labels)
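
The list-loading path exercised above also works on its own; a sketch with placeholder filenames, where each 3D volume becomes one sample:

from mvpa2.datasets.mri import fmri_dataset

# hypothetical 3D volumes; mixing in nibabel images is fine, mixing in 4D is not
ds = fmri_dataset(('vol1.nii.gz', 'vol2.nii.gz'), targets=[0, 1])
print(ds.nsamples)  # 2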
Example #4
def test_multiple_calls():
    """Test if doing exactly the same operation twice yields the same result
    """
    data = fmri_dataset(samples=os.path.join(pymvpa_dataroot,'example4d.nii.gz'),
                        targets=1, sprefix='abc')
    data2 = fmri_dataset(samples=os.path.join(pymvpa_dataroot,'example4d.nii.gz'),
                         targets=1, sprefix='abc')
    assert_array_equal(data.a.abc_eldim, data2.a.abc_eldim)
Example #5
def test_nifti_mapper(filename):
    """Basic testing of map2Nifti
    """
    skip_if_no_external('scipy')

    import nibabel
    data = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                             'example4d.nii.gz'),
                        targets=[1, 2])

    # test mapping of ndarray
    vol = map2nifti(data, np.ones((294912, ), dtype='int16'))
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24))
    assert_true((vol.get_data() == 1).all())
    # test mapping of the dataset
    vol = map2nifti(data)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24, 2))
    ok_(isinstance(vol, data.a.imgtype))

    # test providing custom imgtypes
    vol = map2nifti(data, imgtype=nibabel.Nifti1Pair)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    ok_(isinstance(vol, nibabel.Nifti1Pair))

    # Let's generate a dataset using an alternative format (MINC)
    # and see if type persists
    volminc = nibabel.MincImage(vol.get_data(), vol.get_affine(),
                                vol.get_header())
    ok_(isinstance(volminc, nibabel.MincImage))
    dsminc = fmri_dataset(volminc, targets=1)
    ok_(dsminc.a.imgtype is nibabel.MincImage)
    ok_(isinstance(dsminc.a.imghdr, nibabel.minc.MincImage.header_class))

    # Let's test if we could save/load now into Analyze volume/dataset
    if externals.versions['nibabel'] < '1.1.0':
        raise SkipTest(
            'nibabel prior 1.1.0 had an issue with types comprehension')
    volanal = map2nifti(
        dsminc, imgtype=nibabel.AnalyzeImage)  # MINC has no 'save' capability
    ok_(isinstance(volanal, nibabel.AnalyzeImage))
    volanal.to_filename(filename)
    dsanal = fmri_dataset(filename, targets=1)
    # this one is tricky since it might become Spm2AnalyzeImage
    ok_('AnalyzeImage' in str(dsanal.a.imgtype))
    ok_('AnalyzeHeader' in str(dsanal.a.imghdr.__class__))
    volanal_ = map2nifti(dsanal)
    ok_(isinstance(volanal_, dsanal.a.imgtype))  # type got preserved
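
The load/map-back round trip these assertions exercise, as a standalone sketch assuming any 4D BOLD image:

from mvpa2.datasets.mri import fmri_dataset, map2nifti

ds = fmri_dataset('bold.nii.gz')          # samples x features, one sample per volume
img = map2nifti(ds)                       # back to a nibabel image of the original type
img.to_filename('bold_roundtrip.nii.gz')  # header and affine are carried over from ds.a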
Example #6
def extract_runs_nuisancedata(
        base_dir,
        out_dir,
        mnimask,
        sub_id,
        with_contrast=False,
        labelcsv='/data/famface/openfmri/github/notebooks/roi_coord.csv'):
    """
    extract time series from the residuals of our nuisance model.
    This does essentially the same as extract_runs_famface_mnimask, only
    for different input path structure.
    """
    """
    Given our famface data, extract time series for ALL runs of ONE subject.
    For use in TETRAD. base_dir contains pre-processed BOLD images in mni space.
    """

    # enumerated label names from csv file
    labels = getlabels(labelcsv)

    # strip whitespaces for header
    # (because tetrad doesn't allow whitespaces)
    header = [pair[1].replace(' ', '') for pair in labels]
    if with_contrast:
        header.append('FAM-UNFAM')

    ms = fmri_dataset(mnimask)

    runs = ['run%02d' % i for i in xrange(1, 12)]
    for run in runs:
        # create output dir
        if not os.path.exists(join(out_dir, 'csv', run)):
            os.makedirs(join(out_dir, 'csv', run))

        infile = join(base_dir, sub_id, 'residual4d', 'mni',
                      'res4d_%s.nii.gz' % run)
        bold = fmri_dataset(infile)
        timeseries = extract_mean_timeseries(bold, ms)

        if with_contrast:
            # TODO: don't hardcode data directory
            data_dir = '/data/famface/openfmri/oli/simulation/data_oli'
            runstring = run[:3] + '0' + run[3:]
            onsetpath = join(data_dir, sub_id, 'model/model001/onsets',
                             'task001_%s' % runstring)

            ts_with_contrast = add_contrast(timeseries, onsetpath)

            # write to csv
            outfile = join(out_dir, 'csv', run,
                           '{}_{}.csv'.format(sub_id, run))
            transpose_and_write(ts_with_contrast, outfile, header)

        else:
            # write to csv
            outfile = join(out_dir, 'csv', run,
                           '{}_{}.csv'.format(sub_id, run))
            transpose_and_write(timeseries, outfile, header)
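
A hedged driver for the extractor above; every path and the subject ID below are placeholders:

# hypothetical invocation
extract_runs_nuisancedata(
    base_dir='/data/famface/openfmri/residuals',
    out_dir='/data/famface/openfmri/timeseries',
    mnimask='/data/famface/mni_roi_mask.nii.gz',
    sub_id='sub001',
    with_contrast=True)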
Example #7
def test_er_nifti_dataset():
    # setup data sources
    tssrc = os.path.join(pymvpa_dataroot, u"bold.nii.gz")
    evsrc = os.path.join(pymvpa_dataroot, "fslev3.txt")
    masrc = os.path.join(pymvpa_dataroot, "mask.nii.gz")
    evs = FslEV3(evsrc).to_events()
    # load timeseries
    ds_orig = fmri_dataset(tssrc)
    # segment into events
    ds = eventrelated_dataset(ds_orig, evs, time_attr="time_coords")

    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s
    # hence we should get round(9.0/2.5) * np.prod((1, 20, 40)) == 3200 features
    assert_equal(ds.nfeatures, 3200)
    assert_equal(len(ds), len(evs))
    # the voxel indices are reflattened after boxcaring, but still 3D
    assert_equal(ds.fa.voxel_indices.shape, (ds.nfeatures, 3))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:800], ds.fa.voxel_indices[800:1600])
    # each feature got an event offset value
    assert_array_equal(ds.fa.event_offsetidx, np.repeat([0, 1, 2, 3], 800))
    # check for all event attributes
    assert_true("onset" in ds.sa)
    assert_true("duration" in ds.sa)
    assert_true("features" in ds.sa)
    # check samples
    origsamples = _load_anyimg(tssrc)[0]
    for i, onset in enumerate([value2idx(e["onset"], ds_orig.sa.time_coords, "floor") for e in evs]):
        assert_array_equal(ds.samples[i], origsamples[onset : onset + 4].ravel())
        assert_array_equal(ds.sa.time_indices[i], np.arange(onset, onset + 4))
        assert_array_equal(ds.sa.time_coords[i], np.arange(onset, onset + 4) * 2.5)
        for evattr in [a for a in ds.sa if a.count("event_attrs") and not a.count("event_attrs_event")]:
            assert_array_equal(evs[i]["_".join(evattr.split("_")[2:])], ds.sa[evattr].value[i])
    # check offset: only the last one exactly matches the tr
    assert_array_equal(ds.sa.orig_offset, [1, 1, 0])

    # map back into voxel space, should ignore additional features
    nim = map2nifti(ds)
    # origsamples has t,x,y,z
    assert_equal(nim.get_shape(), origsamples.shape[1:] + (len(ds) * 4,))
    # check shape of a single sample
    nim = map2nifti(ds, ds.samples[0])
    # pynifti image has [t,]z,y,x
    assert_equal(nim.get_shape(), (40, 20, 1, 4))

    # and now with masking
    ds = fmri_dataset(tssrc, mask=masrc)
    ds = eventrelated_dataset(ds, evs, time_attr="time_coords")
    nnonzero = len(_load_anyimg(masrc)[0].nonzero()[0])
    assert_equal(nnonzero, 530)
    # we ask for boxcars of 9s length, and the tr in the file header says 2.5s
    # hence with the 530-voxel mask we should get round(9.0/2.5) * 530 features
    assert_equal(ds.nfeatures, 4 * 530)
    assert_equal(len(ds), len(evs))
    # and they have been broadcasted through all boxcars
    assert_array_equal(ds.fa.voxel_indices[:nnonzero], ds.fa.voxel_indices[nnonzero : 2 * nnonzero])
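
The segmentation step used twice above, in isolation; a sketch assuming a hypothetical BOLD image, mask, and event list with 'onset' and 'duration' keys:

from mvpa2.datasets.mri import fmri_dataset
from mvpa2.datasets.eventrelated import eventrelated_dataset

ds = fmri_dataset('bold.nii.gz', mask='mask.nii.gz')
events = [dict(onset=12.5, duration=9.0), dict(onset=50.0, duration=9.0)]
# one sample per event; the boxcar length follows from duration and ds.sa.time_coords
evds = eventrelated_dataset(ds, events, time_attr='time_coords')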
Example #8
def test_nifti_mapper(filename):
    """Basic testing of map2Nifti
    """
    skip_if_no_external('scipy')

    import nibabel
    data = fmri_dataset(samples=os.path.join(pymvpa_dataroot,'example4d.nii.gz'),
                        targets=[1,2])

    # test mapping of ndarray
    vol = map2nifti(data, np.ones((294912,), dtype='int16'))
    if externals.versions['nibabel'] >= '1.2': 
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24))
    assert_true((vol.get_data() == 1).all())
    # test mapping of the dataset
    vol = map2nifti(data)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    assert_equal(vol_shape, (128, 96, 24, 2))
    ok_(isinstance(vol, data.a.imgtype))

    # test providing custom imgtypes
    vol = map2nifti(data, imgtype=nibabel.Nifti1Pair)
    if externals.versions['nibabel'] >= '1.2':
        vol_shape = vol.shape
    else:
        vol_shape = vol.get_shape()
    ok_(isinstance(vol, nibabel.Nifti1Pair))

    # Let's generate a dataset using an alternative format (MINC)
    # and see if type persists
    volminc = nibabel.MincImage(vol.get_data(),
                                vol.get_affine(),
                                vol.get_header())
    ok_(isinstance(volminc, nibabel.MincImage))
    dsminc = fmri_dataset(volminc, targets=1)
    ok_(dsminc.a.imgtype is nibabel.MincImage)
    ok_(isinstance(dsminc.a.imghdr, nibabel.minc.MincImage.header_class))

    # Let's test if we could save/load now into Analyze volume/dataset
    if externals.versions['nibabel'] < '1.1.0':
        raise SkipTest('nibabel prior 1.1.0 had an issue with types comprehension')
    volanal = map2nifti(dsminc, imgtype=nibabel.AnalyzeImage) # MINC has no 'save' capability
    ok_(isinstance(volanal, nibabel.AnalyzeImage))
    volanal.to_filename(filename)
    dsanal = fmri_dataset(filename, targets=1)
    # this one is tricky since it might become Spm2AnalyzeImage
    ok_('AnalyzeImage' in str(dsanal.a.imgtype))
    ok_('AnalyzeHeader' in str(dsanal.a.imghdr.__class__))
    volanal_ = map2nifti(dsanal)
    ok_(isinstance(volanal_, dsanal.a.imgtype)) # type got preserved
Example #9
def test_multiple_calls():
    """Test if doing exactly the same operation twice yields the same result
    """
    data = fmri_dataset(samples=pathjoin(pymvpa_dataroot, 'example4d.nii.gz'),
                        targets=1,
                        sprefix='abc')
    data2 = fmri_dataset(samples=pathjoin(pymvpa_dataroot, 'example4d.nii.gz'),
                         targets=1,
                         sprefix='abc')
    assert_array_equal(data.a.abc_eldim, data2.a.abc_eldim)
Example #10
def extract_runs_famface_mnimask(
        base_dir,
        out_dir,
        mnimask,
        sub_id,
        with_contrast=False,
        labelcsv='/data/famface/openfmri/github/notebooks/roi_coord.csv'):
    """
    Given our famface data, extract time series for ALL runs of ONE subject.
    For use in TETRAD. base_dir contains pre-processed BOLD images in MNI space.
    """

    runs = ['run%03d' % i for i in xrange(1, 12)]

    # enumerated label names from csv file
    labels = getlabels(labelcsv)

    # select label names and strip whitespaces for header
    # (because tetrad doesn't allow whitespaces)
    header = [pair[1].replace(' ', '') for pair in labels]
    if with_contrast:
        header.append('FAM-UNFAM')

    # load mask in pymvpa
    ms = fmri_dataset(mnimask)

    for run in runs:
        # create output dir
        if not os.path.exists(join(out_dir, 'csv', run)):
            os.makedirs(join(out_dir, 'csv', run))
        infile = join(base_dir, sub_id, 'bold', run, 'bold_mni.nii.gz')
        # load bold file in pymvpa
        bold = fmri_dataset(infile)
        # extract time series
        timeseries = extract_mean_timeseries(bold, ms)

        if with_contrast:
            # TODO: don't hardcode data directory
            data_dir = '/data/famface/openfmri/oli/simulation/data_oli'
            onsetpath = join(data_dir, sub_id, 'model', 'model001', 'onsets',
                             'task001_%s' % run)
            ts_with_contrast = add_contrast(timeseries, onsetpath)

            # write to csv
            outfile = join(out_dir, 'csv', run,
                           '{}_{}.csv'.format(sub_id, run))
            transpose_and_write(ts_with_contrast, outfile, header)

        else:
            # write to csv
            outfile = join(out_dir, 'csv', run,
                           '{}_{}.csv'.format(sub_id, run))
            transpose_and_write(timeseries, outfile, header)
Example #11
def extract_runs_famface_betas(base_dir, out_dir, subdir_template, sub_id,
                               outfilename, beta_filename):
    """
    Project the mni mask into subject space. Extract the mean
    parameter estimate (zstat, pe, cope, varcope) for each roi and run.
    For all runs of one subject (submit multiple subjects in parallel via PBS/Condor).
    """

    # this list will later be written to a csv file.
    betas = []

    # list of directory names for each run
    runs = ['_modelestimate%d' % i for i in range(11)]

    # create output directory
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    """
    extract mean parameter estimate for each run
    """

    for run in runs:

        run_nr = (runs.index(run) + 1)
        """
        Get mask in subject space produced while simulating
        """
        mask_subspace_path = '/data/famface/openfmri/oli/simulation/mcfiles/' \
                             '%s/task001_run%03d/%s/task001_run%03d/%s_task001_run%03d_roimask.nii.gz' \
                             % (sub_id, run_nr, sub_id, run_nr, sub_id, run_nr)
        ms = fmri_dataset(mask_subspace_path)

        # get paths for beta files (or any statistical map really)
        pe_file = join(base_dir, 'modelestimate', subdir_template,
                       'modelestimate', 'mapflow', run, 'results',
                       beta_filename)

        # load stats map in pymvpa
        stats_map = fmri_dataset(pe_file)

        # extract mean parameter estimates
        run_betas = extract_mean_3d(stats_map, ms)
        betas.append(run_betas)

    # write to a csv file
    outfile_fullpath = join(out_dir, outfilename)
    with open(outfile_fullpath, 'wb') as f:
        writer = csv.writer(f)
        for b in betas:
            writer.writerow(b)
Example #12
    def setUp(self):
        self.tmpdir = mkdtemp()

        data_ = fmri_dataset(datafn)
        datafn_hdf5 = pjoin(self.tmpdir, 'datain.hdf5')
        h5save(datafn_hdf5, data_)

        mask_ = fmri_dataset(maskfn)
        maskfn_hdf5 = pjoin(self.tmpdir, 'maskfn.hdf5')
        h5save(maskfn_hdf5, mask_)

        self.datafn = [datafn, datafn_hdf5]
        self.outfn = [pjoin(self.tmpdir, 'output') + ext
                      for ext in ['.nii.gz', '.nii', '.hdf5', '.h5']]
        self.maskfn = ['', maskfn, maskfn_hdf5]
Example #13
def prepare_subject_for_hyperalignment(subject_label, bold_fname, mask_fname, out_dir):
    print('Loading data %s with mask %s' % (bold_fname, mask_fname))
    ds = fmri_dataset(samples=bold_fname, mask=mask_fname)
    zscore(ds, chunks_attr=None)
    out_fname = os.path.join(out_dir, 'sub-%s_data.hdf5' % subject_label)
    print('Saving to %s' % out_fname)
    h5save(out_fname, ds)
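
Driving the helper over several subjects is a simple loop; the labels and filenames below are placeholders:

for label in ['01', '02', '03']:  # hypothetical subject labels
    prepare_subject_for_hyperalignment(
        label, 'sub-%s_bold.nii.gz' % label,
        'group_mask.nii.gz', '/tmp/hyperalign_out')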
Example #14
def load_dot():
    """
    Load a sparse connectivity matrix from a .dot text file
    (row, column, value triplets) into a PyMVPA dataset.
    """
    import numpy as np
    from scipy.sparse import csc_matrix
    from mvpa2.datasets.mri import fmri_dataset

    filename = raw_input("dot>>>")
    maskfile = raw_input("mask>>>")

    print "load data:"
    data = np.loadtxt(filename)
    print data

    print "load mask:"
    seed_set = fmri_dataset(samples=maskfile, mask=maskfile)
    seed = seed_set.copy(sa=[])

    print seed

    # build the sparse matrix from (row, col, value) triplets; indices are 1-based
    sparse_set = csc_matrix((data[:, 2], (data[:, 0] - 1, data[:, 1] - 1)))
    seed.samples = sparse_set.T.todense()

    print seed.samples.shape
    print seed.a
    print seed.sa
    print seed.fa

    seed.save(filename.replace('.dot', '.T.hdf5'))
    return 0
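
The dataset saved above can be read back with PyMVPA's HDF5 loader; a sketch assuming the filename convention from the last line of the function:

from mvpa2.base.hdf5 import h5load

seed = h5load('connectivity.T.hdf5')  # hypothetical name following the '.dot' -> '.T.hdf5' rule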
Example #15
def run(args):
    """Run it"""
    verbose(1, "Loading %d result files" % len(args.data))

    filetype_in = guess_backend(args.data[0])

    if filetype_in == 'nifti':
        dss = [fmri_dataset(f) for f in args.data]
    elif filetype_in == 'hdf5':
        dss = [h5load(f) for f in args.data]
    data = np.asarray([d.samples[args.isample] for d in dss])

    if args.mask:
        filetype_mask = guess_backend(args.mask)
        if filetype_mask == 'nifti':
            mask = nib.load(args.mask).get_data()
        elif filetype_mask == 'hdf5':
            mask = h5load(args.mask).samples
        out_of_mask = mask == 0
    else:
        # just take where no voxel had a value
        out_of_mask = np.sum(data != 0, axis=0) == 0

    t, p = ttest_1samp(data,
                       popmean=args.chance_level,
                       axis=0,
                       alternative=args.alternative)

    if args.stat == 'z':
        if args.alternative == 'two-sided':
            s = stats.norm.isf(p / 2)
        else:
            s = stats.norm.isf(p)
        # take the sign of the original t
        s = np.abs(s) * np.sign(t)
    elif args.stat == 'p':
        s = p
    elif args.stat == 't':
        s = t
    else:
        raise ValueError('unknown stat type %r' % (args.stat,))

    if s.shape != out_of_mask.shape:
        try:
            out_of_mask = out_of_mask.reshape(s.shape)
        except ValueError:
            raise ValueError('Cannot use mask of shape {0} with '
                             'data of shape {1}'.format(
                                 out_of_mask.shape, s.shape))
    s[out_of_mask] = 0

    verbose(1, "Saving to %s" % args.output)
    filetype_out = guess_backend(args.output)
    if filetype_out == 'nifti':
        map2nifti(dss[0], data=s).to_filename(args.output)
    else:  # filetype_out is hdf5
        s = Dataset(np.atleast_2d(s), fa=dss[0].fa, a=dss[0].a)
        h5save(args.output, s)
    return s
Example #16
    def test_surface_voxel_query_engine(self):
        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # make the surfaces
        sphere_density = 10

        outer = surf.generate_sphere(sphere_density) * 25. + 15
        inner = surf.generate_sphere(sphere_density) * 20. + 15

        vs = volsurf.VolSurfMaximalMapping(vg, inner, outer)

        radius = 10

        for fallback, expected_nfeatures in ((True, 1000), (False, 183)):
            voxsel = surf_voxel_selection.voxel_selection(vs, radius)
            qe = SurfaceVoxelsQueryEngine(voxsel,
                                          fallback_euclidian_distance=fallback)

            m = _Voxel_Count_Measure()

            sl = Searchlight(m, queryengine=qe)

            data = np.random.normal(size=vol_shape)
            img = nb.Nifti1Image(data, vol_affine)
            ds = fmri_dataset(img)

            sl_map = sl(ds)

            counts = sl_map.samples

            assert_true(np.all(np.logical_and(5 <= counts, counts <= 18)))
            assert_equal(sl_map.nfeatures, expected_nfeatures)
Example #17
    def test_volgeom_masking(self):
        maskstep = 5
        vg = volgeom.VolGeom((2 * maskstep, 2 * maskstep, 2 * maskstep), np.identity(4))

        mask = vg.get_empty_array()
        sh = vg.shape

        # mask a subset of the voxels
        rng = range(0, sh[0], maskstep)
        for i in rng:
            for j in rng:
                for k in rng:
                    mask[i, j, k] = 1

        # make a new volgeom instance
        vg = volgeom.VolGeom(vg.shape, vg.affine, mask)

        data = vg.get_masked_nifti_image(nt=1)
        msk = vg.get_masked_nifti_image()
        dset = fmri_dataset(data, mask=msk)
        vg_dset = volgeom.from_any(dset)

        # ensure that the mask is set properly
        assert_equal(vg.nvoxels, vg.nvoxels_mask * maskstep ** 3)
        assert_equal(vg_dset, vg)

        dilates = range(0, 8, 2)
        nvoxels_masks = [] # keep track of number of voxels for each size
        for dilate in dilates:
            covers_full_volume = dilate * 2 >= maskstep * 3 ** .5 + 1

            # constr gets values: None, Sphere(0), 2, Sphere(2), ...
            for i, constr in enumerate([Sphere, lambda x:x if x else None]):
                dilater = constr(dilate)

                img_dilated = vg.get_masked_nifti_image(dilate=dilater)
                data = img_dilated.get_data()

                assert_array_equal(data, vg.get_masked_array(dilate=dilater))
                n = np.sum(data)

                # number of voxels in mask is increasing
                assert_true(all(n >= p for p in nvoxels_masks))

                # results should be identical irrespective of constr
                if i == 0:
                    # - first call with this value of dilate: has to be more
                    #   voxels than very previous dilation value, unless the
                    #   full volume is covered - then it can be equal too
                    # - every next call: ensure size matches
                    cmp = lambda x, y:(x >= y if covers_full_volume else x > y)
                    assert_true(all(cmp(n, p) for p in nvoxels_masks))
                    nvoxels_masks.append(n)
                else:
                    # same size as previous call
                    assert_equal(n, nvoxels_masks[-1])

                # if dilate is not None or zero, then it should
                # have selected all the voxels if the radius is big enough
                assert_equal(np.sum(data) == vg.nvoxels, covers_full_volume)
Example #18
def split_mask(mask_image, workdir):
    """
    Split a mask with multiple ROIs into separate files, one per ROI.
    Saves them in 'workdir/roi_masks'.
    Returns a list of paths, one for each produced mask file.
    """

    ms = fmri_dataset(mask_image)

    maskpath = os.path.join(workdir, 'roi_masks')

    if not os.path.exists(maskpath):
        os.makedirs(maskpath)

    # list of paths for each roi mask
    maskfiles = []

    # save a separate mask for each ROI to a file in workdir
    for roivalue in range(1, max(ms.samples[0]) + 1):
        msc = ms.copy()
        msc.samples[0][msc.samples[0] != roivalue] = False
        image = map2nifti(msc)
        maskfile = os.path.join(maskpath, 'roi{}_mask.nii.gz'.format(roivalue))
        image.to_filename(maskfile)
        maskfiles.append(maskfile)

    return maskfiles
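
Usage sketch for the splitter, with placeholder paths:

# hypothetical atlas with integer-labelled ROIs
roi_files = split_mask('atlas_rois.nii.gz', '/tmp/work')
# -> ['/tmp/work/roi_masks/roi1_mask.nii.gz', '/tmp/work/roi_masks/roi2_mask.nii.gz', ...]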
Example #19
def CorrespondenceTable(nifti, target, chunk, mask):
    # nifti was an undefined global in the original snippet; it is taken
    # here explicitly as the path of the NIfTI file to load

    # prepare the dataset
    dataset = fmri_dataset(nifti, targets=target, chunks=chunk, mask=mask,
                           sprefix='voxel', tprefix='time', add_fa=None)

    print('dataset ready')

    poly_detrend(dataset, polyord=1, chunks_attr='chunks')

    dataset = dataset[np.array([l in ['0', '1']
                               for l in dataset.targets], dtype='bool')]

    # get the number of voxels and build voxel names
    voxNum = dataset.shape[1]

    VoxName = []
    for i in range(voxNum):
        name = 'Voxel' + str(i + 1)
        VoxName.append(name)

    # get the voxel positions (x, y, z)
    voxPosition = dataset.fa.values()
    voxPosition = list(voxPosition)[0][:]
    voxPosition = pd.DataFrame(voxPosition, columns=['x', 'y', 'z'], index=VoxName)

    return voxPosition
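
Usage sketch; the attributes file and image paths are placeholders:

from mvpa2.misc.io import SampleAttributes

attr = SampleAttributes('attributes.txt')  # hypothetical targets/chunks file
pos = CorrespondenceTable('bold.nii.gz', attr.targets, attr.chunks, 'mask.nii.gz')
print(pos.head())  # x/y/z coordinates indexed by Voxel1, Voxel2, ...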
Example #20
def test_fmri_to_cosmo():
    skip_if_no_external('nibabel')
    from mvpa2.datasets.mri import fmri_dataset
    # test exporting an fMRI dataset to CoSMoMVPA
    pymvpa_ds = fmri_dataset(
        samples=pathjoin(pymvpa_dataroot, 'example4d.nii.gz'),
        targets=[1, 2], sprefix='voxel')
    cosmomvpa_struct = cosmo.map2cosmo(pymvpa_ds)
    _assert_set_equal(cosmomvpa_struct.keys(), ['a', 'fa', 'sa', 'samples'])

    a_dict = dict(_obj2tup(cosmomvpa_struct['a']))
    mri_keys = ['imgaffine', 'voxel_eldim', 'voxel_dim']
    _assert_subset(mri_keys, a_dict.keys())

    for k in mri_keys:
        c_value = a_dict[k]
        p_value = pymvpa_ds.a[k].value

        if isinstance(p_value, tuple):
            c_value = c_value.ravel()
            p_value = np.asarray(p_value).ravel()

        assert_array_almost_equal(c_value, p_value)

    fa_dict = dict(_obj2tup(cosmomvpa_struct['fa']))
    fa_keys = ['voxel_indices']
    _assert_set_equal(fa_dict.keys(), fa_keys)
    for k in fa_keys:
        assert_array_almost_equal(fa_dict[k].T, pymvpa_ds.fa[k].value)
Example #21
def nifti_to_dataset(nifti_file, attr_file=None, annot_file=None, subject_id=None, session_id=None):

    logger.info("Loading fmri dataset: {}".format(nifti_file))
    ds = fmri_dataset(samples=nifti_file)

    if attr_file is not None:
        logger.info("Loading attributes: {}".format(attr_file))
        attr = ColumnData(attr_file)
        valid = min(ds.nsamples, attr.nrows)
        valid = int(valid / 180) * 180  # FIXME: ...
        print valid
        ds = ds[:valid, :]
        for k in attr.keys():
            ds.sa[k] = attr[k][:valid]

    if annot_file is not None:
        logger.info("Loading annotation: {}".format(annot_file))
        annot = nibabel.freesurfer.io.read_annot(annot_file)
        ds.fa["annotation"] = [annot[2][i] for i in annot[0]]  # FIXME: roi cannot be a fa

    if subject_id is not None:
        ds.sa["subject_id"] = [subject_id] * ds.nsamples

    if session_id is not None:
        ds.sa["session_id"] = [session_id] * ds.nsamples

    return ds
Example #22
    def test_surface_voxel_query_engine(self):
        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # make the surfaces
        sphere_density = 10

        outer = surf.generate_sphere(sphere_density) * 25. + 15
        inner = surf.generate_sphere(sphere_density) * 20. + 15

        vs = volsurf.VolSurfMaximalMapping(vg, inner, outer)

        radius = 10

        for fallback, expected_nfeatures in ((True, 1000), (False, 183)):
            voxsel = surf_voxel_selection.voxel_selection(vs, radius)
            qe = SurfaceVoxelsQueryEngine(voxsel, fallback_euclidian_distance=fallback)

            m = _Voxel_Count_Measure()

            sl = Searchlight(m, queryengine=qe)

            data = np.random.normal(size=vol_shape)
            img = nb.Nifti1Image(data, vol_affine)
            ds = fmri_dataset(img)

            sl_map = sl(ds)

            counts = sl_map.samples

            assert_true(np.all(np.logical_and(5 <= counts, counts <= 18)))
            assert_equal(sl_map.nfeatures, expected_nfeatures)
Example #23
def test_fmridataset():
    # full-blown fmri dataset testing
    import nibabel
    maskimg = nibabel.load(pathjoin(pymvpa_dataroot, 'mask.nii.gz'))
    data = maskimg.get_data().copy()
    data[data > 0] = np.arange(1, np.sum(data) + 1)
    maskimg = nibabel.Nifti1Image(data, None, maskimg.header)
    ds = fmri_dataset(samples=pathjoin(pymvpa_dataroot, 'bold.nii.gz'),
                      mask=maskimg,
                      sprefix='subj1',
                      add_fa={'myintmask': maskimg})
    ds_alt = preprocessed_dataset(pathjoin(pymvpa_dataroot, 'bold.nii.gz'),
                                  nibabel.load,
                                  fmri_dataset,
                                  mask=maskimg,
                                  sprefix='subj1',
                                  add_fa={'myintmask': maskimg})
    assert_datasets_almost_equal(ds, ds_alt)

    # content
    assert_equal(len(ds), 1452)
    assert_equal(ds.nfeatures, 530)
    assert_array_equal(sorted(ds.sa.keys()), ['time_coords', 'time_indices'])
    assert_array_equal(sorted(ds.fa.keys()), ['myintmask', 'subj1_indices'])
    assert_array_equal(sorted(ds.a.keys()), [
        'imgaffine', 'imghdr', 'imgtype', 'mapper', 'subj1_dim', 'subj1_eldim'
    ])
    # vol extent
    assert_equal(ds.a.subj1_dim, (40, 20, 1))
    # check time
    assert_equal(ds.sa.time_coords[-1], 3627.5)
    # non-zero mask values
    assert_array_equal(ds.fa.myintmask, np.arange(1, ds.nfeatures + 1))
    # we know that imgtype must be:
    ok_(getattr(nibabel, ds.a.imgtype) is nibabel.Nifti1Image)
Example #24
def load_example_fmri_dataset(name='1slice', literal=False):
    """Load minimal fMRI dataset that is shipped with PyMVPA."""
    from mvpa2.datasets.sources.openfmri import OpenFMRIDataset
    from mvpa2.datasets.mri import fmri_dataset
    from mvpa2.misc.io import SampleAttributes

    basedir = pathjoin(pymvpa_dataroot, 'haxby2001')
    mask = {'1slice': pathjoin(pymvpa_dataroot, 'mask.nii.gz'),
            '25mm': pathjoin(basedir, 'sub001', 'masks', '25mm',
                                 'brain.nii.gz')}[name]

    if literal:
        model = 1
        subj = 1
        openfmri = OpenFMRIDataset(basedir)
        ds = openfmri.get_model_bold_dataset(model, subj, flavor=name,
                                             mask=mask, noinfolabel='rest')
        # re-imagine the global time_coords of a concatenated time series
        # this is only for the purpose of keeping the example data in the
        # exact same shape as it has always been. in absolute terms this makes no
        # sense as there is no continuous time in this dataset
        ds.sa['run_time_coords'] = ds.sa.time_coords
        ds.sa['time_coords'] = np.arange(len(ds)) * 2.5
    else:
        if name == '25mm':
            raise ValueError("The 25mm dataset is no longer available with "
                             "numerical labels")
        attr = SampleAttributes(pathjoin(pymvpa_dataroot, 'attributes.txt'))
        ds = fmri_dataset(samples=pathjoin(pymvpa_dataroot, 'bold.nii.gz'),
                          targets=attr.targets, chunks=attr.chunks,
                          mask=mask)

    return ds
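
Loading the shipped example data is then a one-liner:

ds = load_example_fmri_dataset(name='1slice', literal=True)
print(ds.sa.targets[:5])  # literal condition labels, with 'rest' as the no-info label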
Example #25
File: ANOVA.py, Project: rahulgoel20/ptsd
def nifti_to_dataset(nifti_file,
                     attr_file=None,
                     annot_file=None,
                     subject_id=None,
                     session_id=None):

    logger.info('Loading fmri dataset: {}'.format(nifti_file))
    ds = fmri_dataset(samples=nifti_file)

    if attr_file is not None:
        logger.info('Loading attributes: {}'.format(attr_file))
        attr = ColumnData(attr_file)
        valid = min(ds.nsamples, attr.nrows)
        valid = int(valid / 180) * 180  #FIXME: ...
        logger.info('valid samples: {}'.format(valid))
        ds = ds[:valid, :]
        for k in attr.keys():
            ds.sa[k] = attr[k][:valid]

    if annot_file is not None:
        logger.info('Loading annotation: {}'.format(annot_file))
        annot = nibabel.freesurfer.io.read_annot(annot_file)
        ds.fa['annotation'] = [annot[2][i]
                               for i in annot[0]]  #FIXME: roi cannot be a fa

    if subject_id is not None:
        ds.sa['subject_id'] = [subject_id] * ds.nsamples

    if session_id is not None:
        ds.sa['session_id'] = [session_id] * ds.nsamples

    return ds
Example #26
    def test_volgeom_masking(self):
        maskstep = 5
        vg = volgeom.VolGeom((2 * maskstep, 2 * maskstep, 2 * maskstep), np.identity(4))

        mask = vg.get_empty_array()
        sh = vg.shape

        # mask a subset of the voxels
        rng = range(0, sh[0], maskstep)
        for i in rng:
            for j in rng:
                for k in rng:
                    mask[i, j, k] = 1

        # make a new volgeom instance
        vg = volgeom.VolGeom(vg.shape, vg.affine, mask)

        data = vg.get_masked_nifti_image(nt=1)
        msk = vg.get_masked_nifti_image()
        dset = fmri_dataset(data, mask=msk)
        vg_dset = volgeom.from_any(dset)

        # ensure that the mask is set properly
        assert_equal(vg.nvoxels, vg.nvoxels_mask * maskstep ** 3)
        assert_equal(vg_dset, vg)

        dilates = range(0, 8, 2)
        nvoxels_masks = []  # keep track of number of voxels for each size
        for dilate in dilates:
            covers_full_volume = dilate * 2 >= maskstep * 3 ** .5 + 1

            # constr gets values: None, Sphere(0), 2, Sphere(2), ...
            for i, constr in enumerate([Sphere, lambda x:x if x else None]):
                dilater = constr(dilate)

                img_dilated = vg.get_masked_nifti_image(dilate=dilater)
                data = img_dilated.get_data()

                assert_array_equal(data, vg.get_masked_array(dilate=dilater))
                n = np.sum(data)

                # number of voxels in mask is increasing
                assert_true(all(n >= p for p in nvoxels_masks))

                # results should be identical irrespective of constr
                if i == 0:
                    # - first call with this value of dilate: has to be more
                    #   voxels than very previous dilation value, unless the
                    #   full volume is covered - then it can be equal too
                    # - every next call: ensure size matches
                    cmp = lambda x, y:(x >= y if covers_full_volume else x > y)
                    assert_true(all(cmp(n, p) for p in nvoxels_masks))
                    nvoxels_masks.append(n)
                else:
                    # same size as previous call
                    assert_equal(n, nvoxels_masks[-1])

                # if dilate is not None or zero, then it should
                # have selected all the voxels if the radius is big enough
                assert_equal(np.sum(data) == vg.nvoxels, covers_full_volume)
Example #27
    def test_queryengine_io(self, fn):
        skip_if_no_external("h5py")
        from mvpa2.base.hdf5 import h5save, h5load

        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # generate some surfaces,
        # and add some noise to them
        sphere_density = 10
        outer = surf.generate_sphere(sphere_density) * 5 + 8
        inner = surf.generate_sphere(sphere_density) * 3 + 8
        radius = 5.0

        add_fa = ["center_distances", "grey_matter_position"]
        qe = disc_surface_queryengine(radius, vg, inner, outer, add_fa=add_fa)
        ds = fmri_dataset(vg.get_masked_nifti_image())

        # the following is not really a strong requirement. XXX remove?
        assert_raises(ValueError, lambda: qe[qe.ids[0]])

        # check that after training it behaves well
        qe.train(ds)
        i = qe.ids[0]
        try:
            m = qe[i]
        except ValueError, e:
            raise AssertionError(
                "Failed to query %r from %r after training on %r. " "Exception was: %r" % (i, qe, ds, e)
            )
Example #28
def test_fmridataset():
    # full-blown fmri dataset testing
    import nibabel
    maskimg = nibabel.load(os.path.join(pymvpa_dataroot, 'mask.nii.gz'))
    data = maskimg.get_data().copy()
    data[data>0] = np.arange(1, np.sum(data) + 1)
    maskimg = nibabel.Nifti1Image(data, None, maskimg.get_header())
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot,'bold.nii.gz'),
                      mask=maskimg,
                      sprefix='subj1',
                      add_fa={'myintmask': maskimg})
    # content
    assert_equal(len(ds), 1452)
    assert_equal(ds.nfeatures, 530)
    assert_array_equal(sorted(ds.sa.keys()),
            ['time_coords', 'time_indices'])
    assert_array_equal(sorted(ds.fa.keys()),
            ['myintmask', 'subj1_indices'])
    assert_array_equal(sorted(ds.a.keys()),
            ['imghdr', 'imgtype', 'mapper', 'subj1_dim', 'subj1_eldim'])
    # vol extent
    assert_equal(ds.a.subj1_dim, (40, 20, 1))
    # check time
    assert_equal(ds.sa.time_coords[-1], 3627.5)
    # non-zero mask values
    assert_array_equal(ds.fa.myintmask, np.arange(1, ds.nfeatures + 1))
    # we know that imgtype must be:
    ok_(ds.a.imgtype is nibabel.Nifti1Image)
Example #29
def test_fmridataset():
    # full-blown fmri dataset testing
    import nibabel

    maskimg = nibabel.load(os.path.join(pymvpa_dataroot, "mask.nii.gz"))
    data = maskimg.get_data().copy()
    data[data > 0] = np.arange(1, np.sum(data) + 1)
    maskimg = nibabel.Nifti1Image(data, None, maskimg.get_header())
    attr = SampleAttributes(os.path.join(pymvpa_dataroot, "attributes.txt"))
    ds = fmri_dataset(
        samples=os.path.join(pymvpa_dataroot, "bold.nii.gz"),
        targets=attr.targets,
        chunks=attr.chunks,
        mask=maskimg,
        sprefix="subj1",
        add_fa={"myintmask": maskimg},
    )
    # content
    assert_equal(len(ds), 1452)
    assert_equal(ds.nfeatures, 530)
    assert_array_equal(sorted(ds.sa.keys()), ["chunks", "targets", "time_coords", "time_indices"])
    assert_array_equal(sorted(ds.fa.keys()), ["myintmask", "subj1_indices"])
    assert_array_equal(sorted(ds.a.keys()), ["imghdr", "imgtype", "mapper", "subj1_dim", "subj1_eldim"])
    # vol extent
    assert_equal(ds.a.subj1_dim, (40, 20, 1))
    # check time
    assert_equal(ds.sa.time_coords[-1], 3627.5)
    # non-zero mask values
    assert_array_equal(ds.fa.myintmask, np.arange(1, ds.nfeatures + 1))
    # we know that imgtype must be:
    ok_(ds.a.imgtype is nibabel.Nifti1Image)
Example #30
def load_example_fmri_dataset(name='1slice', literal=False):
    """Load minimal fMRI dataset that is shipped with PyMVPA."""
    from mvpa2.datasets.eventrelated import events2sample_attr
    from mvpa2.datasets.sources.openfmri import OpenFMRIDataset
    from mvpa2.datasets.mri import fmri_dataset
    from mvpa2.misc.io import SampleAttributes

    basedir = os.path.join(pymvpa_dataroot, 'openfmri')
    mask = {'1slice': os.path.join(pymvpa_dataroot, 'mask.nii.gz'),
            '25mm': os.path.join(basedir, 'sub001', 'masks', '25mm',
                    'brain.nii.gz')}[name]

    if literal:
        model = 1
        subj = 1
        openfmri = OpenFMRIDataset(basedir)
        ds = openfmri.get_model_bold_dataset(model, subj, flavor=name,
                                             mask=mask, noinfolabel='rest')
        # re-imagine the global time_coords of a concatenated time series
        # this is only for the purpose of keeping the example data in the
        # exact same shape as it has always been. in absolute terms this makes no
        # sense as there is no continuous time in this dataset
        ds.sa['run_time_coords'] = ds.sa.time_coords
        ds.sa['time_coords'] = np.arange(len(ds)) * 2.5
    else:
        if name == '25mm':
            raise ValueError("The 25mm dataset is no longer available with "
                             "numerical labels")
        attr = SampleAttributes(os.path.join(pymvpa_dataroot, 'attributes.txt'))
        ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'bold.nii.gz'),
                          targets=attr.targets, chunks=attr.chunks,
                          mask=mask)

    return ds
Example #31
    def test_queryengine_io(self, fn):
        skip_if_no_external('h5py')
        from mvpa2.base.hdf5 import h5save, h5load

        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # generate some surfaces,
        # and add some noise to them
        sphere_density = 10
        outer = surf.generate_sphere(sphere_density) * 5 + 8
        inner = surf.generate_sphere(sphere_density) * 3 + 8
        radius = 5.

        add_fa = ['center_distances', 'grey_matter_position']
        qe = disc_surface_queryengine(radius, vg, inner, outer, add_fa=add_fa)
        ds = fmri_dataset(vg.get_masked_nifti_image())

        # the following is not really a strong requirement. XXX remove?
        assert_raises(ValueError, lambda: qe[qe.ids[0]])

        # check that after training it behaves well
        qe.train(ds)
        i = qe.ids[0]
        try:
            m = qe[i]
        except ValueError, e:
            raise AssertionError(
                'Failed to query %r from %r after training on %r. Exception was: %r'
                % (i, qe, ds, e))
Example #32
def test_fmridataset():
    # full-blown fmri dataset testing
    import nibabel
    maskimg = nibabel.load(os.path.join(pymvpa_dataroot, 'mask.nii.gz'))
    data = maskimg.get_data().copy()
    data[data > 0] = np.arange(1, np.sum(data) + 1)
    maskimg = nibabel.Nifti1Image(data, None, maskimg.get_header())
    attr = SampleAttributes(os.path.join(pymvpa_dataroot, 'attributes.txt'))
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'bold.nii.gz'),
                      targets=attr.targets,
                      chunks=attr.chunks,
                      mask=maskimg,
                      sprefix='subj1',
                      add_fa={'myintmask': maskimg})
    # content
    assert_equal(len(ds), 1452)
    assert_equal(ds.nfeatures, 530)
    assert_array_equal(sorted(ds.sa.keys()),
                       ['chunks', 'targets', 'time_coords', 'time_indices'])
    assert_array_equal(sorted(ds.fa.keys()), ['myintmask', 'subj1_indices'])
    assert_array_equal(
        sorted(ds.a.keys()),
        ['imghdr', 'imgtype', 'mapper', 'subj1_dim', 'subj1_eldim'])
    # vol extent
    assert_equal(ds.a.subj1_dim, (40, 20, 1))
    # check time
    assert_equal(ds.sa.time_coords[-1], 3627.5)
    # non-zero mask values
    assert_array_equal(ds.fa.myintmask, np.arange(1, ds.nfeatures + 1))
    # we know that imgtype must be:
    ok_(ds.a.imgtype is nibabel.Nifti1Image)
Example #33
File: native.py, Project: hwd15508/nidata
def load_example_fmri_dataset(name="1slice", literal=False):
    """Load minimal fMRI dataset that is shipped with PyMVPA."""
    from mvpa2.datasets.sources.openfmri import OpenFMRIDataset
    from mvpa2.datasets.mri import fmri_dataset
    from mvpa2.misc.io import SampleAttributes

    basedir = op.join(pymvpa_dataroot, "haxby2001")
    mask = {
        "1slice": op.join(pymvpa_dataroot, "mask.nii.gz"),
        "25mm": op.join(basedir, "sub001", "masks", "25mm", "brain.nii.gz"),
    }[name]

    if literal:
        model = 1
        subj = 1
        openfmri = OpenFMRIDataset(basedir)
        ds = openfmri.get_model_bold_dataset(model, subj, flavor=name, mask=mask, noinfolabel="rest")
        # re-imagine the global time_coords of a concatenated time series
        # this is only for the purpose of keeping the example data in the
        # exact same shape as it has always been. in absolute terms this makes no
        # sense as there is no continuous time in this dataset
        ds.sa["run_time_coords"] = ds.sa.time_coords
        ds.sa["time_coords"] = np.arange(len(ds)) * 2.5
    else:
        if name == "25mm":
            raise ValueError("The 25mm dataset is no longer available with " "numerical labels")
        attr = SampleAttributes(op.join(pymvpa_dataroot, "attributes.txt"))
        ds = fmri_dataset(
            samples=op.join(pymvpa_dataroot, "bold.nii.gz"), targets=attr.targets, chunks=attr.chunks, mask=mask
        )

    return ds
Example #34
def prepare_subject_for_hyperalignment(subject_label, bold_fname, mask_fname,
                                       out_dir):
    print('Loading data %s with mask %s' % (bold_fname, mask_fname))
    ds = fmri_dataset(samples=bold_fname, mask=mask_fname)
    zscore(ds, chunks_attr=None)
    out_fname = os.path.join(out_dir, 'sub-%s_data.hdf5' % subject_label)
    print('Saving to %s' % out_fname)
    h5save(out_fname, ds)
Example #35
def test_4d_mask():
    """ Test masking with 4D datasets

    If the time dimension has length 1, permit, otherwise fail"""
    import nibabel
    bold = pathjoin(pymvpa_dataroot, 'bold.nii.gz')
    mask = pathjoin(pymvpa_dataroot, 'mask.nii.gz')
    # mask4d.nii.gz is simply mask.nii.gz with an extra dimension added
    mask4d = pathjoin(pymvpa_dataroot, 'mask4d.nii.gz')
    # mask4dfail.nii.gz is mask.nii.gz copied twice in the 4th dimension
    mask4df = pathjoin(pymvpa_dataroot, 'mask4dfail.nii.gz')
    assert_equal(nibabel.load(mask).shape + (1, ), nibabel.load(mask4d).shape)

    bold1 = fmri_dataset(bold, mask=mask)
    bold2 = fmri_dataset(bold, mask=mask4d)
    assert_equal(bold1.shape, bold2.shape)
    assert_raises(ValueError, fmri_dataset, bold, mask=mask4df)
Example #36
    def setUp(self):
        self.tmpdir = mkdtemp()

        data_ = fmri_dataset(datafn)
        datafn_hdf5 = pjoin(self.tmpdir, 'datain.hdf5')
        h5save(datafn_hdf5, data_)

        mask_ = fmri_dataset(maskfn)
        maskfn_hdf5 = pjoin(self.tmpdir, 'maskfn.hdf5')
        h5save(maskfn_hdf5, mask_)

        self.datafn = [datafn, datafn_hdf5]
        self.outfn = [
            pjoin(self.tmpdir, 'output') + ext
            for ext in ['.nii.gz', '.nii', '.hdf5', '.h5']
        ]
        self.maskfn = ['', maskfn, maskfn_hdf5]
Example #37
def test_nifti_dataset():
    """Basic testing of NiftiDataset
    """
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                           'example4d.nii.gz'),
                      targets=[1, 2],
                      sprefix='voxel')
    assert_equal(ds.nfeatures, 294912)
    assert_equal(ds.nsamples, 2)

    assert_array_equal(ds.a.voxel_eldim, ds.a.imghdr['pixdim'][1:4])
    assert_true(ds.a['voxel_dim'].value == (128, 96, 24))

    # XXX move elsewhere
    #check that mapper honours elementsize
    #nb22 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.2)])
    #nb20 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.0)])
    #self.assertTrue(nb22.shape[0] == 7)
    #self.assertTrue(nb20.shape[0] == 5)

    merged = ds.copy()
    merged.append(ds)
    assert_equal(merged.nfeatures, 294912)
    assert_equal(merged.nsamples, 4)

    # check that the header survives
    for k in merged.a.imghdr.keys():
        assert_array_equal(merged.a.imghdr[k], ds.a.imghdr[k])

    # throw away old dataset and see if new one survives
    del ds
    assert_array_equal(merged.samples[3], merged.samples[1])

    # check whether we can use a plain ndarray as mask
    mask = np.zeros((128, 96, 24), dtype='bool')
    mask[40, 20, 12] = True
    nddata = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                               'example4d.nii.gz'),
                          targets=[1, 2],
                          mask=mask)
    assert_equal(nddata.nfeatures, 1)
    rmap = nddata.a.mapper.reverse1(np.array([44]))
    assert_equal(rmap.shape, (128, 96, 24))
    assert_equal(np.sum(rmap), 44)
    assert_equal(rmap[40, 20, 12], 44)
Example #38
def run(args):
    from mvpa2.base.hdf5 import h5save
    ds = None
    if args.txt_data is not None:
        verbose(1, "Load data from TXT file '%s'" % args.txt_data)
        samples = _load_from_txt(args.txt_data)
        ds = Dataset(samples)
    elif args.npy_data is not None:
        verbose(1, "Load data from NPY file '%s'" % args.npy_data)
        samples = _load_from_npy(args.npy_data)
        ds = Dataset(samples)
    elif args.mri_data is not None:
        verbose(1, "Load data from MRI image(s) %s" % args.mri_data)
        from mvpa2.datasets.mri import fmri_dataset
        vol_attr = dict()
        if args.add_vol_attr is not None:
            # XXX add a way to use the mapper of an existing dataset to
            # add a volume attribute without having to load the entire
            # mri data again
            vol_attr = dict(args.add_vol_attr)
            if len(args.add_vol_attr) != len(vol_attr):
                warning("--vol-attr option with duplicate attribute name: "
                        "check arguments!")
            verbose(2, "Add volumetric feature attributes: %s" % vol_attr)
        ds = fmri_dataset(args.mri_data, mask=args.mask, add_fa=vol_attr)

    if ds is None:
        if args.data is None:
            raise RuntimeError('no data source specified')
        else:
            ds = hdf2ds(args.data)[0]
    else:
        if args.data is not None:
            verbose(
                1,
                'ignoring dataset input in favor of other data source -- remove either one to disambiguate'
            )

    # act on all attribute options
    ds = process_common_dsattr_opts(ds, args)

    if args.add_fsl_mcpar is not None:
        from mvpa2.misc.fsl.base import McFlirtParams
        mc_par = McFlirtParams(args.add_fsl_mcpar)
        for param in mc_par:
            verbose(
                2, "Add motion regressor as sample attribute '%s'" %
                ('mc_' + param))
            ds.sa['mc_' + param] = mc_par[param]

    verbose(3, "Dataset summary %s" % (ds.summary()))
    # and store
    outfilename = args.output
    if not outfilename.endswith('.hdf5'):
        outfilename += '.hdf5'
    verbose(1, "Save dataset to '%s'" % outfilename)
    h5save(outfilename, ds, mkdir=True, compression=args.hdf5_compression)
Example #39
def test_4d_mask():
    """ Test masking with 4D datasets

    If the time dimension has length 1, permit, otherwise fail"""
    import nibabel
    bold = pathjoin(pymvpa_dataroot, 'bold.nii.gz')
    mask = pathjoin(pymvpa_dataroot, 'mask.nii.gz')
    # mask4d.nii.gz is simply mask.nii.gz with an extra dimension added
    mask4d = pathjoin(pymvpa_dataroot, 'mask4d.nii.gz')
    # mask4dfail.nii.gz is mask.nii.gz copied twice in the 4th dimension
    mask4df = pathjoin(pymvpa_dataroot, 'mask4dfail.nii.gz')
    assert_equal(nibabel.load(mask).shape + (1,),
                 nibabel.load(mask4d).shape)

    bold1 = fmri_dataset(bold, mask=mask)
    bold2 = fmri_dataset(bold, mask=mask4d)
    assert_equal(bold1.shape, bold2.shape)
    assert_raises(ValueError, fmri_dataset, bold, mask=mask4df)
Example #40
    def load_run(runstring):
        ds = fmri_dataset(samples=os.path.join(root, subject, 'BOLD', runstring,
                                               'bold.nii.gz'),
                          mask=mask)
        task, run = extract_task_and_run(runstring)

        ds.sa['chunks'] = np.empty(len(ds))
        ds.sa.chunks.fill(run)
        ds.sa['task'] = np.empty(len(ds))
        ds.sa.task.fill(task)
        return ds
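
Runs loaded this way are usually stacked into a single dataset; a sketch assuming hypothetical run identifiers:

from mvpa2.base.dataset import vstack

runstrings = ['task001_run001', 'task001_run002']  # hypothetical
ds = vstack([load_run(r) for r in runstrings])  # chunks/task sample attributes survive the stack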
Example #41
    def test_niml_dset_voxsel(self, fn):
        if not externals.exists('nibabel'):
            return

        # This is actually a bit of an integration test.
        # It tests storing and retrieving searchlight results.
        # Imports are inline here so that it does not mess up the header
        # and makes the other unit tests more modular
        # XXX put this in a separate file?
        from mvpa2.misc.surfing import volgeom, surf_voxel_selection, queryengine
        from mvpa2.measures.searchlight import Searchlight
        from mvpa2.support.nibabel import surf
        from mvpa2.measures.base import Measure
        from mvpa2.datasets.mri import fmri_dataset

        class _Voxel_Count_Measure(Measure):
            # used to check voxel selection results
            is_trained = True

            def __init__(self, dtype, **kwargs):
                Measure.__init__(self, **kwargs)
                self.dtype = dtype

            def _call(self, dset):
                return self.dtype(dset.nfeatures)

        sh = (20, 20, 20)
        vg = volgeom.VolGeom(sh, np.identity(4))

        density = 20

        outer = surf.generate_sphere(density) * 10. + 5
        inner = surf.generate_sphere(density) * 5. + 5

        intermediate = outer * .5 + inner * .5
        xyz = intermediate.vertices

        radius = 50

        sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner, outer)
        qe = queryengine.SurfaceVerticesQueryEngine(sel)

        for dtype in (int, float):
            sl = Searchlight(_Voxel_Count_Measure(dtype), queryengine=qe)

            ds = fmri_dataset(vg.get_empty_nifti_image(1))
            r = sl(ds)

            niml.write(fn, r)
            rr = niml.read(fn)

            os.remove(fn)

            assert_array_equal(r.samples, rr.samples)
Example #42
File: cmd_ttest.py, Project: PyMVPA/PyMVPA
def run(args):
    """Run it"""
    verbose(1, "Loading %d result files" % len(args.data))

    filetype_in = guess_backend(args.data[0])

    if filetype_in == 'nifti':
        dss = [fmri_dataset(f) for f in args.data]
    elif filetype_in == 'hdf5':
        dss = [h5load(f) for f in args.data]
    data = np.asarray([d.samples[args.isample] for d in dss])

    if args.mask:
        filetype_mask = guess_backend(args.mask)
        if filetype_mask == 'nifti':
            mask = nib.load(args.mask).get_data()
        elif filetype_mask == 'hdf5':
            mask = h5load(args.mask).samples
        out_of_mask = mask == 0
    else:
        # just take where no voxel had a value
        out_of_mask = np.sum(data != 0, axis=0)==0

    t, p = ttest_1samp(data, popmean=args.chance_level, axis=0,
                       alternative=args.alternative)

    if args.stat == 'z':
        if args.alternative == 'two-sided':
            s = stats.norm.isf(p/2)
        else:
            s = stats.norm.isf(p)
        # take the sign of the original t
        s = np.abs(s) * np.sign(t)
    elif args.stat == 'p':
        s = p
    elif args.stat == 't':
        s = t
    else:
        raise ValueError('unknown stat type %r' % (args.stat,))

    if s.shape != out_of_mask.shape:
        try:
            out_of_mask = out_of_mask.reshape(s.shape)
        except ValueError:
            raise ValueError('Cannot use mask of shape {0} with '
                             'data of shape {1}'.format(out_of_mask.shape, s.shape))
    s[out_of_mask] = 0

    verbose(1, "Saving to %s" % args.output)
    filetype_out = guess_backend(args.output)
    if filetype_out == 'nifti':
        map2nifti(dss[0], data=s).to_filename(args.output)
    else:  # filetype_out is hdf5
        s = Dataset(np.atleast_2d(s), fa=dss[0].fa, a=dss[0].a)
        h5save(args.output, s)
    return s
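The z branch above converts p back to a signed z-score: halve a two-sided p before the inverse survival function, then restore the sign of t. A standalone sketch with plain scipy, using illustrative t-values and degrees of freedom:

import numpy as np
from scipy import stats

t = np.array([2.5, -1.0])
p_two = 2 * stats.t.sf(np.abs(t), df=9)             # two-sided p for each t
z = np.abs(stats.norm.isf(p_two / 2)) * np.sign(t)  # same recipe as the 'z' branch
# z keeps t's sign and grows with |t|: here z[0] > 0 > z[1]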
Example #43
    def test_niml_dset_voxsel(self):
        if not externals.exists('nibabel'):
            return

        # This is actually a bit of an integration test.
        # It tests storing and retrieving searchlight results.
        # Imports are inline here so that it does not mess up the header
        # and makes the other unit tests more modular
        # XXX put this in a separate file?
        from mvpa2.misc.surfing import volgeom, surf_voxel_selection, queryengine
        from mvpa2.measures.searchlight import Searchlight
        from mvpa2.support.nibabel import surf
        from mvpa2.measures.base import Measure
        from mvpa2.datasets.mri import fmri_dataset
        from mvpa2.support.nibabel import afni_niml_dset as niml_dset  # assumed import location for write/read below
        import tempfile

        class _Voxel_Count_Measure(Measure):
            # used to check voxel selection results
            is_trained = True

            def __init__(self, dtype, **kwargs):
                Measure.__init__(self, **kwargs)
                self.dtype = dtype

            def _call(self, dset):
                return self.dtype(dset.nfeatures)

        sh = (20, 20, 20)
        vg = volgeom.VolGeom(sh, np.identity(4))

        density = 20

        outer = surf.generate_sphere(density) * 10. + 5
        inner = surf.generate_sphere(density) * 5. + 5

        intermediate = outer * .5 + inner * .5
        xyz = intermediate.vertices

        radius = 50

        sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner,
                                                       outer)
        qe = queryengine.SurfaceVerticesQueryEngine(sel)

        for dtype in (int, float):
            sl = Searchlight(_Voxel_Count_Measure(dtype), queryengine=qe)

            ds = fmri_dataset(vg.get_empty_nifti_image(1))
            r = sl(ds)

            _, fn = tempfile.mkstemp('.niml.dset', 'dset')
            niml_dset.write(fn, r)
            rr = niml_dset.read(fn)

            os.remove(fn)

            assert_array_equal(r.samples, rr.samples)
Example #44
    def test_minimal_dataset(self):
        vol_shape = (10, 10, 10, 3)
        vol_affine = np.identity(4)
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        data = np.random.normal(size=vol_shape)
        msk = np.ones(vol_shape[:3])
        msk[:, 1:-1:2, :] = 0

        ni_data = nb.Nifti1Image(data, vol_affine)
        ni_msk = nb.Nifti1Image(msk, vol_affine)

        ds = fmri_dataset(ni_data, mask=ni_msk)

        sphere_density = 20
        outer = surf.generate_sphere(sphere_density) * 10. + 5
        inner = surf.generate_sphere(sphere_density) * 7. + 5

        radius = 10
        sel = surf_voxel_selection.run_voxel_selection(radius, ds,
                                                       inner, outer)

        sel_fids = set.union(*(set(sel[k]) for k in sel.keys()))

        ds_vox = map(tuple, ds.fa.voxel_indices)

        vg = sel.volgeom
        sel_vox = map(tuple, vg.lin2ijk(np.asarray(list(sel_fids))))

        fid_mask = np.asarray([v in sel_vox for v in ds_vox])
        assert_array_equal(fid_mask, sel.get_dataset_feature_mask(ds))

        # check if it raises errors
        ni_neg_msk = nb.Nifti1Image(1 - msk, vol_affine)
        neg_ds = fmri_dataset(ni_data, mask=ni_neg_msk) # inverted mask

        assert_raises(ValueError, sel.get_dataset_feature_mask, neg_ds)

        min_ds = sel.get_minimal_dataset(ds)
        assert_array_equal(min_ds.samples, ds[:, fid_mask].samples)
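One portability caveat: the map(tuple, ...) calls above yield lists only under Python 2; under Python 3 they are one-shot iterators, so the repeated membership tests in the fid_mask line would silently misbehave. A version-proof spelling (a set also makes the lookups cheaper):

ds_vox = [tuple(idx) for idx in ds.fa.voxel_indices]
sel_vox = set(tuple(ijk) for ijk in vg.lin2ijk(np.asarray(list(sel_fids))))
fid_mask = np.asarray([v in sel_vox for v in ds_vox])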
Example #46
def test_nifti_dataset_from3_d():
    """Test NiftiDataset based on 3D volume(s)
    """
    tssrc = os.path.join(pymvpa_dataroot, 'bold.nii.gz')
    masrc = os.path.join(pymvpa_dataroot, 'mask.nii.gz')

    # Test loading of 3D volumes
    # by default we are enforcing 4D, testing here with the demo 3d mask
    ds = fmri_dataset(masrc, mask=masrc, targets=1)
    assert_equal(len(ds), 1)

    import nibabel
    plain_data = nibabel.load(masrc).get_data()
    # Let's check that mapping back works as well
    assert_array_equal(plain_data,
                       map2nifti(ds).get_data().reshape(plain_data.shape))

    # test loading from a list of filenames

    # loading a mix of 4D and 3D volumes used to fail;
    # it should work now, so verify that it loads correctly
    dsfull_plusone = fmri_dataset((masrc, tssrc), mask=masrc, targets=1)

    # Let's prepare a custom NiftiImage
    dsfull = fmri_dataset(tssrc, mask=masrc, targets=1)
    assert_equal(len(dsfull) + 1, len(dsfull_plusone))
    assert_equal(dsfull.nfeatures, dsfull_plusone.nfeatures)
    # skip 3d mask in 0th sample

    assert_array_equal(dsfull.samples, dsfull_plusone[1:].samples)
    ds_selected = dsfull[3]
    nifti_selected = map2nifti(ds_selected)

    # Load dataset from a mix of 3D volumes
    # (given by filenames and NiftiImages)
    labels = [123, 2, 123]
    ds2 = fmri_dataset((masrc, masrc, nifti_selected),
                       mask=masrc,
                       targets=labels)
    assert_equal(ds2.nsamples, 3)
    assert_array_equal(ds2.samples[0], ds2.samples[1])
    assert_array_equal(ds2.samples[2], dsfull.samples[3])
    assert_array_equal(ds2.targets, labels)
Example #47
def test_nifti_dataset():
    """Basic testing of NiftiDataset
    """
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'example4d.nii.gz'),
                      targets=[1, 2], sprefix='voxel')
    assert_equal(ds.nfeatures, 294912)
    assert_equal(ds.nsamples, 2)

    assert_array_equal(ds.a.voxel_eldim, ds.a.imghdr['pixdim'][1:4])
    assert_true(ds.a['voxel_dim'].value == (128, 96, 24))

    # XXX move elsewhere
    #check that mapper honours elementsize
    #nb22 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.2)])
    #nb20 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.0)])
    #self.failUnless(nb22.shape[0] == 7)
    #self.failUnless(nb20.shape[0] == 5)

    merged = ds.copy()
    merged.append(ds)
    assert_equal(merged.nfeatures, 294912)
    assert_equal(merged.nsamples, 4)

    # check that the header survives
    for k in merged.a.imghdr.keys():
        assert_array_equal(merged.a.imghdr[k], ds.a.imghdr[k])

    # throw away old dataset and see if new one survives
    del ds
    assert_array_equal(merged.samples[3], merged.samples[1])

    # check whether we can use a plain ndarray as mask
    mask = np.zeros((128, 96, 24), dtype='bool')
    mask[40, 20, 12] = True
    nddata = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'example4d.nii.gz'),
                          targets=[1, 2],
                          mask=mask)
    assert_equal(nddata.nfeatures, 1)
    rmap = nddata.a.mapper.reverse1(np.array([44]))
    assert_equal(rmap.shape, (128, 96, 24))
    assert_equal(np.sum(rmap), 44)
    assert_equal(rmap[40, 20, 12], 44)
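The reverse-mapping assertions at the end follow directly from the single-voxel mask: reverse1 scatters the lone feature value back into an otherwise-zero volume. The same bookkeeping in plain numpy, as a sanity sketch:

import numpy as np

mask = np.zeros((128, 96, 24), dtype='bool')
mask[40, 20, 12] = True
vol = np.zeros(mask.shape)
vol[mask] = [44]              # scatter the single feature value into the volume
assert vol.sum() == 44        # everything else stays zero
assert vol[40, 20, 12] == 44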
Example #48
def test_nifti_dataset_from3_d():
    """Test NiftiDataset based on 3D volume(s)
    """
    tssrc = pathjoin(pymvpa_dataroot, 'bold.nii.gz')
    masrc = pathjoin(pymvpa_dataroot, 'mask.nii.gz')

    # Test loading of 3D volumes
    # by default we are enforcing 4D, testing here with the demo 3d mask
    ds = fmri_dataset(masrc, mask=masrc, targets=1)
    assert_equal(len(ds), 1)

    import nibabel
    plain_data = nibabel.load(masrc).get_data()
    # Let's check that mapping back works as well
    assert_array_equal(plain_data,
                       map2nifti(ds).get_data().reshape(plain_data.shape))

    # test loading from a list of filenames

    # loading a mix of 4D and 3D volumes used to fail;
    # it should work now, so verify that it loads correctly
    dsfull_plusone = fmri_dataset((masrc, tssrc), mask=masrc, targets=1)

    # Let's prepare a custom NiftiImage
    dsfull = fmri_dataset(tssrc, mask=masrc, targets=1)
    assert_equal(len(dsfull) + 1, len(dsfull_plusone))
    assert_equal(dsfull.nfeatures, dsfull_plusone.nfeatures)
    # skip 3d mask in 0th sample

    assert_array_equal(dsfull.samples, dsfull_plusone[1:].samples)
    ds_selected = dsfull[3]
    nifti_selected = map2nifti(ds_selected)

    # Load dataset from a mix of 3D volumes
    # (given by filenames and NiftiImages)
    labels = [123, 2, 123]
    ds2 = fmri_dataset((masrc, masrc, nifti_selected),
                       mask=masrc, targets=labels)
    assert_equal(ds2.nsamples, 3)
    assert_array_equal(ds2.samples[0], ds2.samples[1])
    assert_array_equal(ds2.samples[2], dsfull.samples[3])
    assert_array_equal(ds2.targets, labels)
Example #49
def result_hist(scan, masksize):
    # note: relies on module-level globals `plat` (platform config) and `s` (subject ID)
    paths, subList, contrasts, masks = lmvpa.initpaths(plat)
    bfn = os.path.join(paths[0], 'Maps', 'Encoding', scan + '.nii.gz')
    f, axarr = plt.subplots(len(masks), sharex=True)
    axarr[0].set_title('Model: ' + scan + ' | Mask size: ' + str(masksize))
    for i, m in enumerate(masks):
        mask = os.path.join(paths[0], 'data', s, 'masks', s + '_' + m + '.nii.gz')
        d = fmri_dataset(bfn, mask=mask)
        axarr[i].hist(np.mean(d[0].samples, 0), bins=30)
        axarr[i].axvline(x=np.median(d.samples), color='r')
        axarr[i].set_ylabel(m)
Example #50
def load_example_fmri_dataset():
    """Load minimal fMRI dataset that is shipped with PyMVPA."""
    from mvpa2.datasets.mri import fmri_dataset
    from mvpa2.misc.io import SampleAttributes

    attr = SampleAttributes(os.path.join(pymvpa_dataroot, 'attributes.txt'))
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'bold.nii.gz'),
                      targets=attr.targets, chunks=attr.chunks,
                      mask=os.path.join(pymvpa_dataroot, 'mask.nii.gz'))

    return ds
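A likely usage pattern, assuming the PyMVPA sample data under pymvpa_dataroot is installed:

ds = load_example_fmri_dataset()
print(ds.shape)            # (n_volumes, n_voxels_inside_mask)
print(set(ds.sa.targets))  # condition labels taken from attributes.txt
print(set(ds.sa.chunks))   # run/chunk IDs taken from attributes.txt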
Example #53
def run(args):
    from mvpa2.base.hdf5 import h5save
    ds = None
    if args.txt_data is not None:
        verbose(1, "Load data from TXT file '%s'" % args.txt_data)
        samples = _load_from_txt(args.txt_data)
        ds = Dataset(samples)
    elif args.npy_data is not None:
        verbose(1, "Load data from NPY file '%s'" % args.npy_data)
        samples = _load_from_npy(args.npy_data)
        ds = Dataset(samples)
    elif args.mri_data is not None:
        verbose(1, "Load data from MRI image(s) %s" % args.mri_data)
        from mvpa2.datasets.mri import fmri_dataset
        vol_attr = dict()
        if args.add_vol_attr is not None:
            # XXX add a way to use the mapper of an existing dataset to
            # add a volume attribute without having to load the entire
            # mri data again
            vol_attr = dict(args.add_vol_attr)
            if len(args.add_vol_attr) != len(vol_attr):
                warning("--vol-attr option with duplicate attribute name: "
                        "check arguments!")
            verbose(2, "Add volumetric feature attributes: %s" % vol_attr)
        ds = fmri_dataset(args.mri_data, mask=args.mask, add_fa=vol_attr)

    if ds is None:
        if args.data is None:
            raise RuntimeError('no data source specified')
        else:
            ds = hdf2ds(args.data)[0]
    else:
        if args.data is not None:
            verbose(1, 'ignoring dataset input in favor of other data source '
                       '-- remove either one to disambiguate')

    # act on all attribute options
    ds = process_common_dsattr_opts(ds, args)

    if args.add_fsl_mcpar is not None:
        from mvpa2.misc.fsl.base import McFlirtParams
        mc_par = McFlirtParams(args.add_fsl_mcpar)
        for param in mc_par:
            verbose(2, "Add motion regressor as sample attribute '%s'"
                       % ('mc_' + param))
            ds.sa['mc_' + param] = mc_par[param]

    verbose(3, "Dataset summary %s" % (ds.summary()))
    # and store
    outfilename = args.output
    if not outfilename.endswith('.hdf5'):
        outfilename += '.hdf5'
    verbose(1, "Save dataset to '%s'" % outfilename)
    h5save(outfilename, ds, mkdir=True, compression=args.hdf5_compression)
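The saved file round-trips through PyMVPA's HDF5 layer; a minimal sketch of writing and reading a toy dataset the same way (the filename and compression value are illustrative):

import numpy as np
from mvpa2.base.hdf5 import h5save, h5load
from mvpa2.datasets import Dataset

ds = Dataset(np.random.randn(4, 3))
h5save('demo.hdf5', ds, mkdir=True, compression='gzip')  # mirrors args.hdf5_compression
ds2 = h5load('demo.hdf5')
assert np.allclose(ds.samples, ds2.samples)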
Example #54
    def __init__(self, fsfdir, tempderiv, motpars, time_res):
        # Sets up dataset-wide variables

        self.tempderiv = tempderiv
        self.motpars = motpars
        self.time_res = time_res

        if not os.path.exists(fsfdir):
            print 'ERROR: %s does not exist!' % fsfdir

        if not fsfdir.endswith('/'):
            fsfdir = ''.join([fsfdir, '/'])

        self.fsfdir = fsfdir

        fsffile = ''.join([self.fsfdir, 'design.fsf'])
        desmatfile = ''.join([self.fsfdir, 'design.mat'])

        design = read_fsl_design(fsffile)

        self.desmat = FslGLMDesign(desmatfile)

        self.nevs = self.desmat.mat.shape[1]
        self.ntp = self.desmat.mat.shape[0]

        self.TR = round(design['fmri(tr)'], 2)

        self.hrf = spm_hrf(self.time_res)

        self.time_up = N.arange(0, self.TR * self.ntp + self.time_res,
                                self.time_res)

        self.max_evtime = self.TR * self.ntp - 2
        self.n_up = len(self.time_up)

        if not os.path.exists(fsfdir + 'betaseries'):
            os.mkdir(fsfdir + 'betaseries')

        # load data

        maskimg = ''.join([fsfdir, 'mask.nii.gz'])
        self.raw_data = fmri_dataset(fsfdir + 'filtered_func_data.nii.gz',
                                     mask=maskimg)
        voxmeans = N.mean(self.raw_data.samples, axis=0)
        self.data = self.raw_data.samples - voxmeans

        self.nvox = self.raw_data.nfeatures

        cutoff = design['fmri(paradigm_hp)'] / self.TR

        self.F = get_smoothing_kernel(cutoff, self.ntp)
Example #55
    def __init__(self, fsfdir, time_res, name):
        # Sets up dataset-wide variables

        self.name = name

        self.time_res = time_res

        if not os.path.exists(fsfdir):
            print 'ERROR: %s does not exist!' % fsfdir

        if not fsfdir.endswith('/'):
            fsfdir = ''.join([fsfdir, '/'])

        self.fsfdir = fsfdir

        fsffile = ''.join([self.fsfdir, 'design.fsf'])
        desmatfile = ''.join([self.fsfdir, 'design.mat'])

        design = read_fsl_design(fsffile)

        self.desmat = FslGLMDesign2(desmatfile)

        self.nevs = self.desmat.mat.shape[1]

        self.ntp = self.desmat.mat.shape[0]

        self.TR = round(design['fmri(tr)'], 2)

        self.hrf = spm_hrf(self.time_res)

        self.time_up = N.arange(0, self.TR * self.ntp + self.time_res,
                                self.time_res)

        self.max_evtime = self.TR * self.ntp - 2
        self.n_up = len(self.time_up)

        if not os.path.exists(fsfdir + 'betaseries'):
            os.mkdir(fsfdir + 'betaseries')

        # load data

        maskimg = ''.join([fsfdir, 'mask.nii.gz'])
        self.raw_data = fmri_dataset(fsfdir + 'filtered_func_data.nii.gz',
                                     mask=maskimg)
        voxmeans = N.mean(self.raw_data.samples, axis=0)
        self.data = self.raw_data.samples - voxmeans

        self.nvox = self.raw_data.nfeatures

        cutoff = design['fmri(paradigm_hp)'] / self.TR

        self.F = get_smoothing_kernel(cutoff, self.ntp)
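FSL's design.fsf stores fmri(paradigm_hp) as a high-pass period in seconds, so dividing by the TR expresses the cutoff in volumes before it reaches get_smoothing_kernel. With illustrative numbers:

TR = 2.0                   # seconds per volume, from design['fmri(tr)']
paradigm_hp = 100.0        # high-pass period in seconds, from the .fsf
cutoff = paradigm_hp / TR  # 50.0 volumes, as passed to get_smoothing_kernel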
Example #56
def test_er_nifti_dataset_mapping():
    """Some mapping testing -- more tests is better
    """
    # z,y,x
    sample_size = (4, 3, 2)
    # t,z,y,x
    samples = np.arange(120).reshape((5, ) + sample_size)
    dsmask = np.arange(24).reshape(sample_size) % 2
    import nibabel
    tds = fmri_dataset(nibabel.Nifti1Image(samples.T, None),
                       mask=nibabel.Nifti1Image(dsmask.T, None))
    ds = eventrelated_dataset(tds,
                              events=[
                                  Event(onset=0,
                                        duration=2,
                                        label=1,
                                        chunk=1,
                                        features=[1000, 1001]),
                                  Event(onset=1,
                                        duration=2,
                                        label=2,
                                        chunk=1,
                                        features=[2000, 2001])
                              ])
    nfeatures = tds.nfeatures
    mask = np.zeros(dsmask.shape, dtype='bool')
    mask[0, 0, 0] = mask[1, 0, 1] = mask[0, 0, 1] = 1
    fmask = ds.a.mapper.forward1(mask.T)
    # select using mask in volume and all features in the other part
    ds_sel = ds[:, fmask]

    # now tests
    assert_array_equal(mask.reshape(24).nonzero()[0], [0, 1, 7])
    # two events, 2 orig features at 2 timepoints
    assert_equal(ds_sel.samples.shape, (2, 4))
    assert_array_equal(ds_sel.sa.features, [[1000, 1001], [2000, 2001]])
    assert_array_equal(ds_sel.samples, [[1, 7, 25, 31], [25, 31, 49, 55]])
    # reproducibility
    assert_array_equal(ds_sel.samples,
                       ds_sel.a.mapper.forward(np.rollaxis(samples.T, -1)))

    # reverse-mapping
    rmapped = ds_sel.a.mapper.reverse1(np.arange(10, 14))
    assert_equal(np.rollaxis(rmapped, 0, 4).T.shape, (2, ) + sample_size)
    expected = np.zeros((2, ) + sample_size, dtype='int')
    expected[0, 0, 0, 1] = 10
    expected[0, 1, 0, 1] = 11
    expected[1, 0, 0, 1] = 12
    expected[1, 1, 0, 1] = 13
    assert_array_equal(np.rollaxis(rmapped, 0, 4).T, expected)
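The boxcar mapping behind eventrelated_dataset concatenates each event's timepoints feature-wise, which is why a duration-2 event contributes 2 x n_features values per sample. A minimal numpy sketch of that flattening, with illustrative shapes:

import numpy as np

data = np.arange(20).reshape(5, 4)             # 5 volumes x 4 in-mask voxels
onset, duration = 1, 2                         # one Event covering volumes 1 and 2
sample = data[onset:onset + duration].ravel()  # volume 1's voxels, then volume 2's
assert sample.shape == (duration * data.shape[1],)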
Example #57
def test_strip_nibabel():
    # plenty of implicit testing happens elsewhere; just make sure it doesn't
    # ruin other datasets
    ds = Dataset([range(5)])
    strip_nibabel(ds)
    assert_true('imgtype' not in ds.a)
    # can run multiple times: idempotent
    ds = fmri_dataset(pathjoin(
        pymvpa_dataroot, 'haxby2001', 'sub001', 'BOLD', 'task001_run001',
        'bold_25mm.nii.gz'))
    strip_nibabel(ds)  # this is real
    strip_nibabel(ds)  # this is not a copy&paste error!
    assert_true('imgtype' in ds.a)
    assert_true('imgaffine' in ds.a)
    assert_equal(type(ds.a.imghdr), dict)
Example #58
    def test_agreement_surface_volume(self):
        '''test agreement between volume-based and surface-based
        searchlights when using euclidean measure'''

        #import runner
        def sum_ds(ds):
            return np.sum(ds)

        radius = 3

        # make a small dataset with a mask
        sh = (10, 10, 10)
        msk = np.zeros(sh)
        for i in xrange(0, sh[0], 2):
            msk[i, :, :] = 1
        vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)

        # make an image
        nt = 6
        img = vg.get_masked_nifti_image(nt)
        ds = fmri_dataset(img, mask=msk)

        # run the searchlight
        sl = sphere_searchlight(sum_ds, radius=radius)
        m = sl(ds)

        # now use surface-based searchlight
        v = volsurf.from_volume(ds)
        source_surf = v.intermediate_surface
        node_msk = np.logical_not(np.isnan(source_surf.vertices[:, 0]))

        # check that the mask matches with what we used earlier
        assert_array_equal(msk.ravel() + 0., node_msk.ravel() + 0.)

        source_surf_nodes = np.nonzero(node_msk)[0]

        sel = surf_voxel_selection.voxel_selection(v, float(radius),
                                        source_surf=source_surf,
                                        source_surf_nodes=source_surf_nodes,
                                        distance_metric='euclidean')

        qe = queryengine.SurfaceVerticesQueryEngine(sel)
        sl = Searchlight(sum_ds, queryengine=qe)
        r = sl(ds)

        # check whether they give the same results
        assert_array_equal(r.samples, m.samples)