Example no. 1
def test_ex_from_masked():
    ds = Dataset.from_wizard(samples=np.atleast_2d(np.arange(5)).view(myarray),
                             targets=1,
                             chunks=1)
    # simple sequence has to be a single pattern
    assert_equal(ds.nsamples, 1)
    # array subclass survives
    ok_(isinstance(ds.samples, myarray))

    # check correct pattern layout (1x5)
    assert_array_equal(ds.samples, [[0, 1, 2, 3, 4]])

    # check for single label and origin
    assert_array_equal(ds.targets, [1])
    assert_array_equal(ds.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(
        ValueError, vstack,
        (ds, Dataset.from_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    ds = vstack((ds,
                 Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                     targets=2,
                                     chunks=2)))
    assert_equal(ds.nsamples, 3)
    assert_array_equal(ds.targets, [1, 2, 2])
    assert_array_equal(ds.chunks, [1, 2, 2])

    # test unique class labels
    ds = vstack((ds,
                 Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                     targets=3,
                                     chunks=5)))
    assert_array_equal(ds.sa['targets'].unique, [1, 2, 3])

    # test wrong attributes length
    assert_raises(ValueError,
                  Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)),
                  targets=[1, 2, 3],
                  chunks=2)
    assert_raises(ValueError,
                  Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)),
                  targets=[1, 2, 3, 4],
                  chunks=[2, 2, 2])

    # now test one that is using from_masked
    ds = datasets['3dlarge']
    for a in ds.sa:
        assert_equal(len(ds.sa[a].value), len(ds))
    for a in ds.fa:
        assert_equal(len(ds.fa[a].value), ds.nfeatures)
Example no. 2
def test_mergeds():
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.fa['one'] = np.ones(5)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1, chunks=1)
    data1.fa['one'] = np.zeros(5)
    data2 = Dataset.from_wizard(np.ones((3, 5)), targets=2, chunks=1)
    data3 = Dataset.from_wizard(np.ones((4, 5)), targets=2)
    data4 = Dataset.from_wizard(np.ones((2, 5)), targets=3, chunks=2)
    data4.fa['test'] = np.arange(5)

    merged = vstack((data1.copy(), data2))

    ok_(merged.nfeatures == 5)
    l12 = [1] * 5 + [2] * 3
    l1 = [1] * 8
    ok_((merged.targets == l12).all())
    ok_((merged.chunks == l1).all())

    data_append = vstack((data1.copy(), data2))

    ok_(data_append.nfeatures == 5)
    ok_((data_append.targets == l12).all())
    ok_((data_append.chunks == l1).all())

    #
    # vstacking
    #
    if __debug__:
        # we need the same samples attributes in both datasets
        assert_raises(ValueError, vstack, (data2, data3))

        # tested only in __debug__
        assert_raises(ValueError, vstack, (data0, data1, data2, data3))
    datasets = (data1, data2, data4)
    merged = vstack(datasets)
    assert_equal(merged.shape,
                 (np.sum([len(ds) for ds in datasets]), data1.nfeatures))
    assert_true('test' in merged.fa)
    assert_array_equal(merged.sa.targets, [1] * 5 + [2] * 3 + [3] * 2)

    #
    # hstacking
    #
    assert_raises(ValueError, hstack, datasets)
    datasets = (data0, data1)
    merged = hstack(datasets)
    assert_equal(merged.shape,
                 (len(data1), np.sum([ds.nfeatures for ds in datasets])))
    assert_true('chunks' in merged.sa)
    assert_array_equal(merged.fa.one, [1] * 5 + [0] * 5)
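A minimal sketch of the stacking contract exercised above, kept separate from the original test: vstack concatenates samples and needs matching feature counts (and sample attributes), while hstack concatenates features and needs matching sample counts. The toy shapes below are illustrative only.

import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.base.dataset import vstack, hstack

# toy datasets, not part of the test suite above
a = Dataset.from_wizard(np.zeros((2, 4)), targets=1, chunks=1)
b = Dataset.from_wizard(np.ones((3, 4)), targets=2, chunks=2)
vs = vstack((a, b))   # shape (5, 4): samples are concatenated
hs = hstack((a, a))   # shape (2, 8): features are concatenated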
Example no. 4
def test_mergeds2():
    """Test composition of new datasets by addition of existing ones
    """
    data = dataset_wizard([range(5)], targets=1, chunks=1)

    assert_array_equal(data.UT, [1])

    # simple sequence has to be a single pattern
    assert_equal(data.nsamples, 1)
    # check correct pattern layout (1x5)
    assert_array_equal(data.samples, [[0, 1, 2, 3, 4]])

    # check for single labels and origin
    assert_array_equal(data.targets, [1])
    assert_array_equal(data.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(ValueError, vstack,
                  (data, dataset_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    dss = datasets['uni2large'].samples
    data = vstack((data, dataset_wizard(dss[:2, :5], targets=2, chunks=2)))
    assert_equal(data.nfeatures, 5)
    assert_array_equal(data.targets, [1, 2, 2])
    assert_array_equal(data.chunks, [1, 2, 2])

    # test automatic origins
    data = vstack((data, (dataset_wizard(dss[3:5, :5],
                                         targets=3,
                                         chunks=[0, 1]))))
    assert_array_equal(data.chunks, [1, 2, 2, 0, 1])

    # test unique class labels
    assert_array_equal(data.UT, [1, 2, 3])

    # test wrong label length
    assert_raises(ValueError,
                  dataset_wizard,
                  dss[:4, :5],
                  targets=[1, 2, 3],
                  chunks=2)

    # test wrong origin length
    assert_raises(ValueError,
                  dataset_wizard,
                  dss[:4, :5],
                  targets=[1, 2, 3, 4],
                  chunks=[2, 2, 2])
Example no. 5
    def _call(self, dataset):
        """Computes featurewise f-scores using compound comparisons."""

        targets_sa = dataset.sa[self.get_space()]
        orig_labels = targets_sa.value
        labels = orig_labels.copy()

        # Lets create a very shallow copy of a dataset with just
        # samples and targets_attr
        dataset_mod = Dataset(dataset.samples, sa={self.get_space(): labels})
        results = []
        for ul in targets_sa.unique:
            labels[orig_labels == ul] = 1
            labels[orig_labels != ul] = 2
            f_ds = OneWayAnova._call(self, dataset_mod)
            if 'fprob' in f_ds.fa:
                # rename the fprob attribute to something label specific
                # to survive final aggregation stage
                f_ds.fa['fprob_' + str(ul)] = f_ds.fa.fprob
                del f_ds.fa['fprob']
            results.append(f_ds)

        results = vstack(results)
        results.sa[self.get_space()] = targets_sa.unique
        return results
Example no. 6
    def _forward_dataset(self, ds):
        sliced_ds = [ds[sample_ids, feature_ids]
                            for sample_ids, feature_ids in
                                    zip(*(self._slice_sample_ids,
                                    self._slice_feature_ids))]

        return vstack(sliced_ds, True)
Example no. 7
    def _call(self, dataset):
        sensitivities = []
        for ind, analyzer in enumerate(self.__analyzers):
            if __debug__:
                debug("SA", "Computing sensitivity for SA#%d:%s" %
                      (ind, analyzer))
            sensitivity = analyzer(dataset)
            sensitivities.append(sensitivity)

        if __debug__:
            debug("SA",
                  "Returning %d sensitivities from %s" %
                  (len(sensitivities), self.__class__.__name__))

        sa_attr = self._sa_attr
        if isinstance(sensitivities[0], AttrDataset):
            smerged = []
            for i, s in enumerate(sensitivities):
                s.sa[sa_attr] = np.repeat(i, len(s))
                smerged.append(s)
            sensitivities = vstack(smerged)
        else:
            sensitivities = \
                Dataset(sensitivities,
                        sa={sa_attr: np.arange(len(sensitivities))})

        self.ca.sensitivities = sensitivities

        return sensitivities
Example no. 9
    def _call(self, dataset):
        """Computes featurewise f-scores using compound comparisons."""

        targets_sa = dataset.sa[self.get_space()]
        orig_labels = targets_sa.value
        labels = orig_labels.copy()

        # Lets create a very shallow copy of a dataset with just
        # samples and targets_attr
        dataset_mod = Dataset(dataset.samples,
                              sa={self.get_space() : labels})
        results = []
        for ul in targets_sa.unique:
            labels[orig_labels == ul] = 1
            labels[orig_labels != ul] = 2
            f_ds = OneWayAnova._call(self, dataset_mod)
            if 'fprob' in f_ds.fa:
                # rename the fprob attribute to something label specific
                # to survive final aggregation stage
                f_ds.fa['fprob_' + str(ul)] = f_ds.fa.fprob
                del f_ds.fa['fprob']
            results.append(f_ds)

        results = vstack(results)
        results.sa[self.get_space()] = targets_sa.unique
        return results
Example no. 10
    def test_usecase_concordancesl(self):
        import numpy as np
        from mvpa2.base.dataset import vstack
        from mvpa2.mappers.fx import mean_sample

        # Take our sample 3d dataset
        ds1 = datasets['3dsmall'].copy(deep=True)
        ds1.fa['voxel_indices'] = ds1.fa.myspace
        ds1.sa['subject'] = [1]  # not really necessary -- but let's for clarity
        ds1 = mean_sample()(ds1)  # so we get just a single representative sample

        def corr12(ds):
            corr = np.corrcoef(ds.samples)
            assert (corr.shape == (2, 2))  # for paranoid ones
            return corr[0, 1]

        for nsc, thr, thr_mean in ((0, 1.0, 1.0),
                                   (0.1, 0.3, 0.8)):  # just a bit of noise
            ds2 = ds1.copy(deep=True)  # make a copy for the 2nd subject
            ds2.sa['subject'] = [2]
            ds2.samples += nsc * np.random.normal(size=ds1.shape)

            # make sure that both have the same voxel indices
            assert (np.all(ds1.fa.voxel_indices == ds2.fa.voxel_indices))
            ds_both = vstack((ds1, ds2))  # join 2 images into a single dataset
            # with .sa.subject distinguishing both

            sl = sphere_searchlight(corr12, radius=2)
            slmap = sl(ds_both)
            ok_(np.all(slmap.samples >= thr))
            ok_(np.mean(slmap.samples) >= thr)
Example no. 11
    def _call(self, dataset):
        sensitivities = []
        for ind, analyzer in enumerate(self.__analyzers):
            if __debug__:
                debug("SA",
                      "Computing sensitivity for SA#%d:%s" % (ind, analyzer))
            sensitivity = analyzer(dataset)
            sensitivities.append(sensitivity)

        if __debug__:
            debug(
                "SA", "Returning %d sensitivities from %s" %
                (len(sensitivities), self.__class__.__name__))

        sa_attr = self._sa_attr
        if isinstance(sensitivities[0], AttrDataset):
            smerged = []
            for i, s in enumerate(sensitivities):
                s.sa[sa_attr] = np.repeat(i, len(s))
                smerged.append(s)
            sensitivities = vstack(smerged)
        else:
            sensitivities = \
                Dataset(sensitivities,
                        sa={sa_attr: np.arange(len(sensitivities))})

        self.ca.sensitivities = sensitivities

        return sensitivities
Example no. 12
    def test_usecase_concordancesl(self):
        import numpy as np
        from mvpa2.base.dataset import vstack
        from mvpa2.mappers.fx import mean_sample

        # Take our sample 3d dataset
        ds1 = datasets['3dsmall'].copy(deep=True)
        ds1.fa['voxel_indices'] = ds1.fa.myspace
        ds1.sa['subject'] = [1]  # not really necessary -- but let's for clarity
        ds1 = mean_sample()(ds1) # so we get just a single representative sample

        def corr12(ds):
            corr = np.corrcoef(ds.samples)
            assert(corr.shape == (2, 2)) # for paranoid ones
            return corr[0, 1]

        for nsc, thr, thr_mean in (
            (0, 1.0, 1.0),
            (0.1, 0.3, 0.8)):   # just a bit of noise
            ds2 = ds1.copy(deep=True)    # make a copy for the 2nd subject
            ds2.sa['subject'] = [2]
            ds2.samples += nsc * np.random.normal(size=ds1.shape)

            # make sure that both have the same voxel indices
            assert(np.all(ds1.fa.voxel_indices == ds2.fa.voxel_indices))
            ds_both = vstack((ds1, ds2))# join 2 images into a single dataset
                                        # with .sa.subject distinguishing both

            sl = sphere_searchlight(corr12, radius=2)
            slmap = sl(ds_both)
            ok_(np.all(slmap.samples >= thr))
            ok_(np.mean(slmap.samples) >= thr)
Example no. 13
    def test_ifs(self, svm):

        # measure for feature selection criterion and performance assessment
        # use the SAME clf!
        errorfx = mean_mismatch_error
        fmeasure = CrossValidation(svm,
                                   NFoldPartitioner(),
                                   postproc=mean_sample())
        pmeasure = ProxyMeasure(svm, postproc=BinaryFxNode(errorfx, 'targets'))

        ifs = IFS(fmeasure,
                  pmeasure,
                  Splitter('purpose', attr_values=['train', 'test']),
                  # go for lower tail selection as data_measure will return
                  # errors -> low is good
                  fselector=FixedNElementTailSelector(
                      1, tail='lower', mode='select'),
                  )
        wdata = self.get_data()
        wdata.sa['purpose'] = np.repeat('train', len(wdata))
        tdata = self.get_data()
        tdata.sa['purpose'] = np.repeat('test', len(tdata))
        ds = vstack((wdata, tdata))
        orig_nfeatures = ds.nfeatures

        ifs.train(ds)
        resds = ifs(ds)

        # fail if orig datasets are changed
        self.assertTrue(ds.nfeatures == orig_nfeatures)

        # check that the features set with the least error is selected
        self.assertTrue(len(ifs.ca.errors))
        e = np.array(ifs.ca.errors)
        self.assertTrue(resds.nfeatures == e.argmin() + 1)

        # repeat with dataset where selection order is known
        wsignal = datasets['dumb2'].copy()
        wsignal.sa['purpose'] = np.repeat('train', len(wsignal))
        tsignal = datasets['dumb2'].copy()
        tsignal.sa['purpose'] = np.repeat('test', len(tsignal))
        signal = vstack((wsignal, tsignal))
        ifs.train(signal)
        resds = ifs(signal)
        self.assertTrue((resds.samples[:, 0] == signal.samples[:, 0]).all())
Example no. 14
def loadsubdata(p, s, m=None, c=None):
    from mvpa2.base import dataset
    fds = {}
    for sub in s.keys():
        print 'loading ' + sub
        rds = [loadrundata(p, sub, r, m, c) for r in s[sub]]
        fds[sub] = dataset.vstack(rds, a=0)
    return fds
Example no. 15
    def _forward_data(self, data):
        sliced_data = [np.vstack(data[sample_id, feature_ids]
                         for sample_id in sample_ids)
                                for sample_ids, feature_ids in
                                    zip(*(self._slice_sample_ids,
                                    self._slice_feature_ids))]

        return vstack(sliced_data)
Example no. 16
    def _forward_data(self, data):
        sliced_data = [
            np.vstack(data[sample_id, feature_ids] for sample_id in sample_ids)
            for sample_ids, feature_ids in zip(*(self._slice_sample_ids,
                                                 self._slice_feature_ids))
        ]

        return vstack(sliced_data)
Example no. 18
def test_ex_from_masked():
    ds = Dataset.from_wizard(samples=np.atleast_2d(np.arange(5)).view(myarray),
                             targets=1, chunks=1)
    # simple sequence has to be a single pattern
    assert_equal(ds.nsamples, 1)
    # array subclass survives
    ok_(isinstance(ds.samples, myarray))

    # check correct pattern layout (1x5)
    assert_array_equal(ds.samples, [[0, 1, 2, 3, 4]])

    # check for single label and origin
    assert_array_equal(ds.targets, [1])
    assert_array_equal(ds.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(ValueError, vstack,
                  (ds, Dataset.from_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    ds = vstack((ds, Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                         targets=2, chunks=2)))
    assert_equal(ds.nsamples, 3)
    assert_array_equal(ds.targets, [1, 2, 2])
    assert_array_equal(ds.chunks, [1, 2, 2])

    # test unique class labels
    ds = vstack((ds, Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                         targets=3, chunks=5)))
    assert_array_equal(ds.sa['targets'].unique, [1, 2, 3])

    # test wrong attributes length
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)), targets=[1, 2, 3],
                  chunks=2)
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)), targets=[1, 2, 3, 4],
                  chunks=[2, 2, 2])

    # now test one that is using from_masked
    ds = datasets['3dlarge']
    for a in ds.sa:
        assert_equal(len(ds.sa[a].value), len(ds))
    for a in ds.fa:
        assert_equal(len(ds.fa[a].value), ds.nfeatures)
Example no. 19
def arg2ds(sources):
    """Convert a sequence of dataset sources into a dataset.

    This function would be used to convert a single --input
    multidata specification into a dataset. For multiple --input
    arguments execute this function in a loop.
    """
    from mvpa2.base.dataset import vstack
    return vstack(hdf2ds(sources))
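As the docstring says, several --input specifications are handled by calling arg2ds() once per specification; a hedged sketch (the HDF5 filenames below are hypothetical placeholders):

from mvpa2.base.dataset import vstack

# hypothetical filenames; each inner list stands for one --input argument
per_input_sources = [['subj01.hdf5'], ['subj02.hdf5']]
per_input_ds = [arg2ds(src) for src in per_input_sources]
combined = vstack(per_input_ds)  # one possible way to merge the results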
Example no. 21
    def test_ifs(self, svm):

        # measure for feature selection criterion and performance assessment
        # use the SAME clf!
        errorfx = mean_mismatch_error
        fmeasure = CrossValidation(svm, NFoldPartitioner(), postproc=mean_sample())
        pmeasure = ProxyMeasure(svm, postproc=BinaryFxNode(errorfx, 'targets'))

        ifs = IFS(fmeasure,
                  pmeasure,
                  Splitter('purpose', attr_values=['train', 'test']),
                  fselector=
                    # go for lower tail selection as data_measure will return
                    # errors -> low is good
                    FixedNElementTailSelector(1, tail='lower', mode='select'),
                  )
        wdata = self.get_data()
        wdata.sa['purpose'] = np.repeat('train', len(wdata))
        tdata = self.get_data()
        tdata.sa['purpose'] = np.repeat('test', len(tdata))
        ds = vstack((wdata, tdata))
        orig_nfeatures = ds.nfeatures

        ifs.train(ds)
        resds = ifs(ds)

        # fail if orig datasets are changed
        self.assertTrue(ds.nfeatures == orig_nfeatures)

        # check that the features set with the least error is selected
        self.assertTrue(len(ifs.ca.errors))
        e = np.array(ifs.ca.errors)
        self.assertTrue(resds.nfeatures == e.argmin() + 1)


        # repeat with dataset where selection order is known
        wsignal = datasets['dumb2'].copy()
        wsignal.sa['purpose'] = np.repeat('train', len(wsignal))
        tsignal = datasets['dumb2'].copy()
        tsignal.sa['purpose'] = np.repeat('test', len(tsignal))
        signal = vstack((wsignal, tsignal))
        ifs.train(signal)
        resds = ifs(signal)
        self.assertTrue((resds.samples[:,0] == signal.samples[:,0]).all())
Example no. 22
def perm_hist(subj):
	conf = AnalysisConfiguration()
	data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
	sub_dir = _opj(data_dir,conf.study_name,'sub{:0>3d}'.format(subj))
	directory = _opj(data_dir,'LP/sub{:0>3d}/results/'.format(subj))
	print conf.dir_name()
	for pair in conf.conditions_to_compare:
			#print _opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))
			files = sorted(glob(_opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))))
			plt.figure()
			plt.subplot(211)
			plt.title('sub{:0>3d}-{}{}'.format(subj,pair[0],pair[1]))
			print pair, " ", len(files)
			all_maps = []
			for f in files[:-1]:
				f_h = file(f,'r')
				m = pickle.load(f_h)
				all_maps.append(m)
				if 'perm' in f:
					color = 'black'
					line_width = 1
				else:
					color = 'crimson'
					line_width = 2
				plt.hist(np.transpose(m),bins=20,histtype='step',color=[color], lw = line_width)
			perms = vstack(all_maps)
			real_f = files[-1]
			f_h = file(real_f,'r')
			real_map = pickle.load(f_h)
			color = 'crimson'
			line_width = 2
			plt.hist(np.transpose(real_map),bins=20,histtype='step',color=[color], lw = line_width)
			percentiles = np.zeros((1,len(real_map.samples[0])))
			for i,vox in enumerate(real_map.samples[0]):
			    percentiles[0,i]=percentileofscore(perms[:,i].samples.flat,vox)
			plt.subplot(212)
			print len(percentiles[0])
			plt.hist(percentiles[0],bins=20,histtype='step')
			real_map.samples=percentiles
			nii = real_f.replace("_sl_map.p", "-acc.nii.gz")
			nii_file = nib.load(nii)
			perc_results = map2nifti(real_map, imghdr=nii_file.header)
			perc_nii_filename =real_f.replace("_sl_map.p", "-percentiles_sub{:0>3d}.nii.gz".format(subj))
			perc_results.to_filename(perc_nii_filename)
			thr_prc_filename = perc_nii_filename.replace(".nii.gz","_p0.01.nii.gz")
			thr = fsl.maths.Threshold(in_file=perc_nii_filename, thresh=100,
						  out_file=thr_prc_filename)
			thr.run()
			mni_thr_filename = thr_prc_filename.replace(".nii.gz","_mni.nii.gz")
			apply_warp(sub_dir,thr_prc_filename, mni_thr_filename)

			
	plt.show()
	#plt.savefig('/tmp/sub{:0>3d}_{}{}'.format(subj,pair[0],pair[1]))
	raw_input()
Example no. 23
def test_mergeds2():
    """Test composition of new datasets by addition of existing ones
    """
    data = dataset_wizard([range(5)], targets=1, chunks=1)

    assert_array_equal(data.UT, [1])

    # simple sequence has to be a single pattern
    assert_equal(data.nsamples, 1)
    # check correct pattern layout (1x5)
    assert_array_equal(data.samples, [[0, 1, 2, 3, 4]])

    # check for single labels and origin
    assert_array_equal(data.targets, [1])
    assert_array_equal(data.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(ValueError,
                  vstack,
                  (data, dataset_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    dss = datasets['uni2large'].samples
    data = vstack((data, dataset_wizard(dss[:2, :5], targets=2, chunks=2)))
    assert_equal(data.nfeatures, 5)
    assert_array_equal(data.targets, [1, 2, 2])
    assert_array_equal(data.chunks, [1, 2, 2])

    # test automatic origins
    data = vstack((data, (dataset_wizard(dss[3:5, :5], targets=3, chunks=[0, 1]))))
    assert_array_equal(data.chunks, [1, 2, 2, 0, 1])

    # test unique class labels
    assert_array_equal(data.UT, [1, 2, 3])

    # test wrong label length
    assert_raises(ValueError, dataset_wizard, dss[:4, :5], targets=[ 1, 2, 3 ],
                                         chunks=2)

    # test wrong origin length
    assert_raises(ValueError, dataset_wizard, dss[:4, :5],
                  targets=[ 1, 2, 3, 4 ], chunks=[ 2, 2, 2 ])
Example no. 24
def load_subject_ds(conf_file, 
                    task, 
                    extra_sa=None,
                    prepro=StandardPreprocessingPipeline(), 
                    **kwargs):
    
    """
    This is identical to load_subjectwise_ds but we can
    specify a preprocessing pipeline to manage data
    
    """
    
    # TODO: conf file should include the full path
    conf = read_configuration(conf_file, task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    # Subject file should be included in configuration
    subject_file = conf['subjects']
    subjects, extra_sa = load_subject_file(subject_file)
        
    logger.info('Merging %s subjects from %s' % (str(len(subjects)), data_path))
    
    for i, subj in enumerate(subjects):
        
        ds = load_dataset(data_path, subj, task, **conf)
        
        if ds == None:
            continue
        
        ds = prepro.transform(ds)
        
        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
        
        
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged
Example no. 25
def load_subjectwise_ds(path, 
                       subjects, 
                       conf_file, 
                       task, 
                       extra_sa=None,  
                       **kwargs):
    """
    extra_sa: dict or None, sample attributes added to the final dataset, they should be
    the same length as the subjects.
    
    subject: either a list of subjects or a csv file
    
    """
    
    conf = read_configuration(os.path.join(path, conf_file), task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    
    if isinstance(subjects, str):        
        subjects, extra_sa = load_subject_file(subjects)
        
    
    logger.info('Merging subjects from '+data_path)
    
    for i, subj in enumerate(subjects):
        
        ds = load_dataset(data_path, subj, task, **conf)
        
        ds = detrend_dataset(ds, task, **conf)
        ds = normalize_dataset(ds, **conf)
        
        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
        
        
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged, ['group'], conf
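A hypothetical call sketch following the signature and docstring above; the path, configuration file, task name, subject IDs and extra_sa values are placeholders, and extra_sa must supply one value per subject:

ds, group_attrs, conf = load_subjectwise_ds('/path/to/study',        # placeholder path
                                            ['subj01', 'subj02'],    # or a csv subject file
                                            'analysis.conf',         # placeholder conf file
                                            'task_fmri',             # placeholder task name
                                            extra_sa={'age': [25, 31]})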
Example no. 26
def test_stack_add_dataset_attributes():
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.a['one'] = np.ones(2)
    data0.a['two'] = 2
    data0.a['three'] = 'three'
    data0.a['common'] = range(10)
    data0.a['array'] = np.arange(10)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data1.a['one'] = np.ones(3)
    data1.a['two'] = 3
    data1.a['four'] = 'four'
    data1.a['common'] = range(10)
    data1.a['array'] = np.arange(10)


    vstacker = lambda x: vstack((data0, data1), a=x)
    hstacker = lambda x: hstack((data0, data1), a=x)

    add_params = (1, None, 'unique', 'uniques', 'all', 'drop_nonunique')

    for stacker in (vstacker, hstacker):
        for add_param in add_params:
            if add_param == 'unique':
                assert_raises(DatasetError, stacker, add_param)
                continue

            r = stacker(add_param)

            if add_param == 1:
                assert_array_equal(data1.a.one, r.a.one)
                assert_equal(r.a.two, 3)
                assert_equal(r.a.four, 'four')
                assert_true('three' not in r.a.keys())
                assert_true('array' in r.a.keys())
            elif add_param == 'uniques':
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.four, ('four',))
            elif add_param == 'all':
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.three, ('three', None))
            elif add_param == 'drop_nonunique':
                assert_equal(set(r.a.keys()),
                             set(['common', 'three', 'four', 'array']))
                assert_equal(r.a.three, 'three')
                assert_equal(r.a.four, 'four')
                assert_equal(r.a.common, range(10))
                assert_array_equal(r.a.array, np.arange(10))
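Restating the a= merge policies that the assertions above pin down (this summary follows the test itself, not independent documentation); data0 and data1 are the datasets built at the top of the test:

r1 = vstack((data0, data1), a=1)                 # dataset attributes taken from index 1
ra = vstack((data0, data1), a='all')             # every key kept; missing values become None
ru = vstack((data0, data1), a='uniques')         # tuples of the unique values per key
rd = vstack((data0, data1), a='drop_nonunique')  # keys with conflicting values are dropped
# a='unique' would raise DatasetError here, since e.g. .a.two differs (2 vs. 3)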
Example no. 27
def test_labelpermutation_randomsampling():
    ds = vstack([Dataset.from_wizard(np.ones((5, 10)), targets=range(5), chunks=i)
                    for i in xrange(1, 6)])
    # assign some feature attributes
    ds.fa['roi'] = np.repeat(np.arange(5), 2)
    ds.fa['lucky'] = np.arange(10) % 2
    # use subclass for testing if it would survive
    ds.samples = ds.samples.view(myarray)

    ok_(ds.get_nsamples_per_attr('targets') == {0:5, 1:5, 2:5, 3:5, 4:5})
    sample = ds.random_samples(2)
    ok_(sample.get_nsamples_per_attr('targets').values() == [ 2, 2, 2, 2, 2 ])
    ok_((ds.sa['chunks'].unique == range(1, 6)).all())
Example no. 29
def create_betas_per_trial_with_pymvpa_roni(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well. not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print mask_fname
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue

            # all_events = dhandle.get_bold_run_model(model, subj, run_id)
        all_events = get_bold_run_model(dhandle, 2, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], event["id"])
                run_events.append(event)
                i += 1

                # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa["id"] = [x[x.find("-") + 1 :] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)

        # If a trial was dropped (the subject pressed a button) then the counter trial from the
        # other condition should also be dropped
        for pair in conf.conditions_to_compare:
            cond_bool = np.array([c in pair for c in glm_dataset.sa["condition"]])
            sub_dataset = glm_dataset[cond_bool]
            c = Counter(sub_dataset.sa.id)
            for value in c:
                if c[value] < 2:
                    id_bool = np.array([value in cond_id for cond_id in glm_dataset.sa["id"]])
                    glm_dataset = glm_dataset[np.bitwise_not(np.logical_and(id_bool, cond_bool))]

        run_datasets.append(glm_dataset)

    return vstack(run_datasets, 0)
Example no. 30
def get_dsties1():
    ds = datasets["uni2small"].copy()
    dtarget = ds.targets[0]  # duplicate target
    tied_samples = ds.targets == dtarget
    ds2 = ds[tied_samples].copy(deep=True)
    # add similar noise to both ties
    noise_level = 0.2
    ds2.samples += np.random.normal(size=ds2.shape) * noise_level
    ds[tied_samples].samples += np.random.normal(size=ds2.shape) * noise_level
    ds2.targets[:] = "TI"  # 'E' would have been swallowed since it is S2 here
    ds = vstack((ds, ds2))
    ds.a.ties = [dtarget, "TI"]
    ds.a.ties_idx = [ds.targets == t for t in ds.a.ties]
    return ds
Example no. 31
 def _balance_attr(self, ds):
     
     balanced_ds = []
     logger.debug(np.unique(ds.sa[self._attr].value))
     for attribute in np.unique(ds.sa[self._attr].value):
         ds_ = slice_dataset(ds, selection_dict={self._attr:[attribute]})
         
         ds_b = self._balance(ds_)  
         
         balanced_ds.append(ds_b)
                     
     balanced_ds = vstack(balanced_ds)
     balanced_ds.a.update(ds.a)
     
     return balanced_ds  
Example no. 32
def get_dsties1():
    ds = datasets['uni2small'].copy()
    dtarget = ds.targets[0]  # duplicate target
    tied_samples = ds.targets == dtarget
    ds2 = ds[tied_samples].copy(deep=True)
    # add similar noise to both ties
    noise_level = 0.2
    ds2.samples += \
                  np.random.normal(size=ds2.shape)*noise_level
    ds[tied_samples].samples += \
                  np.random.normal(size=ds2.shape)*noise_level
    ds2.targets[:] = 'TI'  # 'E' would have been swallowed since it is S2 here
    ds = vstack((ds, ds2))
    ds.a.ties = [dtarget, 'TI']
    ds.a.ties_idx = [ds.targets == t for t in ds.a.ties]
    return ds
Example no. 33
def test_nifti_dataset():
    """Basic testing of NiftiDataset
    """
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                           'example4d.nii.gz'),
                      targets=[1, 2],
                      sprefix='voxel')
    assert_equal(ds.nfeatures, 294912)
    assert_equal(ds.nsamples, 2)

    assert_array_equal(ds.a.voxel_eldim, ds.a.imghdr['pixdim'][1:4])
    assert_true(ds.a['voxel_dim'].value == (128, 96, 24))

    # XXX move elsewhere
    #check that mapper honours elementsize
    #nb22 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.2)])
    #nb20 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.0)])
    #self.assertTrue(nb22.shape[0] == 7)
    #self.assertTrue(nb20.shape[0] == 5)

    merged = vstack((ds.copy(), ds), a=0)
    assert_equal(merged.nfeatures, 294912)
    assert_equal(merged.nsamples, 4)

    # check that the header survives
    for k in merged.a.imghdr.keys():
        assert_array_equal(merged.a.imghdr[k], ds.a.imghdr[k])

    # throw away old dataset and see if new one survives
    del ds
    assert_array_equal(merged.samples[3], merged.samples[1])

    # check whether we can use a plain ndarray as mask
    mask = np.zeros((128, 96, 24), dtype='bool')
    mask[40, 20, 12] = True
    nddata = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                               'example4d.nii.gz'),
                          targets=[1, 2],
                          mask=mask)
    assert_equal(nddata.nfeatures, 1)
    rmap = nddata.a.mapper.reverse1(np.array([44]))
    assert_equal(rmap.shape, (128, 96, 24))
    assert_equal(np.sum(rmap), 44)
    assert_equal(rmap[40, 20, 12], 44)
Example no. 34
 def transform(self, ds):
     
     ds_ = SampleSlicer(self._selection).transform(ds)
     
     iterable = [np.unique(ds_.sa[a].value) for a in self._attr]
     
     ds_stack = []
     for attr in product(*iterable):
         
         mask = np.ones_like(ds_.targets, dtype=np.bool)
         
         for i, a in enumerate(attr):
             mask = np.logical_and(mask, ds_.sa[self._attr[i]].value == a)
         
         ds_stacked = hstack([d for d in ds_[mask]])
         ds_stacked = self.update_attribute(ds_stacked)
         ds_stack.append(ds_stacked)
     
     return vstack(ds_stack)
Example no. 35
    def transform(self, ds):

        ds_ = SampleSlicer(self._selection).transform(ds)

        iterable = [np.unique(ds_.sa[a].value) for a in self._attr]

        ds_stack = []
        for attr in product(*iterable):

            mask = np.ones_like(ds_.targets, dtype=np.bool)

            for i, a in enumerate(attr):
                mask = np.logical_and(mask, ds_.sa[self._attr[i]].value == a)

            ds_stacked = hstack([d for d in ds_[mask]])
            ds_stacked = self.update_attribute(ds_stacked)
            ds_stack.append(ds_stacked)

        return vstack(ds_stack)
Example no. 36
def load_meg_seed_ds(conf_file, task, prepro=Node(), **kwargs):

    # TODO: conf file should include the full path
    conf = read_configuration(conf_file, task)

    conf.update(kwargs)
    logger.debug(conf)

    data_path = conf['data_path']

    # Subject file should be included in configuration
    subject_file = conf['subjects']
    subjects, extra_sa = load_subject_file(subject_file)

    logger.info('Merging %s subjects from %s' %
                (str(len(subjects)), data_path))

    for i, subj in enumerate(subjects):

        ds = load_mat_ds(data_path, subj, task, **conf)

        ds = prepro.transform(ds)

        logger.debug(ds.shape)

        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]

        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)

        del ds

    ds_merged.a['prepro'] = prepro.get_names()

    return ds_merged
Example no. 37
def test_nifti_dataset():
    """Basic testing of NiftiDataset
    """
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'example4d.nii.gz'),
                       targets=[1,2], sprefix='voxel')
    assert_equal(ds.nfeatures, 294912)
    assert_equal(ds.nsamples, 2)

    assert_array_equal(ds.a.voxel_eldim, ds.a.imghdr['pixdim'][1:4])
    assert_true(ds.a['voxel_dim'].value == (128,96,24))


    # XXX move elsewhere
    #check that mapper honours elementsize
    #nb22 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.2)])
    #nb20 = np.array([i for i in data.a.mapper.getNeighborIn((1, 1, 1), 2.0)])
    #self.assertTrue(nb22.shape[0] == 7)
    #self.assertTrue(nb20.shape[0] == 5)

    merged = vstack((ds.copy(), ds), a=0)
    assert_equal(merged.nfeatures, 294912)
    assert_equal(merged.nsamples, 4)

    # check that the header survives
    for k in merged.a.imghdr.keys():
        assert_array_equal(merged.a.imghdr[k], ds.a.imghdr[k])

    # throw away old dataset and see if new one survives
    del ds
    assert_array_equal(merged.samples[3], merged.samples[1])

    # check whether we can use a plain ndarray as mask
    mask = np.zeros((128, 96, 24), dtype='bool')
    mask[40, 20, 12] = True
    nddata = fmri_dataset(samples=os.path.join(pymvpa_dataroot,'example4d.nii.gz'),
                          targets=[1,2],
                          mask=mask)
    assert_equal(nddata.nfeatures, 1)
    rmap = nddata.a.mapper.reverse1(np.array([44]))
    assert_equal(rmap.shape, (128, 96, 24))
    assert_equal(np.sum(rmap), 44)
    assert_equal(rmap[40, 20, 12], 44)
Example no. 38
 def _forward_dataset(self, ds):
     if self.__chunks_attr is None:
         return self._forward_dataset_helper(ds)
     else:
         # strip down dataset to speedup local processing
         if self.__attr_strategy == "remove":
             keep_sa = []
         else:
             keep_sa = None
         proc_ds = ds.copy(deep=False, sa=keep_sa, fa=[], a=[])
         # process all chunks individually
         # use a customsplitter to speed-up splitting
         spl = Splitter(self.__chunks_attr)
         dses = [self._forward_dataset_helper(d) for d in spl.generate(proc_ds)]
         # and merge them again
         mds = vstack(dses)
         # put back attributes
         mds.fa.update(ds.fa)
         mds.a.update(ds.a)
         return mds
Example no. 39
def load_meg_seed_ds(conf_file, task, prepro=Node(), **kwargs):
    
    # TODO: conf file should include the full path
    conf = read_configuration(conf_file, task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    # Subject file should be included in configuration
    subject_file = conf['subjects']
    subjects, extra_sa = load_subject_file(subject_file)
        
    logger.info('Merging %s subjects from %s' % (str(len(subjects)), data_path))
    
    for i, subj in enumerate(subjects):
        
        ds = load_mat_ds(data_path, subj, task, **conf)
        
        ds = prepro.transform(ds)
        
        logger.debug(ds.shape)
        
        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
                    
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged
Example no. 40
 def _forward_dataset(self, ds):
     if self.__chunks_attr is None:
         return self._forward_dataset_helper(ds)
     else:
         # strip down dataset to speedup local processing
         if self.__attr_strategy == 'remove':
             keep_sa = []
         else:
             keep_sa = None
         proc_ds = ds.copy(deep=False, sa=keep_sa, fa=[], a=[])
         # process all chunks individually
         # use a customsplitter to speed-up splitting
         spl = Splitter(self.__chunks_attr)
         dses = [self._forward_dataset_helper(d)
                     for d in spl.generate(proc_ds)]
         # and merge them again
         mds = vstack(dses)
         # put back attributes
         mds.fa.update(ds.fa)
         mds.a.update(ds.a)
         return mds
Example no. 41
def multiple_chunks(func, n_chunks, *args, **kwargs):
    """Replicate datasets multiple times raising different chunks

    Given some randomized (noisy) generator of a dataset with a single
    chunk call generator multiple times and place results into a
    distinct chunks.

    Returns
    -------
    ds : `mvpa2.datasets.base.Dataset`
    """
    dss = []
    for chunk in xrange(n_chunks):
        ds_ = func(*args, **kwargs)
        # might not have chunks at all
        if not ds_.sa.has_key('chunks'):
            ds_.sa['chunks'] = np.repeat(chunk + 1, ds_.nsamples)
        else:
            ds_.sa.chunks[:] = chunk + 1
        dss.append(ds_)

    return vstack(dss)
Example no. 42
def multiple_chunks(func, n_chunks, *args, **kwargs):
    """Replicate datasets multiple times raising different chunks

    Given some randomized (noisy) generator of a dataset with a single
    chunk call generator multiple times and place results into a
    distinct chunks.

    Returns
    -------
    ds : `mvpa2.datasets.base.Dataset`
    """
    dss = []
    for chunk in xrange(n_chunks):
        ds_ = func(*args, **kwargs)
        # might not have chunks at all
        if not 'chunks' in ds_.sa:
            ds_.sa['chunks'] = np.repeat(chunk + 1, ds_.nsamples)
        else:
            ds_.sa.chunks[:] = chunk + 1
        dss.append(ds_)

    return vstack(dss)
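A minimal usage sketch for the helper above; the toy generator is hypothetical, and any callable returning a single-chunk dataset would do:

import numpy as np
from mvpa2.datasets.base import Dataset

def toy_generator(nsamples=4, nfeatures=3):
    # hypothetical noisy generator that deliberately carries no 'chunks' attribute
    return Dataset(np.random.normal(size=(nsamples, nfeatures)),
                   sa={'targets': [0, 1] * (nsamples // 2)})

ds = multiple_chunks(toy_generator, 3)
print(ds.nsamples)             # 12 -- three replications of 4 samples each
print(ds.sa['chunks'].unique)  # chunk ids 1, 2, 3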
Example no. 43
def create_betas_per_trial_with_pymvpa(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well. not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print mask_fname
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue
        all_events = dhandle.get_bold_run_model(model, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], i)
                run_events.append(event)
                i += 1

                # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        print run_id

        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)
        run_datasets.append(glm_dataset)
    return vstack(run_datasets, 0)
Example no. 45
    def test_vstack_and_origids_issue(self):
        # That is actually what swaroop hit
        skip_if_no_external('shogun', ver_dep='shogun:rev', min_version=4455)

        # Inspired by the problem Swaroop ran into
        k  = LinearSGKernel(normalizer_cls=False)
        k_ = LinearSGKernel(normalizer_cls=False)   # to be cached
        ck = CachedKernel(k_)

        clf = sgSVM(svm_impl='libsvm', kernel=k, C=-1)
        clf_ = sgSVM(svm_impl='libsvm', kernel=ck, C=-1)

        cvte = CrossValidation(clf, NFoldPartitioner())
        cvte_ = CrossValidation(clf_, NFoldPartitioner())

        ds = datasets['uni2large'].copy(deep=True)
        ok_('orig_ids' not in ds.sa)    # assure that there are none
        ck.compute(ds)                  # so we initialize origids
        ok_('origids' in ds.sa)
        ds2 = ds.copy(deep=True)
        ds2.samples = np.zeros(ds2.shape)
        from mvpa2.base.dataset import vstack
        ds_vstacked = vstack((ds2, ds))
        # should complain now since there would not be unique
        # samples' origids
        if __debug__:
            assert_raises(ValueError, ck.compute, ds_vstacked)

        ds_vstacked.init_origids('samples')      # reset origids
        ck.compute(ds_vstacked)

        errs = cvte(ds_vstacked)
        errs_ = cvte_(ds_vstacked)
        # Following test would have failed since origids
        # were just ints, and then non-unique after vstack
        assert_array_equal(errs.samples, errs_.samples)
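The reusable pattern from the test above, as a short sketch grounded in the same test-suite datasets dict: after stacking datasets whose samples already carry origids, re-initialise them so they are unique again.

d1 = datasets['uni2large'].copy(deep=True)
d1.init_origids('samples')          # give d1 sample origids
d2 = d1.copy(deep=True)             # d2 now carries the very same origids
stacked = vstack((d1, d2))          # origids collide after stacking
stacked.init_origids('samples')     # re-initialise so every sample is unique again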
Example no. 46
def runsub(sub,
           thisContrast,
           thisContrastStr,
           filterLen,
           filterOrd,
           paramEst,
           chunklen,
           alphas=np.logspace(0, 3, 20),
           debug=False,
           write=False,
           roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # savitsky golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # gallant group zscores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # kay method: leave out a model run, use it to fit an HRF for each voxel
    # huth method: essentially use FIR
    # mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    # rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub],
                                     contrasts)  # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds,
                                     events,
                                     time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={
                                         'hrf_model': 'canonical',
                                         'drift_model': 'blank'
                                     },
                                     regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value,
                               paradigm=None,
                               add_regs=mc_params[sub],
                               drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]
    nchunks = int(len(thisDS) * paramEst / chunklen)
    nboots = 50
    covarmat = None
    mus = None
    lwts, lalphas, lres, lceil = bsr.bootstrap_ridge(rds[lidx],
                                                     ldes,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     cov0=covarmat,
                                                     mu0=mus,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     plot=debug,
                                                     use_corr=True)

    pwts, palphas, pres, pceil = bsr.bootstrap_ridge(rds[pidx],
                                                     pdes,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     plot=debug,
                                                     use_corr=True)
    print 'language ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    # need to change outstring
    if write:
        from mvpa2.base import dataset
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_weights.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lalphas, palphas])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_alphas.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_ceiling.nii.gz'))

    del lres, pres, lwts, pwts, lalphas, palphas, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, calphas, cres, cceil = bsr.bootstrap_ridge(crossSet,
                                                     des,
                                                     chunklen=chunklen,
                                                     nchunks=nchunks,
                                                     part_attr='chunks',
                                                     mode='test',
                                                     alphas=alphas,
                                                     single_alpha=True,
                                                     normalpha=False,
                                                     nboots=nboots,
                                                     corrmin=.2,
                                                     singcutoff=1e-10,
                                                     joined=None,
                                                     use_corr=True)
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_corr.nii.gz'))

        map2nifti(thisDS, cwts[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_weights.nii.gz'))
        map2nifti(thisDS, cwts[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_weights.nii.gz'))

        map2nifti(thisDS, calphas[calphas.chunks == 1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_alphas.nii.gz'))
        map2nifti(thisDS, calphas[calphas.chunks == 2]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_alphas.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_P2L_ridge_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(
                paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' +
                thisContrastStr + '_L2P_ridge_ceiling.nii.gz'))
    del cres, cwts, calphas, cceil
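
# Illustrative sketch, not part of the script above: bsr.bootstrap_ridge is
# project-specific, but the core fit it wraps is ordinary ridge regression
# scored by per-voxel prediction correlations. A minimal NumPy version of
# that idea, with made-up shapes:
import numpy as np

def ridge_fit_and_score(X_train, Y_train, X_test, Y_test, alpha):
    """Closed-form ridge weights, then voxel-wise test correlations."""
    n_regressors = X_train.shape[1]
    wts = np.linalg.solve(X_train.T.dot(X_train) + alpha * np.eye(n_regressors),
                          X_train.T.dot(Y_train))
    pred = X_test.dot(wts)
    corrs = np.array([np.corrcoef(pred[:, v], Y_test[:, v])[0, 1]
                      for v in range(Y_test.shape[1])])
    return wts, corrs

# toy data: 100/50 time points, 6 design columns, 20 voxels
X_tr, X_te = np.random.randn(100, 6), np.random.randn(50, 6)
Y_tr, Y_te = np.random.randn(100, 20), np.random.randn(50, 20)
wts, corrs = ridge_fit_and_score(X_tr, Y_tr, X_te, Y_te, alpha=10.0)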
Esempio n. 47
0
def runsub(sub, thisContrast, r, dstype='raw', roi='grayMatter', filterLen=49, filterOrd=3, write=False):

    if dstype == 'raw':
        outdir='PyMVPA'
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # Savitzky-Golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # Gallant group z-scores before regression.

        # zscore w.r.t. rest trials
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr='chunks')
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(rds, events, time_attr='time_coords',
                                      condition_attr=('trial_type', 'chunks'),
                                      design_kwargs={'add_regs': mc_params[sub], 'hrf_model': 'canonical'},
                                      return_model=True)

        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != '0']
    else:
        outdir=os.path.join('LSS', dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa['targets'] = fds.sa[thisContrast]
        zscore(fds, chunks_attr='chunks')

    fds = lmvpa.sortds(fds)
    print "searchlights"
    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight
    cvSL = sphere_searchlight(cv, radius=r)


    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    lidx = fds.chunks < fds.sa['chunks'].unique[len(fds.sa['chunks'].unique)/2]
    pidx = fds.chunks >= fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]

    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset
        map2nifti(fds, dataset.vstack([lres, pres])).\
            to_filename(os.path.join(
                        paths[0], 'Maps', outdir,
                        sub + '_' + roi + '_' + thisContrast + '_cvsl.nii.gz'))

    del lres, pres, cvSL

    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_P2L.nii.gz'))
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_L2P.nii.gz'))
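
# Illustrative sketch, not part of the script above: relabelling chunks to
# {1, 2} is what turns the searchlight cross-validation into cross-decoding.
# With exactly two chunks, NFoldPartitioner yields two folds, so the
# classifier is trained on one modality and tested on the other. A toy
# PyMVPA example (assumes a working mvpa2 installation):
import numpy as np
from mvpa2.suite import Dataset, NFoldPartitioner

toy = Dataset(np.random.randn(8, 4),
              sa={'targets': [0, 1, 0, 1, 0, 1, 0, 1],
                  'chunks': [1, 1, 1, 1, 2, 2, 2, 2]})
for fold in NFoldPartitioner().generate(toy):
    # partitions: 1 = training samples, 2 = testing samples
    print(fold.sa.partitions)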
Esempio n. 48
0
def runsub(sub,
           thisContrast,
           filterLen,
           filterOrd,
           thisContrastStr,
           roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group z-scores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub],
                                     contrasts)  # adding features

    # we can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds,
                                     events,
                                     time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={
                                         'hrf_model': 'canonical',
                                         'drift_model': 'blank'
                                     },
                                     regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    des = lmvpa.make_parammat(desX)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(
        thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, lres, lceil = bsr.bootstrap_linear(rds[lidx],
                                             ldes,
                                             part_attr='chunks',
                                             mode='test')
    pwts, pres, pceil = bsr.bootstrap_linear(rds[pidx],
                                             pdes,
                                             part_attr='chunks',
                                             mode='test')

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    print 'language ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    from mvpa2.base import dataset
    map2nifti(thisDS, dataset.vstack([lres, pres])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_corr.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_betas.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, cres, cceil = bsr.bootstrap_linear(crossSet,
                                             des,
                                             part_attr='chunks',
                                             mode='test')
    print 'cross: ' + str(np.mean(cres))

    map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_univar.nii.gz'))
    map2nifti(thisDS, cres[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_univar.nii.gz'))

    map2nifti(thisDS, cwts[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_betas.nii.gz'))
    map2nifti(thisDS, cwts[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_betas.nii.gz'))

    map2nifti(thisDS, cceil[0]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_P2L_ceiling.nii.gz'))
    map2nifti(thisDS, cceil[1]).to_filename(
        os.path.join(
            paths[0], 'Maps', 'Encoding',
            sub + '_' + roi + '_' + thisContrastStr + '_L2P_ceiling.nii.gz'))
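
# Illustrative sketch, not part of the script above: the lidx/pidx masks split
# the runs in half, since sa['chunks'].unique is sorted. A plain NumPy
# equivalent, assuming consecutively numbered runs:
import numpy as np

chunks = np.repeat(np.arange(8), 10)    # 8 runs, 10 volumes each
unique = np.unique(chunks)              # sorted run labels
middle = unique[len(unique) // 2]       # first label of the second half
lidx = chunks < middle                  # first half of the runs ("language")
pidx = chunks >= middle                 # second half of the runs ("pictures")
assert lidx.sum() == pidx.sum() == len(chunks) // 2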
Esempio n. 49
0
def load_struct_timeseries(study_dir,
                           subject,
                           mask,
                           feature_mask=None,
                           verbose=1,
                           zscore_run=True):
    """Load structure learning timeseries as a dataset."""
    # functional timeseries for each run
    subject_dir = os.path.join(study_dir, f'tesser_{subject}')
    runs = list(range(1, 7))
    bold_images = []
    for run in runs:
        bold = os.path.join(
            subject_dir, 'BOLD', 'antsreg', 'data',
            f'functional_run_{run}_bold_mcf_brain_corr_notemp.feat',
            'filtered_func_data.nii.gz')
        if not os.path.exists(bold):
            raise IOError(f'BOLD file does not exist: {bold}')
        bold_images.append(bold)

    # mask image to select voxels to load
    mask_dir = os.path.join(subject_dir, 'anatomy', 'antsreg', 'data',
                            'funcunwarpspace', 'rois', 'mni')
    mask_file = os.path.join(mask_dir, f'{mask}.nii.gz')
    if not os.path.exists(mask_file):
        raise IOError(f'Mask file does not exist: {mask_file}')
    if verbose:
        print(f'Masking with: {mask_file}')

    # feature mask, if specified
    if feature_mask is not None:
        feature_file = os.path.join(mask_dir, f'{feature_mask}.nii.gz')
        if not os.path.exists(feature_file):
            raise IOError(f'Feature mask does not exist: {feature_file}')
        if verbose:
            print(f'Using features within: {feature_file}')
        add_fa = {'include': feature_file}
    else:
        add_fa = None

    # load images and concatenate
    ds_list = []
    for run, bold_image in zip(runs, bold_images):
        ds_run = fmri_dataset(bold_image, mask=mask_file, add_fa=add_fa)
        ds_run.sa['run'] = np.tile(run, ds_run.shape[0])
        ds_list.append(ds_run)
    ds = vstack(ds_list)

    # copy attributes needed for reverse mapping to nifti images
    ds.a = ds_run.a

    # normalize within run
    if zscore_run:
        zscore(ds, chunks_attr='run')

    # set the include feature attribute
    if feature_mask is not None:
        ds.fa.include = ds.fa.include.astype(bool)
    else:
        ds.fa['include'] = np.ones(ds.shape[1], dtype=bool)
    return ds
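
# Hypothetical usage of load_struct_timeseries defined above; the study
# directory, subject ID, and mask name are placeholders, not real paths.
ds = load_struct_timeseries('/data/tesser_study', '101', 'b_hip',
                            feature_mask=None, verbose=1, zscore_run=True)
print(ds.shape)              # (volumes across the 6 runs, voxels in the mask)
print(ds.sa['run'].unique)   # [1 2 3 4 5 6]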
Esempio n. 50
0
def runsub(sub, thisContrast, thisContrastStr, testContrast,
           filterLen, filterOrd, write=False, debug=False,
           alphas=1, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group z-scores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                     regr_attrs=None)
    # 'add_regs': mc_params[sub]

    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None, add_regs=mc_params[sub], drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    covarmat = None
    mus = None
    lwts, _, lres, lceil = bsr.bootstrap_ridge(ds=rds[lidx], des=ldes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[0]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    print 'language ' + str(np.mean(lres))

    pwts, _, pres, pceil = bsr.bootstrap_ridge(ds=rds[pidx], des=pdes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[1]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    if write:
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_wts.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_ceiling.nii.gz'))

    for t in testContrast:
        tstr = '+'.join(t)
        lcorr = lmvpa.testmodel(wts=lwts, des=ldes, ds=rds[lidx], tc=cp.copy(t), use_corr=True)
        pcorr = lmvpa.testmodel(wts=pwts, des=pdes, ds=rds[pidx], tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, dataset.vstack([lcorr, pcorr])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_test_corrs.nii.gz'))

    del lres, pres, lwts, pwts, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    # cwts, cres, cceil = bsr.ridge(rds[pidx], pdes, mu0=mus, cov0=covarmat,
    #                                             part_attr='chunks', mode='test', alphas=alphas[0], single_alpha=True,
    #                                             normalpha=False, corrmin=.2, singcutoff=1e-10, joined=None,
    #                                             use_corr=True)
    cwts, _, cres, cceil = bsr.bootstrap_ridge(ds=crossSet, des=des, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[2]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    for t in testContrast:
        tstr = '+'.join(t)
        ccorr = lmvpa.testmodel(wts=cwts, des=des, ds=crossSet, tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, ccorr[0]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_P2L_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
            map2nifti(thisDS, ccorr[1]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_L2P_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))

        map2nifti(thisDS, cwts[cwts.chunks==1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))
        map2nifti(thisDS, cwts[cwts.chunks==2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
    del cres, cwts, cceil
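
# Illustrative sketch, not part of the script above: sg.sg_filter is
# project-specific, but (as an assumption about its intent) a Savitzky-Golay
# detrend per voxel can be written with SciPy; the window length and
# polynomial order play the roles of filterLen and filterOrd.
import numpy as np
from scipy.signal import savgol_filter

def sg_detrend(samples, window=49, order=3):
    """Subtract a Savitzky-Golay smoothed trend from each column (voxel)."""
    trend = savgol_filter(samples, window_length=window, polyorder=order, axis=0)
    return samples - trend

detrended = sg_detrend(np.random.randn(200, 10))   # 200 volumes, 10 voxels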
Esempio n. 51
0
    def _call(self, ds):
        # local binding
        generator = self._generator
        node = self._node
        ca = self.ca
        space = self.get_space()
        concat_as = self._concat_as

        if self.ca.is_enabled("stats") and (not 'stats' in node.ca or
                                            not node.ca.is_enabled("stats")):
            warning("'stats' conditional attribute was enabled, but "
                    "the assigned node '%s' either doesn't support it, "
                    "or it is disabled" % node)
        # precharge conditional attributes
        ca.datasets = []

        # run the node on all generated datasets
        results = []
        for i, sds in enumerate(generator.generate(ds) if generator else [ds]):
            if __debug__:
                debug('REPM', "%d-th iteration of %s on %s", (i, self, sds))
            if ca.is_enabled("datasets"):
                # store dataset in ca
                ca.datasets.append(sds)
            # run the beast
            result = node(sds)
            # callback
            if self._callback is not None:
                self._callback(data=sds, node=node, result=result)
            # subclass postprocessing
            result = self._repetition_postcall(sds, node, result)
            if space:
                # XXX maybe try to get something more informative from the
                # processing node (e.g. in 0.5 it used to be 'chunks'->'chunks'
                # to indicate what was trained and what was tested. Now it is
                # more tricky, because `node` could be anything
                result.set_attr(space, (i, ))
            # store
            results.append(result)

            if ca.is_enabled("stats") and 'stats' in node.ca \
               and node.ca.is_enabled("stats"):
                if not ca.is_set('stats'):
                    # create empty stats container of matching type
                    ca.stats = node.ca['stats'].value.__class__()
                # harvest summary stats
                ca['stats'].value.__iadd__(node.ca['stats'].value)

        # charge condition attribute
        self.ca.repetition_results = results

        # stack all results into a single Dataset
        if concat_as == 'samples':
            results = vstack(results, True)
        elif concat_as == 'features':
            results = hstack(results, True)
        else:
            raise ValueError("Unknown concatenation mode '%s'" % concat_as)
        # no need to store the raw results, since the Measure class will
        # automatically store them in a CA
        return results
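
# Illustrative sketch, not part of the class above: the concat_as branch
# decides whether per-repetition results become new samples or new features.
# A toy example with mvpa2's stackers:
import numpy as np
from mvpa2.suite import Dataset, vstack, hstack

fold1 = Dataset(np.array([[0.80, 0.60]]))   # e.g. one result row per fold
fold2 = Dataset(np.array([[0.70, 0.55]]))
print(vstack([fold1, fold2]).shape)   # (2, 2): stacked as samples
print(hstack([fold1, fold2]).shape)   # (1, 4): stacked as features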
Esempio n. 52
0
def runsub(sub, thisContrast, filterLen, filterOrd, thisContrastStr, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group z-scores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts) # adding features

    # we can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                 design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                 regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    des = lmvpa.make_parammat(desX)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, lres, lceil = bsr.bootstrap_linear(rds[lidx], ldes, part_attr='chunks', mode='test')
    pwts, pres, pceil = bsr.bootstrap_linear(rds[pidx], pdes, part_attr='chunks', mode='test')

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    print 'language ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    from mvpa2.base import dataset
    map2nifti(thisDS, dataset.vstack([lres, pres])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_corr.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_betas.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                  '_univar_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, cres, cceil = bsr.bootstrap_linear(crossSet, des, part_attr='chunks', mode='test')
    print 'cross: ' + str(np.mean(cres))

    map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_univar.nii.gz'))
    map2nifti(thisDS, cres[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_univar.nii.gz'))

    map2nifti(thisDS, cwts[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_betas.nii.gz'))
    map2nifti(thisDS, cwts[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_betas.nii.gz'))

    map2nifti(thisDS, cceil[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ceiling.nii.gz'))
    map2nifti(thisDS, cceil[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ceiling.nii.gz'))
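
# Illustrative sketch, not part of the script above: roughly what
# zscore(thisDS, chunks_attr='chunks') does, spelled out in NumPy. Each run
# (chunk) is standardised independently, so no statistics leak across the
# train/test boundary defined by chunks.
import numpy as np

def zscore_by_chunk(samples, chunks):
    out = samples.astype(float).copy()
    for c in np.unique(chunks):
        m = chunks == c
        out[m] = (out[m] - out[m].mean(axis=0)) / out[m].std(axis=0)
    return out

data = np.random.randn(40, 5) * 3 + 7
z = zscore_by_chunk(data, np.repeat([1, 2, 3, 4], 10))
assert np.allclose(z[:10].mean(axis=0), 0)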
Esempio n. 53
0
    def _call(self, ds):
        # local binding
        generator = self._generator
        node = self._node
        ca = self.ca
        space = self.get_space()
        concat_as = self._concat_as

        if self.ca.is_enabled("stats") and (not node.ca.has_key("stats") or
                                            not node.ca.is_enabled("stats")):
            warning("'stats' conditional attribute was enabled, but "
                    "the assigned node '%s' either doesn't support it, "
                    "or it is disabled" % node)
        # precharge conditional attributes
        ca.datasets = []

        # run the node on all generated datasets
        results = []
        for i, sds in enumerate(generator.generate(ds)):
            if __debug__:
                debug('REPM', "%d-th iteration of %s on %s",
                      (i, self, sds))
            if ca.is_enabled("datasets"):
                # store dataset in ca
                ca.datasets.append(sds)
            # run the beast
            result = node(sds)
            # callback
            if self._callback is not None:
                self._callback(data=sds, node=node, result=result)
            # subclass postprocessing
            result = self._repetition_postcall(sds, node, result)
            if space:
                # XXX maybe try to get something more informative from the
                # processing node (e.g. in 0.5 it used to be 'chunks'->'chunks'
                # to indicate what was trained and what was tested. Now it is
                # more tricky, because `node` could be anything
                result.set_attr(space, (i,))
            # store
            results.append(result)

            if ca.is_enabled("stats") and node.ca.has_key("stats") \
               and node.ca.is_enabled("stats"):
                if not ca.is_set('stats'):
                    # create empty stats container of matching type
                    ca.stats = node.ca['stats'].value.__class__()
                # harvest summary stats
                ca['stats'].value.__iadd__(node.ca['stats'].value)

        # charge condition attribute
        self.ca.repetition_results = results

        # stack all results into a single Dataset
        if concat_as == 'samples':
            results = vstack(results, True)
        elif concat_as == 'features':
            results = hstack(results, True)
        else:
            raise ValueError("Unkown concatenation mode '%s'" % concat_as)
        # no need to store the raw results, since the Measure class will
        # automatically store them in a CA
        return results
Esempio n. 54
0
def runsub(sub, thisContrast, thisContrastStr,
           filterLen, filterOrd,
           paramEst, chunklen, alphas=np.logspace(0, 3, 20), debug=False, write=False, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group z-scores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # refit events and regress...
    # get timing data from timing files
    # rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts) # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                     regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None, add_regs=mc_params[sub], drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]
    nchunks = int(len(thisDS) * paramEst / chunklen)
    nboots = 50
    covarmat = None
    mus = None
    lwts, lalphas, lres, lceil = bsr.bootstrap_ridge(rds[lidx], ldes, chunklen=chunklen, nchunks=nchunks,
                                              cov0=covarmat, mu0=mus, part_attr='chunks', mode='test',
                                              alphas=alphas, single_alpha=True, normalpha=False,
                                              nboots=nboots, corrmin=.2, singcutoff=1e-10, joined=None,
                                              plot=debug, use_corr=True)

    pwts, palphas, pres, pceil = bsr.bootstrap_ridge(rds[pidx], pdes, chunklen=chunklen, nchunks=nchunks,
                                              part_attr='chunks', mode='test',
                                              alphas=alphas, single_alpha=True, normalpha=False,
                                              nboots=nboots, corrmin=.2, singcutoff=1e-10, joined=None,
                                              plot=debug, use_corr=True)
    print 'language ' + str(np.mean(lres))

    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    # need to change outstring
    if write:
        from mvpa2.base import dataset
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_weights.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lalphas, palphas])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_alphas.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_ceiling.nii.gz'))

    del lres, pres, lwts, pwts, lalphas, palphas, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, calphas, cres, cceil = bsr.bootstrap_ridge(crossSet, des, chunklen=chunklen, nchunks=nchunks,
                                              part_attr='chunks', mode='test',
                                              alphas=alphas, single_alpha=True, normalpha=False,
                                              nboots=nboots, corrmin=.2, singcutoff=1e-10, joined=None,
                                              use_corr=True)
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_corr.nii.gz'))

        map2nifti(thisDS, cwts[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_weights.nii.gz'))
        map2nifti(thisDS, cwts[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_weights.nii.gz'))

        map2nifti(thisDS, calphas[calphas.chunks==1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_alphas.nii.gz'))
        map2nifti(thisDS, calphas[calphas.chunks==2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_alphas.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_ceiling.nii.gz'))
    del cres, cwts, calphas, cceil
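
# Illustrative arithmetic, not part of the script above: the ridge search
# sweeps alphas=np.logspace(0, 3, 20) (20 penalties from 1 to 1000) and holds
# out nchunks blocks of chunklen volumes per bootstrap. With placeholder
# values for the dataset length, paramEst and chunklen:
import numpy as np

alphas = np.logspace(0, 3, 20)
n_volumes, paramEst, chunklen = 400, 0.2, 10     # placeholders
nchunks = int(n_volumes * paramEst / chunklen)   # -> 8 held-out blocks
print(alphas[0], alphas[-1], nchunks)            # smallest alpha, largest alpha, blocks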
Esempio n. 55
0
def runsub(sub, thisContrast, thisContrastStr, testContrast,
           filterLen, filterOrd, write=False, debug=False,
           alphas=1, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group z-scores before regression.

    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set; if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel
    # Huth method: essentially use FIR
    # Mumford method: deconvolution with canonical HRF

    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords', condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
                                     regr_attrs=None)
    # 'add_regs': mc_params[sub]

    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None, add_regs=mc_params[sub], drift_model='blank')

    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)

    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    covarmat = None
    mus = None
    lwts, _, lres, lceil = bsr.bootstrap_ridge(ds=rds[lidx], des=ldes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[0]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    print 'language ' + str(np.mean(lres))

    pwts, _, pres, pceil = bsr.bootstrap_ridge(ds=rds[pidx], des=pdes, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[1]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)

    # pictures within
    print 'pictures: ' + str(np.mean(pres))
    if write:
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_wts.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_ceiling.nii.gz'))

    for t in testContrast:
        tstr = '+'.join(t)
        lcorr = lmvpa.testmodel(wts=lwts, des=ldes, ds=rds[lidx], tc=cp.copy(t), use_corr=True)
        pcorr = lmvpa.testmodel(wts=pwts, des=pdes, ds=rds[pidx], tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, dataset.vstack([lcorr, pcorr])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_ridge_la_' + str(alphas[0]) + '_pa_' + str(alphas[1]) + '_test_corrs.nii.gz'))

    del lres, pres, lwts, pwts, lceil, pceil
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    # cwts, cres, cceil = bsr.ridge(rds[pidx], pdes, mu0=mus, cov0=covarmat,
    #                                             part_attr='chunks', mode='test', alphas=alphas[0], single_alpha=True,
    #                                             normalpha=False, corrmin=.2, singcutoff=1e-10, joined=None,
    #                                             use_corr=True)
    cwts, _, cres, cceil = bsr.bootstrap_ridge(ds=crossSet, des=des, chunklen=1, nchunks=1,
                                               cov0=None, mu0=None, part_attr='chunks', mode='test',
                                               alphas=[alphas[2]], single_alpha=True, normalpha=False,
                                               nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
                                               use_corr=True)
    for t in testContrast:
        tstr = '+'.join(t)
        ccorr = lmvpa.testmodel(wts=cwts, des=des, ds=crossSet, tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, ccorr[0]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_P2L_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
            map2nifti(thisDS, ccorr[1]) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + tstr +
                                      '_L2P_ridge_alpha_' + str(alphas[2]) + '_test_corr.nii.gz'))
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_corr.nii.gz'))

        map2nifti(thisDS, cwts[cwts.chunks==1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))
        map2nifti(thisDS, cwts[cwts.chunks==2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_wts.nii.gz'))

        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_P2L_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding', sub + '_' + roi + '_' + thisContrastStr +
                         '_L2P_ridge_alpha_' + str(alphas[2]) + '_ceiling.nii.gz'))
    del cres, cwts, cceil
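
# Illustrative sketch, not part of the script above: every write block in
# these examples follows the same pattern, reverse-mapping per-voxel values
# through the dataset's mapper and saving them as NIfTI. Assuming `thisDS` is
# a masked fmri_dataset as loaded above, and using a placeholder filename:
import numpy as np
from mvpa2.suite import map2nifti

stat = np.random.rand(1, thisDS.nfeatures)   # one row of per-voxel values
map2nifti(thisDS, stat).to_filename('example_stat_map.nii.gz')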