Example #1
def _goodies_pre011():
    """Goodies activator for ipython < 0.11
    """
    try:
        if not cfg.getboolean('ipython', 'complete protected', False):
            ipget().IP.Completer.omit__names = 2
    finally:
        pass

    if cfg.getboolean('ipython', 'complete collections attributes', True):
        from mvpa2.support.ipython.ipy_pymvpa_completer \
             import activate as ipy_completer_activate
        ipy_completer_activate()
    def test_dist_p_value(self):
        """Basic testing of DistPValue"""
        if not externals.exists('scipy'):
            return
        ndb = 200
        ndu = 20
        nperd = 2
        pthr = 0.05
        Nbins = 400

        # Lets generate already normed data (on sphere) and add some nonbogus features
        datau = (np.random.normal(size=(nperd, ndb)))
        dist = np.sqrt((datau * datau).sum(axis=1))

        datas = (datau.T / dist.T).T
        tn = datax = datas[0, :]
        dataxmax = np.max(np.abs(datax))

        # now lets add true positive features
        tp = [-dataxmax * 1.1] * (ndu//2) + [dataxmax * 1.1] * (ndu//2)
        x = np.hstack((datax, tp))

        # lets add just pure normal to it
        x = np.vstack((x, np.random.normal(size=x.shape))).T
        for distPValue in (DistPValue(), DistPValue(fpp=0.05)):
            result = distPValue(x)
            self.assertTrue((result >= 0).all())
            self.assertTrue((result <= 1).all())

        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue(distPValue.ca.positives_recovered[0] > 10)
            self.assertTrue((np.array(distPValue.ca.positives_recovered) +
                             np.array(distPValue.ca.nulldist_number) == ndb + ndu).all())
            self.assertEqual(distPValue.ca.positives_recovered[1], 0)
Example #5
def ipy_activate_pymvpa_goodies():
    """Activate PyMVPA additions to IPython

    Currently known goodies (controlled via PyMVPA configuration) are:

    * completions of collections' attributes
    * disabling by default protected attributes of instances in completions
    """
    try:
        if not cfg.getboolean('ipython', 'complete protected', False):
            ipget().IP.Completer.omit__names = 2
    finally:
        pass

    if cfg.getboolean('ipython', 'complete collections attributes', True):
        from mvpa2.support.ipython.ipy_pymvpa_completer \
             import activate as ipy_completer_activate
        ipy_completer_activate()
Example #6
def test_efdr():
    # generate the data
    n = 100000
    x = np.random.randn(n)
    x[:3000] += 3
    #
    # make the tests
    efdr = emp_null.ENN(x)
    if cfg.getboolean('tests', 'labile', default='yes'):
        # 2.9 instead of stricter 3.0 for tolerance
        np.testing.assert_array_less(efdr.fdr(2.9), 0.15)
        np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -3)
        np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -3)
Example #7
# We might need to suppress the warnings:
if externals.exists('scipy'):
    externals._suppress_scipy_warnings()
# And check if we aren't under IPython so we could pacify completion
# a bit
externals.exists('running ipython env', force=True, raise_=False)
# Check for matplotlib so matplotlib backend becomes set according to
# our configuration
externals.exists('matplotlib', force=True, raise_=False)

#
# Hooks
#

# Attach custom top-level exception handler
if cfg.getboolean('debug', 'wtf', default=False):
    import sys
    _sys_excepthook = sys.excepthook
    def _pymvpa_excepthook(*args):
        """Custom exception handler to report also pymvpa's wtf

        Calls original handler, and then collects WTF and spits it out
        """
        ret = _sys_excepthook(*args)
        sys.stdout.write("PyMVPA's WTF: collecting information... hold on...")
        sys.stdout.flush()
        wtfs = wtf()
        sys.stdout.write("\rPyMVPA's WTF:                                       \n")
        sys.stdout.write(str(wtfs))
        return ret
    sys.excepthook = _pymvpa_excepthook
    # Verify that we have the same ca enabled
    # TODO
    #ok_(set(lrn.ca.enabled) == set(lrn__.ca.enabled))
    # and having the same values?
    # TODO

    # now lets do predict and manually compute error
    predictions = lrn__.predict(ds[ds.sa.train == 2].samples)
    error__ = errorfx(predictions, ds[ds.sa.train == 2].sa.targets)

    if 'non-deterministic' in lrn_.__tags__:
        # might be different... let's allow to vary quite a bit
        # and new error should be no more than twice the old one
        # (better than no check at all)
        # TODO: smarter check, since 'twice' is quite coarse
        #       especially if original error happens to be 0 ;)
        if cfg.getboolean('tests', 'labile', default='yes'):
            ok_(np.asscalar(error_) <= 2 * np.asscalar(error))
            ok_(np.asscalar(error__) <= 2 * np.asscalar(error))
    else:
        # must match precisely
        # but not on windows 32 bit - had miniscule difference
        cmp_ = assert_array_almost_equal if on_windows else assert_array_equal
        cmp_(error, error_)
        cmp_(error, error__)

    # TODO: verify ca's

    #print "I PASSED!!!! %s" % lrn
Example #9
# import the main unittest interface
from mvpa2.tests import run as test

#
# Externals-dependent tune ups
#

# PyMVPA is useless without numpy
# Also, this check enforcing population of externals.versions
# for possible later version checks, hence don't remove
externals.exists("numpy", force=True, raise_=True)
# We might need to suppress the warnings:

# If instructed -- no python or numpy warnings (like ctypes version
# for slmr), e.g. for during doctests
if cfg.getboolean("warnings", "suppress", default=False):
    import warnings

    warnings.simplefilter("ignore")
    # NumPy
    np.seterr(**dict([(x, "ignore") for x in np.geterr()]))

if externals.exists("scipy"):
    externals._suppress_scipy_warnings()

# And check if we aren't under IPython so we could pacify completion
# a bit
externals.exists("running ipython env", force=True, raise_=False)
# Check for matplotlib so matplotlib backend becomes set according to
# our configuration
externals.exists("matplotlib", force=True, raise_=False)
Example #10
    def test_basic_functioning(self, ref_ds, zscore_common, zscore_all):
        ha = Hyperalignment(ref_ds=ref_ds,
                            zscore_all=zscore_all,
                            zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0  # by default should be this one

        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # lets select for now only meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 4  # # of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []

        # now lets compose derived datasets by using some random
        # rotation(s)
        for i in xrange(n):
            ## if False: # i == ref_ds:
            #     # Do not rotate the target space so we could check later on
            #     # if we transform back nicely
            #     R = np.eye(ds_orig.nfeatures)
            ## else:
            ds_ = random_affine_transformation(ds_orig,
                                               scale_fac=100,
                                               shift_fac=10)
            Rs.append(ds_.a.random_rotation)
            # reusing random data from dataset itself
            random_scales += [ds_.a.random_scale]
            random_shifts += [ds_.a.random_shift]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]

            ## if (zscore_common or zscore_all):
            ##     # for later on testing of "precise" reconstruction
            ##     zscore(ds_, chunks_attr=None)

            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        # Lets test two scenarios -- in one with no noise -- we should get
        # close to perfect reconstruction.  If noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean), (True, dss_rotated)):
            # to verify that original datasets didn't get changed by
            # Hyperalignment store their idhashes of samples
            idhashes = [idhash(ds.samples) for ds in dss]
            idhashes_targets = [idhash(ds.targets) for ds in dss]

            mappers = ha(dss)

            idhashes_ = [idhash(ds.samples) for ds in dss]
            idhashes_targets_ = [idhash(ds.targets) for ds in dss]
            self.assertEqual(
                idhashes,
                idhashes_,
                msg="Hyperalignment must not change original data.")
            self.assertEqual(
                idhashes_targets,
                idhashes_targets_,
                msg="Hyperalignment must not change original data targets.")

            self.assertEqual(ref_ds, ha.ca.chosen_ref_ds)

            # Map data back

            dss_clean_back = [
                m.forward(ds_) for m, ds_ in zip(mappers, dss_rotated_clean)
            ]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                * random_scales[ref_ds] \
                + random_shifts[ref_ds]
            if zscore_common or zscore_all:
                zscore(Dataset(ds_orig_Rref), chunks_attr=None)
            for ds_back in dss_clean_back:
                # if we used zscoring of common, we cannot rely
                # that range/offset could be matched, so lets use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf],
                               k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            snoisy = ('clean', 'noisy')[int(noisy)]
            do_labile = cfg.getboolean('tests', 'labile', default='yes')
            if not noisy or do_labile:
                # First compare correlations
                self.assertTrue(
                    np.all(np.array(ndcss) >= (0.9, 0.85)[int(noisy)]),
                    msg="Should have reconstructed original dataset more or"
                    " less. Got correlations %s in %s case." % (ndcss, snoisy))
                if not (zscore_all or zscore_common):
                    # if we didn't zscore -- all of them should be really close
                    self.assertTrue(
                        np.all(np.array(nddss) <= (1e-10, 1e-1)[int(noisy)]),
                        msg="Should have reconstructed original dataset well "
                        "without zscoring. Got normed differences %s in %s case."
                        % (nddss, snoisy))
                elif do_labile:
                    # otherwise they all should be somewhat close
                    self.assertTrue(
                        np.all(np.array(nddss) <= (.2, 3)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less for all. Got normed differences %s in %s case."
                        % (nddss, snoisy))
                    self.assertTrue(
                        np.all(nddss[ref_ds] <= .09),
                        msg="Should have reconstructed original dataset quite "
                        "well even with zscoring. Got normed differences %s "
                        "in %s case." % (nddss, snoisy))
                    # yoh: and leave 5% of difference for a chance and numerical
                    #      fluctuations ;)
                    self.assertTrue(
                        np.all(np.array(nddss) >= 0.95 * nddss[ref_ds]),
                        msg="Should have reconstructed orig_ds best of all. "
                        "Got normed differences %s in %s case with ref_ds=%d."
                        % (nddss, snoisy, ref_ds))

        # Lets see how well we do if asked to compute residuals
        ha = Hyperalignment(
            ref_ds=ref_ds,
            level2_niter=2,
            enable_ca=['training_residual_errors', 'residual_errors'])
        mappers = ha(dss_rotated_clean)
        self.assertTrue(
            np.all(ha.ca.training_residual_errors.sa.levels ==
                   ['1', '2:0', '2:1']))
        rterrors = ha.ca.training_residual_errors.samples
        # just basic tests:
        self.assertEqual(rterrors[0, ref_ds], 0)
        self.assertEqual(rterrors.shape, (3, n))
        rerrors = ha.ca.residual_errors.samples
        self.assertEqual(rerrors.shape, (1, n))
Example #11
# import the main unittest interface
from mvpa2.tests import run as test

#
# Externals-dependent tune ups
#

# PyMVPA is useless without numpy
# Also, this check enforcing population of externals.versions
# for possible later version checks, hence don't remove
externals.exists('numpy', force=True, raise_=True)
# We might need to suppress the warnings:

# If instructed -- no python or numpy warnings (like ctypes version
# for slmr), e.g. for during doctests
if cfg.getboolean('warnings', 'suppress', default=False):
    import warnings
    warnings.simplefilter('ignore')
    # NumPy
    np.seterr(**dict([(x, 'ignore') for x in np.geterr()]))

if externals.exists('scipy'):
    externals._suppress_scipy_warnings()

# And check if we aren't under IPython so we could pacify completion
# a bit
externals.exists('running ipython env', force=True, raise_=False)
# Check for matplotlib so matplotlib backend becomes set according to
# our configuration
externals.exists('matplotlib', force=True, raise_=False)
Example #12
pl.subplot(2, 2, fig)
pl.title("SVM-Sensitivity Profiles")
lines = plot_err_line(xy_sens[..., 0], linestyle="-", fmt="ko", errtype="std")
lines[0][0].set_label("X")
lines = plot_err_line(xy_sens[..., 1], linestyle="-", fmt="go", errtype="std")
lines[0][0].set_label("Y")
pl.legend()
pl.ylim((-0.1, 0.1))
pl.xlim(0, 100)
pl.axhline(y=0, color="0.6", ls="--")
pl.xlabel("Timepoints")

from mvpa2.base import cfg

if cfg.getboolean("examples", "interactive", True):
    # show all the cool figures
    pl.show()

"""
The following figure is not exactly identical to the product of this code, but
rather shows the result of a few minutes of beautifications in Inkscape_.

.. _Inkscape: http://www.inkscape.org/

.. figure:: ../pics/ex_eyemovements.*
   :align: center

   Gaze track for viewing upright vs. inverted faces. The figure shows the mean
   gaze path for both conditions overlaid on an example face. The panels to
   the left and below show the X and Y coordinates over the trial timecourse
Example #13
"""

pl.subplot(2, 2, fig)
pl.title('SVM-Sensitivity Profiles')
lines = plot_err_line(xy_sens[..., 0], linestyle='-', fmt='ko', errtype='std')
lines[0][0].set_label('X')
lines = plot_err_line(xy_sens[..., 1], linestyle='-', fmt='go', errtype='std')
lines[0][0].set_label('Y')
pl.legend()
pl.ylim((-0.1, 0.1))
pl.xlim(0,100)
pl.axhline(y=0, color='0.6', ls='--')
pl.xlabel('Timepoints')

from mvpa2.base import cfg
if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()

"""
The following figure is not exactly identical to the product of this code, but
rather shows the result of a few minutes of beautifications in Inkscape_.

.. _Inkscape: http://www.inkscape.org/

.. figure:: ../pics/ex_eyemovements.*
   :align: center

   Gaze track for viewing upright vs. inverted faces. The figure shows the mean
   gaze path for both conditions overlaid on an example face. The panels to
   the left and below show the X and Y coordinates over the trial timecourse
    def test_basic_functioning(self, ref_ds, zscore_common):
        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # lets select for now only meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 5 # # of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []
        # now lets compose derived datasets by using some random
        # rotation(s)
        for i in xrange(n):
            R = get_random_rotation(ds_orig.nfeatures)
            Rs.append(R)
            ds_ = ds_orig.copy()
            # reusing random data from dataset itself
            random_scales += [ds_orig.samples[i, 3] * 100]
            random_shifts += [ds_orig.samples[i+10] * 10]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]
            ds_.samples = np.dot(ds_orig.samples, R) * random_scales[-1] \
                          + random_shifts[-1]
            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        ha = Hyperalignment(ref_ds=ref_ds, zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0                      # by default should be this one
        # Lets test two scenarios -- in one with no noise -- we should get
        # close to perfect reconstruction.  If noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean),
                           (True, dss_rotated)):
            mappers = ha(dss)
            self.failUnlessEqual(ref_ds, ha.ca.choosen_ref_ds)
            # Map data back

            dss_clean_back = [m.forward(ds_)
                              for m, ds_ in zip(mappers, dss_rotated_clean)]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                           * random_scales[ref_ds] \
                           + random_shifts[ref_ds]
            for ds_back in dss_clean_back:
                # if we used zscoring of common, we cannot rely
                # that range/offset could be matched, so lets use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf], k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            if not noisy or cfg.getboolean('tests', 'labile', default='yes'):
                # First compare correlations
                self.failUnless(np.all(np.array(ndcss)
                                       >= (0.9, 0.85)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got correlations %s in %s case."
                        % (ndcss, ('clean', 'noisy')[int(noisy)]))
                if not zscore_common:
                    # only reasonable without zscoring
                    self.failUnless(np.all(np.array(nddss)
                                           <= (1e-10, 1e-2)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got normed differences %s in %s case."
                        % (nddss, ('clean', 'noisy')[int(noisy)]))

        # Lets see how well we do if asked to compute residuals
        ha = Hyperalignment(ref_ds=ref_ds, level2_niter=2,
                            enable_ca=['residual_errors'])
        mappers = ha(dss_rotated_clean)
        self.failUnless(np.all(ha.ca.residual_errors.sa.levels ==
                              ['1', '2:0', '2:1', '3']))
        rerrors = ha.ca.residual_errors.samples
        # just basic tests:
        self.failUnlessEqual(rerrors[0, ref_ds], 0)
        self.failUnlessEqual(rerrors.shape, (4, n))
        pass
Example #15
        raise AssertionError, \
              "Failed to load trained lrn due to %r" % (e,)

    # Verify that we have the same ca enabled
    # TODO
    #ok_(set(lrn.ca.enabled) == set(lrn__.ca.enabled))
    # and having the same values?
    # TODO

    # now lets do predict and manually compute error
    predictions = lrn__.predict(ds[ds.sa.train == 2].samples)
    error__ = errorfx(predictions, ds[ds.sa.train == 2].sa.targets)

    if 'non-deterministic' in lrn_.__tags__:
        # might be different... let's allow to vary quite a bit
        # and new error should be no more than twice the old one
        # (better than no check at all)
        # TODO: smarter check, since 'twice' is quite coarse
        #       especially if original error happens to be 0 ;)
        if cfg.getboolean('tests', 'labile', default='yes'):
            ok_(np.asscalar(error_) <= 2*np.asscalar(error))
            ok_(np.asscalar(error__) <= 2*np.asscalar(error))
    else:
        # must match precisely
        assert_array_equal(error, error_)
        assert_array_equal(error, error__)

    # TODO: verify ca's

    #print "I PASSED!!!! %s" % lrn
Example #17
mvpa2.seed(0)  # to reproduce the plot

dataset_kwargs = dict(nfeatures=2,
                      nchunks=10,
                      snr=2,
                      nlabels=4,
                      means=[[0, 1], [1, 0], [1, 1], [0, 0]])

dataset_train = normal_feature_dataset(**dataset_kwargs)
dataset_plot = normal_feature_dataset(**dataset_kwargs)

# make a new figure
pl.figure(figsize=(9, 9))

for i, k in enumerate((1, 3, 9, 20)):
    knn = kNN(k)

    print "Processing kNN(%i) problem..." % k
    pl.subplot(2, 2, i + 1)
    """
    """

    knn.train(dataset_train)

    plot_decision_boundary_2d(dataset_plot, clf=knn, maps='targets')

if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()
Example #18
    def test_basic_functioning(self, ref_ds, zscore_common, zscore_all):
        ha = Hyperalignment(ref_ds=ref_ds,
                            zscore_all=zscore_all,
                            zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0                      # by default should be this one

        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # lets select for now only meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 4 # # of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []

        # now lets compose derived datasets by using some random
        # rotation(s)
        for i in xrange(n):
            ## if False: # i == ref_ds:
            #     # Do not rotate the target space so we could check later on
            #     # if we transform back nicely
            #     R = np.eye(ds_orig.nfeatures)
            ## else:
            ds_ = random_affine_transformation(ds_orig, scale_fac=100, shift_fac=10)
            Rs.append(ds_.a.random_rotation)
            # reusing random data from dataset itself
            random_scales += [ds_.a.random_scale]
            random_shifts += [ds_.a.random_shift]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]

            ## if (zscore_common or zscore_all):
            ##     # for later on testing of "precise" reconstruction
            ##     zscore(ds_, chunks_attr=None)

            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        # Lets test two scenarios -- in one with no noise -- we should get
        # close to perfect reconstruction.  If noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean),
                           (True, dss_rotated)):
            # to verify that original datasets didn't get changed by
            # Hyperalignment store their idhashes of samples
            idhashes = [idhash(ds.samples) for ds in dss]
            idhashes_targets = [idhash(ds.targets) for ds in dss]

            mappers = ha(dss)

            idhashes_ = [idhash(ds.samples) for ds in dss]
            idhashes_targets_ = [idhash(ds.targets) for ds in dss]
            self.assertEqual(idhashes, idhashes_,
                msg="Hyperalignment must not change original data.")
            self.assertEqual(idhashes_targets, idhashes_targets_,
                msg="Hyperalignment must not change original data targets.")

            self.assertEqual(ref_ds, ha.ca.chosen_ref_ds)

            # Map data back

            dss_clean_back = [m.forward(ds_)
                              for m, ds_ in zip(mappers, dss_rotated_clean)]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                           * random_scales[ref_ds] \
                           + random_shifts[ref_ds]
            if zscore_common or zscore_all:
                zscore(Dataset(ds_orig_Rref), chunks_attr=None)
            for ds_back in dss_clean_back:
                # if we used zscoring of common, we cannot rely
                # that range/offset could be matched, so lets use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf], k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            snoisy = ('clean', 'noisy')[int(noisy)]
            do_labile = cfg.getboolean('tests', 'labile', default='yes')
            if not noisy or do_labile:
                # First compare correlations
                self.assertTrue(np.all(np.array(ndcss)
                                       >= (0.9, 0.85)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got correlations %s in %s case."
                        % (ndcss, snoisy))
                if not (zscore_all or zscore_common):
                    # if we didn't zscore -- all of them should be really close
                    self.assertTrue(np.all(np.array(nddss)
                                       <= (1e-10, 1e-1)[int(noisy)]),
                        msg="Should have reconstructed original dataset well "
                        "without zscoring. Got normed differences %s in %s case."
                        % (nddss, snoisy))
                elif do_labile:
                    # otherwise they all should be somewhat close
                    self.assertTrue(np.all(np.array(nddss)
                                           <= (.2, 3)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less for all. Got normed differences %s in %s case."
                        % (nddss, snoisy))
                    self.assertTrue(np.all(nddss[ref_ds] <= .09),
                        msg="Should have reconstructed original dataset quite "
                        "well even with zscoring. Got normed differences %s "
                        "in %s case." % (nddss, snoisy))
                    # yoh: and leave 5% of difference for a chance and numerical
                    #      fluctuations ;)
                    self.assertTrue(np.all(np.array(nddss) >= 0.95*nddss[ref_ds]),
                        msg="Should have reconstructed orig_ds best of all. "
                        "Got normed differences %s in %s case with ref_ds=%d."
                        % (nddss, snoisy, ref_ds))

        # Lets see how well we do if asked to compute residuals
        ha = Hyperalignment(ref_ds=ref_ds, level2_niter=2,
                            enable_ca=['training_residual_errors',
                                       'residual_errors'])
        mappers = ha(dss_rotated_clean)
        self.assertTrue(np.all(ha.ca.training_residual_errors.sa.levels ==
                              ['1', '2:0', '2:1']))
        rterrors = ha.ca.training_residual_errors.samples
        # just basic tests:
        self.assertEqual(rterrors[0, ref_ds], 0)
        self.assertEqual(rterrors.shape, (3, n))
        rerrors = ha.ca.residual_errors.samples
        self.assertEqual(rerrors.shape, (1, n))