Example #1
    def test_dist_p_value(self):
        """Basic testing of DistPValue"""
        if not externals.exists('scipy'):
            return
        ndb = 200
        ndu = 20
        nperd = 2
        pthr = 0.05
        Nbins = 400

        # Let's generate already normed data (on a sphere) and add some nonbogus features
        datau = (np.random.normal(size=(nperd, ndb)))
        dist = np.sqrt((datau * datau).sum(axis=1))

        datas = (datau.T / dist.T).T
        tn = datax = datas[0, :]
        dataxmax = np.max(np.abs(datax))

        # now let's add true-positive features
        tp = [-dataxmax * 1.1] * (ndu // 2) + [dataxmax * 1.1] * (ndu // 2)
        x = np.hstack((datax, tp))

        # let's add just pure normal data to it
        x = np.vstack((x, np.random.normal(size=x.shape))).T
        for distPValue in (DistPValue(), DistPValue(fpp=0.05)):
            result = distPValue(x)
            self.failUnless((result >= 0).all())
            self.failUnless((result <= 1).all())

        if cfg.getboolean('tests', 'labile', default='yes'):
            self.failUnless(distPValue.ca.positives_recovered[0] > 10)
            self.failUnless((np.array(distPValue.ca.positives_recovered) +
                             np.array(distPValue.ca.nulldist_number) == ndb + ndu).all())
            self.failUnlessEqual(distPValue.ca.positives_recovered[1], 0)
Example #2
    def test_dist_p_value(self):
        """Basic testing of DistPValue"""
        if not externals.exists('scipy'):
            return
        ndb = 200
        ndu = 20
        nperd = 2
        pthr = 0.05
        Nbins = 400

        # Let's generate already normed data (on a sphere) and add some nonbogus features
        datau = (np.random.normal(size=(nperd, ndb)))
        dist = np.sqrt((datau * datau).sum(axis=1))

        datas = (datau.T / dist.T).T
        tn = datax = datas[0, :]
        dataxmax = np.max(np.abs(datax))

        # now let's add true-positive features
        tp = [-dataxmax * 1.1] * (ndu // 2) + [dataxmax * 1.1] * (ndu // 2)
        x = np.hstack((datax, tp))

        # let's add just pure normal data to it
        x = np.vstack((x, np.random.normal(size=x.shape))).T
        for distPValue in (DistPValue(), DistPValue(fpp=0.05)):
            result = distPValue(x)
            self.failUnless((result >= 0).all())
            self.failUnless((result <= 1).all())

        if cfg.getboolean('tests', 'labile', default='yes'):
            self.failUnless(distPValue.ca.positives_recovered[0] > 10)
            self.failUnless((np.array(distPValue.ca.positives_recovered) +
                             np.array(distPValue.ca.nulldist_number) == ndb + ndu).all())
            self.failUnless(distPValue.ca.positives_recovered[1] == 0)
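Both variants above exercise DistPValue, which estimates per-feature p-values against an empirically fitted null distribution. As a rough sketch of that general idea only (this is not the DistPValue implementation, and the names below are illustrative), one can fit a normal null to the scores and convert each score into a two-sided p-value, which necessarily lies in [0, 1] just as the assertions above require:

import numpy as np
from scipy import stats

# Sketch only: fit a normal null to the scores and derive two-sided p-values.
scores = np.random.normal(size=220)
mu, sigma = stats.norm.fit(scores)
pvals = 2 * stats.norm.sf(np.abs(scores - mu) / sigma)
assert ((pvals >= 0) & (pvals <= 1)).all()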
Example #3
    def test_basic_functioning(self, ref_ds, zscore_common):
        # get a dataset with some prominent trends in it
        ds4l = datasets["uni4large"]
        # let's select, for now, only the meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        n = 5  # number of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales = [], [], [], [], []
        # now let's compose derived datasets by applying some random
        # rotation(s)
        for i in xrange(n):
            R = get_random_rotation(ds_orig.nfeatures)
            Rs.append(R)
            ds_ = ds_orig.copy()
            # reusing random data from dataset itself
            random_scales += [ds_orig.samples[i, 3] * 100]
            random_shifts += [ds_orig.samples[i + 10] * 10]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]
            ds_.samples = np.dot(ds_orig.samples, R) * random_scales[-1] + random_shifts[-1]
            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        ha = Hyperalignment(ref_ds=ref_ds, zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0  # by default should be this one
        # Let's test two scenarios -- with no noise we should get close to
        # perfect reconstruction; if noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean), (True, dss_rotated)):
            mappers = ha(dss)
            self.failUnlessEqual(ref_ds, ha.ca.choosen_ref_ds)
            # Map data back

            dss_clean_back = [m.forward(ds_) for m, ds_ in zip(mappers, dss_rotated_clean)]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) * random_scales[ref_ds] + random_shifts[ref_ds]
            for ds_back in dss_clean_back:
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            if not noisy or cfg.getboolean("tests", "labile", default="yes"):
                self.failUnless(
                    np.all(np.array(nddss) <= (1e-10, 1e-2)[int(noisy)]),
                    msg="Should have reconstructed original dataset more or"
                    " less. Got normed differences %s in %s case." % (nddss, ("clean", "noisy")[int(noisy)]),
                )

        # Let's see how well we do if asked to compute residuals
        ha = Hyperalignment(ref_ds=ref_ds, level2_niter=2, enable_ca=["residual_errors"])
        mappers = ha(dss_rotated_clean)
        self.failUnless(np.all(ha.ca.residual_errors.sa.levels == ["1", "2:0", "2:1", "3"]))
        rerrors = ha.ca.residual_errors.samples
        # just basic tests:
        self.failUnlessEqual(rerrors[0, ref_ds], 0)
        self.failUnlessEqual(rerrors.shape, (4, n))
        pass
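For orientation: with clean, purely rotated data, the per-dataset transformation that hyperalignment should recover is essentially an orthogonal Procrustes fit onto the reference dataset. The snippet below is a minimal NumPy sketch of such a fit, not the PyMVPA Hyperalignment API; the helper name procrustes_map is made up for illustration.

import numpy as np

def procrustes_map(X, ref):
    """Return the orthogonal T minimizing ||X.dot(T) - ref|| (Frobenius norm)."""
    U, _, Vt = np.linalg.svd(X.T.dot(ref))
    return U.dot(Vt)

X = np.random.normal(size=(20, 4))
R = np.linalg.qr(np.random.normal(size=(4, 4)))[0]   # a random orthogonal matrix
T = procrustes_map(X.dot(R), X)
assert np.allclose(X.dot(R).dot(T), X)               # the mapping undoes the rotation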
Example #4
def ipy_activate_pymvpa_goodies():
    """Activate PyMVPA additions to IPython

    Currently known goodies (controlled via PyMVPA configuration) are:

    * completion of collections' attributes
    * hiding protected attributes of instances from completions (by default)
    """
    try:
        if not cfg.getboolean('ipython', 'complete protected', False):
            ipget().IP.Completer.omit__names = 2
    finally:
        pass

    if cfg.getboolean('ipython', 'complete collections attributes', True):
        from mvpa.support.ipython.ipy_pymvpa_completer \
             import activate as ipy_completer_activate
        ipy_completer_activate()
Example #5
def test_efdr():
    # generate the data
    n = 100000
    x = np.random.randn(n)
    x[:3000] += 3
    #
    # make the tests
    efdr = emp_null.ENN(x)
    np.testing.assert_array_less(efdr.fdr(3.0), 0.15)
    if cfg.getboolean('tests', 'labile', default='yes'):
        np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -3)
    np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -3)
Example #6
def test_efdr():
    # generate the data
    n = 100000
    x = np.random.randn(n)
    x[:3000] += 3
    #
    # make the tests
    efdr = emp_null.ENN(x)
    # 2.9 instead of stricter 3.0 for tolerance
    np.testing.assert_array_less(efdr.fdr(2.9), 0.15)
    if cfg.getboolean('tests', 'labile', default='yes'):
        np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -3)
    np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -3)
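Both test_efdr variants rely on the emp_null.ENN empirical-null estimator to turn the contaminated z-scores into FDR estimates and thresholds. Purely as a point of reference (this is not what ENN does internally), the sketch below applies the classic Benjamini-Hochberg step-up rule to p-values computed under a standard-normal null for the same kind of data:

import numpy as np
from scipy import stats

x = np.random.randn(100000)
x[:3000] += 3                                   # contaminate with signal, as above
pvals = stats.norm.sf(x)                        # one-sided p-values under N(0, 1)
alpha = 0.05
n = len(pvals)
below = np.sort(pvals) <= alpha * np.arange(1, n + 1) / float(n)
n_rejected = (np.max(np.nonzero(below)[0]) + 1) if below.any() else 0
print 'BH rejects %d of %d tests at FDR=%.2f' % (n_rejected, n, alpha)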
Example #7
    result = kernel.compute(data)

# In the following we draw some 2D functions at random from the
# distribution N(0, kernel) defined by each available kernel and
# plot them. These plots show the flexibility of a given kernel
# (with default parameters) when doing interpolation. The choice
# of a kernel defines a prior probability over the function space
# used for regression/classification with GPR/GPC.
count = 1
for k in kernel_dictionary.keys():
    pl.subplot(3, 4, count)
    # X = np.random.rand(size)*12.0-6.0
    # X.sort()
    X = np.arange(-1, 1, 0.02)
    X = X[:, np.newaxis]
    ker = kernel_dictionary[k]()
    ker.compute(X, X)
    print k
    K = np.asarray(ker)
    for i in range(10):
        f = np.random.multivariate_normal(np.zeros(X.shape[0]), K)
        pl.plot(X[:, 0], f, "b-")

    pl.title(k)
    pl.axis("tight")
    count += 1

if cfg.getboolean("examples", "interactive", True):
    # show all the cool figures
    pl.show()
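The comment block above describes what the loop does: each panel shows functions drawn from the zero-mean Gaussian process prior N(0, K) induced by a kernel. Here is a self-contained NumPy version of that idea using an explicit squared-exponential kernel instead of the PyMVPA kernel classes (the helper name sq_exp_kernel is illustrative):

import numpy as np

def sq_exp_kernel(X, length_scale=0.3):
    """Squared-exponential covariance matrix for 1D inputs X."""
    d = X[:, None] - X[None, :]
    return np.exp(-0.5 * (d / length_scale) ** 2)

X = np.arange(-1, 1, 0.02)
K = sq_exp_kernel(X) + 1e-8 * np.eye(len(X))    # small jitter for numerical stability
# each row of fs is one function drawn from the prior N(0, K)
fs = np.random.multivariate_normal(np.zeros(len(X)), K, size=10)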
Example #8
"""

pl.subplot(2, 2, fig)
pl.title('SVM-Sensitivity Profiles')
lines = plot_err_line(xy_sens[..., 0], linestyle='-', fmt='ko', errtype='std')
lines[0][0].set_label('X')
lines = plot_err_line(xy_sens[..., 1], linestyle='-', fmt='go', errtype='std')
lines[0][0].set_label('Y')
pl.legend()
pl.ylim((-0.1, 0.1))
pl.xlim(0, 100)
pl.axhline(y=0, color='0.6', ls='--')
pl.xlabel('Timepoints')

from mvpa.base import cfg
if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()
"""
The following figure is not exactly identical to the product of this code, but
rather shows the result of a few minutes of beautifications in Inkscape_.

.. _Inkscape: http://www.inkscape.org/

.. figure:: ../pics/ex_eyemovements.*
   :align: center

   Gaze track for viewing upright vs. inverted faces. The figure shows the mean
   gaze path for both conditions overlaid on an example face. The panels to
   the left and below show the X and Y coordinates over the trial timecourse
   (shaded area corresponds to one standard deviation across all trials above
Example #9
"""

pl.subplot(2, 2, fig)
pl.title('SVM-Sensitivity Profiles')
lines = plot_err_line(xy_sens[..., 0], linestyle='-', fmt='ko', errtype='std')
lines[0][0].set_label('X')
lines = plot_err_line(xy_sens[..., 1], linestyle='-', fmt='go', errtype='std')
lines[0][0].set_label('Y')
pl.legend()
pl.ylim((-0.1, 0.1))
pl.xlim(0,100)
pl.axhline(y=0, color='0.6', ls='--')
pl.xlabel('Timepoints')

from mvpa.base import cfg
if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()

"""
The following figure is not exactly identical to the product of this code, but
rather shows the result of a few minutes of beautifications in Inkscape_.

.. _Inkscape: http://www.inkscape.org/

.. figure:: ../pics/ex_eyemovements.*
   :align: center

   Gaze track for viewing upright vs. inverted faces. The figure shows the mean
   gaze path for both conditions overlaid on an example face. The panels to
   the left and below show the X and Y coordinates over the trial timecourse
Example #10
    def test_basic_functioning(self, ref_ds, zscore_common):
        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # let's select, for now, only the meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 5  # number of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []
        # now let's compose derived datasets by applying some random
        # rotation(s)
        for i in xrange(n):
            R = get_random_rotation(ds_orig.nfeatures)
            Rs.append(R)
            ds_ = ds_orig.copy()
            # reusing random data from dataset itself
            random_scales += [ds_orig.samples[i, 3] * 100]
            random_shifts += [ds_orig.samples[i+10] * 10]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]
            ds_.samples = np.dot(ds_orig.samples, R) * random_scales[-1] \
                          + random_shifts[-1]
            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        ha = Hyperalignment(ref_ds=ref_ds, zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0                      # by default should be this one
        # Let's test two scenarios -- with no noise we should get close to
        # perfect reconstruction; if noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean),
                           (True, dss_rotated)):
            mappers = ha(dss)
            self.failUnlessEqual(ref_ds, ha.ca.choosen_ref_ds)
            # Map data back

            dss_clean_back = [m.forward(ds_)
                              for m, ds_ in zip(mappers, dss_rotated_clean)]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                           * random_scales[ref_ds] \
                           + random_shifts[ref_ds]
            for ds_back in dss_clean_back:
                # if we used zscoring of the common space, we cannot rely
                # on range/offset being matched, so let's use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf], k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            if not noisy or cfg.getboolean('tests', 'labile', default='yes'):
                # First compare correlations
                self.failUnless(np.all(np.array(ndcss)
                                       >= (0.95, 0.9)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got correlations %s in %s case."
                        % (ndcss, ('clean', 'noisy')[int(noisy)]))
                if not zscore_common:
                    # only reasonable without zscoring
                    self.failUnless(np.all(np.array(nddss)
                                           <= (1e-10, 1e-2)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got normed differences %s in %s case."
                        % (nddss, ('clean', 'noisy')[int(noisy)]))

        # Let's see how well we do if asked to compute residuals
        ha = Hyperalignment(ref_ds=ref_ds, level2_niter=2,
                            enable_ca=['residual_errors'])
        mappers = ha(dss_rotated_clean)
        self.failUnless(np.all(ha.ca.residual_errors.sa.levels ==
                              ['1', '2:0', '2:1', '3']))
        rerrors = ha.ca.residual_errors.samples
        # just basic tests:
        self.failUnlessEqual(rerrors[0, ref_ds], 0)
        self.failUnlessEqual(rerrors.shape, (4, n))
        pass
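The corrcoef fallback in the loop above deserves a one-line justification: correlation is invariant to the per-feature scaling and offset that z-scoring of the common space may introduce, whereas the normed difference used in the other branch is not. A quick standalone illustration:

import numpy as np

a = np.random.normal(size=100)
b = 3.0 * a + 5.0                    # same signal, different scale and offset
print np.corrcoef(a, b)[0, 1]        # ~1.0: correlation ignores scale/offset
print np.linalg.norm(a - b)          # large: the normed difference does not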
Example #11
# Also, this check enforces population of externals.versions
# for possible later version checks, hence don't remove it
externals.exists('numpy', force=True, raise_=True)
# We might need to suppress the warnings:
if externals.exists('scipy'):
    externals._suppress_scipy_warnings()
# And check whether we are running under IPython so we could pacify
# completion a bit
externals.exists('running ipython env', force=True, raise_=False)

if __debug__:
    debug('RANDOM', 'Seeding RNG with %d' % _random_seed)
    debug('INIT', 'mvpa end')

# Attach custom top-level exception handler
if cfg.getboolean('debug', 'wtf', default=False):
    import sys
    _sys_excepthook = sys.excepthook

    def _pymvpa_excepthook(*args):
        """Custom exception handler to report also pymvpa's wtf

        Calls original handler, and then collects WTF and spits it out
        """
        ret = _sys_excepthook(*args)
        sys.stdout.write("PyMVPA's WTF: collecting information... hold on...")
        sys.stdout.flush()
        wtfs = wtf()
        sys.stdout.write(
            "\rPyMVPA's WTF:                                       \n")
        sys.stdout.write(str(wtfs))
        return ret
    sys.excepthook = _pymvpa_excepthook
Example #12
def analyzeFile(datapath, attpath, subj):
    # load (trial id, X, Y) for all timepoints
    data = np.loadtxt(datapath + subj, usecols=(2, 9, 10), skiprows=21)

    newdata = [elem for elem in data if elem[0] != 1]  # drop the first run
    newdata = np.asanyarray(newdata)
    newdata = [elem for elem in newdata if elem[1] != 0]  # delete eye-blink samples
    newdata = np.asanyarray(newdata)
    
    
    attribs = np.loadtxt(attpath)
      
    raw_ds = Dataset(newdata[:,1:], sa = {'trial': newdata[:,0] - 1}, fa = {'fid': ['rawX', 'rawY']})
    
    print 'Dataset loaded...'
    

    # Variables to be set
    npoints = 200
    timeStimulus = 10  # sec
    nchunks = 6
    
    
    # Downsample the data to a time series of npoints values per trial
    ds = fft_resample(raw_ds, num=npoints, window='hann', chunks_attr='trial', attr_strategy='sample')
    
    bm = BoxcarMapper(np.arange(len(ds.sa['trial'].unique)) * npoints,
                      boxlength=npoints)
    bm.train(ds)
    ds = ds.get_mapped(bm)

    ds.sa.update({'type': attribs[:].astype(int)})
    
    fm = FlattenMapper()
    fm.train(ds)
    # want to make a copy to keep the original pristine for later plotting
    fds = ds.copy().get_mapped(fm)

    # simplify the trial attribute
    fds.sa['trial'] = [t[0] for t in ds.sa.trial]
   
    chunks = np.zeros(len(fds), dtype='int')
    for o in fds.sa['type'].unique:
        # assign chunk numbers within each trial type
        chunks[fds.sa.type == o] = np.arange((fds.sa.type == o).sum()) % nchunks
    fds.sa['chunks'] = chunks
    
    
    clf = SVM(space='type')
    mclf = SplitClassifier(clf, space='type', enable_ca=['confusion'])
    
    cvte = CrossValidation(clf, NFoldPartitioner(cvtype=1),
                           enable_ca=['stats', 'repetition_results'])
    cv_results = cvte(fds)

    print cvte.ca.stats
    
    sensana = mclf.get_sensitivity_analyzer()
    sens = sensana(fds)
        
    
    xy_sens = fds.a.mapper[1].reverse(sens).samples

    # descriptive plots
    pl.figure(figsize=(12, 10))
    axes = ('x', 'y')
    # original screen size (in pixels)
    screen_size = np.array((1280, 960))
    screen_center = screen_size / 2
    colors = ('r', 'b')
    fig = 1

    pl.subplot(2, 2, fig)
    pl.title('Mean Gaze Track')
    face_img = pl.imread('/home/robbis/development/eyeAnalysis/Face2F.bmp')
    # determine the extent of the image in original screen coordinates
    # to match the gaze position
    orig_img_extent = (screen_center[0] - face_img.shape[1] / 2,
                       screen_center[0] + face_img.shape[1] / 2,
                       screen_center[1] + face_img.shape[0] / 2,
                       screen_center[1] - face_img.shape[0] / 2)
    # show the face image with its original pixel coordinates
    pl.imshow(face_img,
              extent=orig_img_extent,
              cmap=pl.cm.gray, origin='upper')
    pl.plot(np.mean(ds.samples[ds.sa.type == 1,:,0], axis=0),
        np.mean(ds.samples[ds.sa.type == 1,:,1], axis=0),
        colors[0], label='imagination')
    pl.plot(np.mean(ds.samples[ds.sa.type == 2,:,0], axis=0),
        np.mean(ds.samples[ds.sa.type == 2,:,1], axis=0),
        colors[1], label='perception')
    pl.axis(orig_img_extent)
    pl.legend()
    fig += 1

    pl.subplot(2, 2, fig)
    pl.title('Gaze Position X-Coordinate')
    plot_erp(ds.samples[ds.sa.type == 1,:,1], pre=0, errtype = 'std',
         color=colors[0], SR=npoints/timeStimulus)
    plot_erp(ds.samples[ds.sa.type == 2,:,1], pre=0, errtype = 'std',
         color=colors[1], SR=npoints/timeStimulus)
    pl.ylim(orig_img_extent[2:])
    pl.xlabel('Peristimulus Time')
    fig += 1

    pl.subplot(2, 2, fig)
    pl.title('Gaze Position Y-Coordinate')
    plot_erp(ds.samples[ds.sa.type == 1,:,0], pre=0, errtype = 'std',
         color=colors[0], SR=npoints/timeStimulus)
    plot_erp(ds.samples[ds.sa.type == 2,:,0], pre=0, errtype = 'std',
         color=colors[1], SR=npoints/timeStimulus)
    pl.ylim(orig_img_extent[:2])
    pl.xlabel('Peristimulus Time')
    fig += 1
    
    pl.subplot(2, 2, fig)
    pl.title('SVM-Sensitivity Profiles')
    lines = plot_err_line(xy_sens[..., 0], linestyle='-', fmt='ko', errtype='std')
    lines[0][0].set_label('X')
    lines = plot_err_line(xy_sens[..., 1], linestyle='-', fmt='go', errtype='std')
    lines[0][0].set_label('Y')
    pl.legend()
    pl.ylim((-0.01, 0.01))
    pl.xlim(0,100)
    pl.axhline(y=0, color='0.6', ls='--')
    pl.xlabel('Timepoints')

    from mvpa.base import cfg
    if cfg.getboolean('examples', 'interactive', True):
        # save the figures instead of showing them interactively
        # pl.show()
        figureName = '/home/robbis/development/eyeAnalysis/results/' + subj[:-4] + '.png'
        print 'Saving ' + figureName
        pl.savefig(figureName)

    return figureName
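A hypothetical driver for the function above (the paths and subject file names are placeholders, not taken from the original script):

datapath = '/path/to/eyetracking/data/'
attpath = '/path/to/attributes.txt'
subjects = ['subj01.txt', 'subj02.txt']
figures = [analyzeFile(datapath, attpath, s) for s in subjects]
print figures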
Example #13
# Also, this check enforces population of externals.versions
# for possible later version checks, hence don't remove it
externals.exists('numpy', force=True, raise_=True)
# We might need to suppress the warnings:
if externals.exists('scipy'):
    externals._suppress_scipy_warnings()
# And check whether we are running under IPython so we could pacify
# completion a bit
externals.exists('running ipython env', force=True, raise_=False)

if __debug__:
    debug('RANDOM', 'Seeding RNG with %d' % _random_seed)
    debug('INIT', 'mvpa end')

# Attach custom top-level exception handler
if cfg.getboolean('debug', 'wtf', default=False):
    import sys
    _sys_excepthook = sys.excepthook
    def _pymvpa_excepthook(*args):
        """Custom exception handler to report also pymvpa's wtf

        Calls original handler, and then collects WTF and spits it out
        """
        ret = _sys_excepthook(*args)
        sys.stdout.write("PyMVPA's WTF: collecting information... hold on...")
        sys.stdout.flush()
        wtfs = wtf()
        sys.stdout.write("\rPyMVPA's WTF:                                       \n")
        sys.stdout.write(str(wtfs))
        return ret
    sys.excepthook = _pymvpa_excepthook
Example #14
    def test_basic_functioning(self, ref_ds, zscore_common):
        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # let's select, for now, only the meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 5  # number of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []
        # now let's compose derived datasets by applying some random
        # rotation(s)
        for i in xrange(n):
            R = get_random_rotation(ds_orig.nfeatures)
            Rs.append(R)
            ds_ = ds_orig.copy()
            # reusing random data from dataset itself
            random_scales += [ds_orig.samples[i, 3] * 100]
            random_shifts += [ds_orig.samples[i + 10] * 10]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]
            ds_.samples = np.dot(ds_orig.samples, R) * random_scales[-1] \
                          + random_shifts[-1]
            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        ha = Hyperalignment(ref_ds=ref_ds, zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0  # by default should be this one
        # Let's test two scenarios -- with no noise we should get close to
        # perfect reconstruction; if noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean), (True, dss_rotated)):
            mappers = ha(dss)
            self.failUnlessEqual(ref_ds, ha.ca.choosen_ref_ds)
            # Map data back

            dss_clean_back = [
                m.forward(ds_) for m, ds_ in zip(mappers, dss_rotated_clean)
            ]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                           * random_scales[ref_ds] \
                           + random_shifts[ref_ds]
            for ds_back in dss_clean_back:
                # if we used zscoring of the common space, we cannot rely
                # on range/offset being matched, so let's use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf],
                               k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            if not noisy or cfg.getboolean('tests', 'labile', default='yes'):
                # First compare correlations
                self.failUnless(
                    np.all(np.array(ndcss) >= (0.95, 0.9)[int(noisy)]),
                    msg="Should have reconstructed original dataset more or"
                    " less. Got correlations %s in %s case." %
                    (ndcss, ('clean', 'noisy')[int(noisy)]))
                if not zscore_common:
                    # only reasonable without zscoring
                    self.failUnless(
                        np.all(np.array(nddss) <= (1e-10, 1e-2)[int(noisy)]),
                        msg="Should have reconstructed original dataset more or"
                        " less. Got normed differences %s in %s case." %
                        (nddss, ('clean', 'noisy')[int(noisy)]))

        # Let's see how well we do if asked to compute residuals
        ha = Hyperalignment(ref_ds=ref_ds,
                            level2_niter=2,
                            enable_ca=['residual_errors'])
        mappers = ha(dss_rotated_clean)
        self.failUnless(
            np.all(
                ha.ca.residual_errors.sa.levels == ['1', '2:0', '2:1', '3']))
        rerrors = ha.ca.residual_errors.samples
        # just basic tests:
        self.failUnlessEqual(rerrors[0, ref_ds], 0)
        self.failUnlessEqual(rerrors.shape, (4, n))
        pass