Code example #1
def some_svms():
    """Return a list of FeatureSelectionClassifiers
    based on SVMs with different numbers of selected features and/or
    sensitivity measures"""
    clfr1 = FeatureSelectionClassifier(SVM(descr="libsvm.LinSVM(C=def)",
                                           probability=1),
                                       SensitivityBasedFeatureSelection(
                                           OneWayAnova(),
                                           FixedNElementTailSelector(
                                               500,
                                               mode='select',
                                               tail='upper')),
                                       descr="LinSVM on 500 (ANOVA)")
    clfr2 = FeatureSelectionClassifier(
        SVM(descr="libsvm.LinSVM(C=def)", probability=1),
        SensitivityBasedFeatureSelection(
            SVM().getSensitivityAnalyzer(transformer=Absolute),
            FixedNElementTailSelector(500, mode='select', tail='upper')),
        descr="LinSVM on 500 (SVM)")
    clfr3 = SVM()
    clfr4 = FeatureSelectionClassifier(
        SVM(descr="libsvm.LinSVM(C=def)", probability=1),
        SensitivityBasedFeatureSelection(
            SVM().getSensitivityAnalyzer(transformer=Absolute),
            FractionTailSelector(0.05, mode='select', tail='upper'),
        ),
        descr="LinSVM on 5 % (SVM)")
    return [clfr1, clfr2, clfr3, clfr4]
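A minimal usage sketch for the list above, assuming a preprocessed PyMVPA dataset ds with targets (the dataset name and the loop are illustrative, not part of the original snippet):

# Hypothetical usage: train each selector-wrapped classifier on a dataset ds.
for clf in some_svms():
    clf.train(ds)                # fits the feature selection, then the SVM
    print clf.descr, clf.predict(ds.samples)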
Code example #2
File: test_clf.py Project: heqing-psychology/PyMVPA
    def test_feature_selection_classifier(self):
        from mvpa.featsel.base import \
             SensitivityBasedFeatureSelection
        from mvpa.featsel.helpers import \
             FixedNElementTailSelector

        # should give lowest weight to the feature with lowest index
        sens_ana = SillySensitivityAnalyzer()
        # should give lowest weight to the feature with highest index
        sens_ana_rev = SillySensitivityAnalyzer(mult=-1)

        # corresponding feature selections
        feat_sel = SensitivityBasedFeatureSelection(
            sens_ana, FixedNElementTailSelector(1, mode='discard'))

        feat_sel_rev = SensitivityBasedFeatureSelection(
            sens_ana_rev, FixedNElementTailSelector(1))

        samples = np.array([[0, 0, -1], [1, 0, 1], [-1, -1, 1], [-1, 0, 1],
                            [1, -1, 1]])

        testdata3 = dataset_wizard(samples=samples, targets=1)
        # dummy train data so proper mapper gets created
        traindata = dataset_wizard(samples=np.array([[0, 0, -1], [1, 0, 1]]),
                                   targets=[1, 2])

        # targets
        res110 = [1, 1, 1, -1, -1]
        res011 = [-1, 1, -1, 1, -1]

        # first classifier -- 0th feature should be discarded
        clf011 = FeatureSelectionClassifier(self.clf_sign,
                                            feat_sel,
                                            enable_ca=['feature_ids'])

        self.clf_sign.ca.change_temporarily(enable_ca=['estimates'])
        clf011.train(traindata)

        self.failUnlessEqual(clf011.predict(testdata3.samples), res011)
        # just a silly test that we get values assigned in the 'ProxyClassifier'
        self.failUnless(len(clf011.ca.estimates) == len(res110),
                        msg="We need to pass values into ProxyClassifier")
        self.clf_sign.ca.reset_changed_temporarily()

        self.failUnlessEqual(
            len(clf011.ca.feature_ids), 2,
            msg="Feature selection classifier had to be trained on 2 features")

        # second classifier -- last feature should be discarded
        clf011 = FeatureSelectionClassifier(self.clf_sign, feat_sel_rev)
        clf011.train(traindata)
        self.failUnlessEqual(clf011.predict(testdata3.samples), res110)
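SillySensitivityAnalyzer is defined elsewhere in the PyMVPA test suite. Below is a minimal sketch consistent with the comments above and with the check in code example #7 (sens_ana(wdata).samples[0,0] == -int(wdata_nfeatures/2)); the base class and the exact return wrapping are assumptions, not the verbatim definition:

class SillySensitivityAnalyzer(Sensitivity):
    """Sketch: feature i gets weight mult * (i - nfeatures // 2), so the
    lowest-index feature gets the lowest weight when mult == 1."""
    is_trained = True            # toy measure, nothing to train

    def __init__(self, mult=1, **kwargs):
        Sensitivity.__init__(self, **kwargs)
        self.__mult = mult

    def _call(self, dataset):
        # one sensitivity value per feature, increasing with feature index
        return Dataset(np.atleast_2d(
            self.__mult * (np.arange(dataset.nfeatures)
                           - int(dataset.nfeatures / 2))))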
Code example #3
File: test_clf.py Project: heqing-psychology/PyMVPA
    def test_feature_selection_classifier_with_regression(self):
        from mvpa.featsel.base import \
             SensitivityBasedFeatureSelection
        from mvpa.featsel.helpers import \
             FixedNElementTailSelector
        if sample_clf_reg is None:
            # no regression classifier was found, so nothing to test
            return
        # should give lowest weight to the feature with lowest index
        sens_ana = SillySensitivityAnalyzer()

        # corresponding feature selections
        feat_sel = SensitivityBasedFeatureSelection(
            sens_ana, FixedNElementTailSelector(1, mode='discard'))

        # now test with a regression-based classifier. The problem is
        # that it determines predictions from the values twice and then
        # sets the values from the results, so the second time around the
        # values get set to the predictions.  The final outcome is that
        # the values are actually the predictions...
        dat = dataset_wizard(samples=np.random.randn(4, 10),
                             targets=[-1, -1, 1, 1])
        clf_reg = FeatureSelectionClassifier(sample_clf_reg, feat_sel)
        clf_reg.train(dat)
        _ = clf_reg.predict(dat.samples)
        self.failIf(
            (np.array(clf_reg.ca.estimates) -
             clf_reg.ca.predictions).sum() == 0,
            msg="Values were set to the predictions in %s." % sample_clf_reg)
Code example #4
File: ifs.py Project: heqing-psychology/PyMVPA
    def __init__(self,
                 data_measure,
                 transfer_error,
                 bestdetector=BestDetector(),
                 stopping_criterion=NBackHistoryStopCrit(BestDetector()),
                 feature_selector=FixedNElementTailSelector(1,
                                                            tail='upper',
                                                            mode='select'),
                 **kwargs):
        """Initialize incremental feature search

        Parameters
        ----------
        data_measure : DatasetMeasure
          Computed for each candidate feature selection. The measure has
          to compute a scalar value.
        transfer_error : TransferError
          Computed against a test dataset for each incremental feature
          set.
        bestdetector : Functor
          Given a list of error values it has to return a boolean that
          signals whether the latest error value is the total minimum.
        stopping_criterion : Functor
          Given a list of error values it has to return whether the
          criterion is fulfilled.
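        feature_selector : Functor
          Given the measure values computed for the candidate features,
          it has to return the ids of the features that should be added
          to the selected set (by default the single best one).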
        """
        # bases init first
        FeatureSelection.__init__(self, **kwargs)

        self.__data_measure = data_measure
        self.__transfer_error = transfer_error
        self.__feature_selector = feature_selector
        self.__bestdetector = bestdetector
        self.__stopping_criterion = stopping_criterion
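A hypothetical instantiation sketch, mirroring the test in code example #9 below (svm stands for any PyMVPA classifier; the remaining names follow that snippet):

# Sketch: incremental feature search driven by cross-validated error.
trans_error = TransferError(svm)
ifs = IFS(CrossValidatedTransferError(trans_error, NFoldSplitter(1),
                                      postproc=mean_sample()),
          trans_error,
          # the data measure returns errors, so lower values are better
          feature_selector=FixedNElementTailSelector(1, tail='lower',
                                                     mode='select'))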
Code example #5
    def test_rfe(self, clf):

        # sensitivity analyser and transfer error quantifier use the SAME clf!
        sens_ana = clf.get_sensitivity_analyzer(postproc=maxofabs_sample())
        trans_error = TransferError(clf)
        # because the clf is already trained when computing the sensitivity
        # map, prevent retraining for transfer error calculation
        # Use absolute of the svm weights as sensitivity
        rfe = RFE(sens_ana,
                  trans_error,
                  feature_selector=FixedNElementTailSelector(1),
                  train_clf=False)

        wdata = self.get_data()
        wdata_nfeatures = wdata.nfeatures
        tdata = self.get_data_t()
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = rfe(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # check that the feature set with the least error is selected
        if len(rfe.ca.errors):
            e = np.array(rfe.ca.errors)
            self.failUnless(sdata.nfeatures == wdata_nfeatures - e.argmin())
        else:
            self.failUnless(sdata.nfeatures == wdata_nfeatures)

        # silly check if nfeatures is in decreasing order
        nfeatures = np.array(rfe.ca.nfeatures).copy()
        nfeatures.sort()
        self.failUnless( (nfeatures[::-1] == rfe.ca.nfeatures).all() )

        # check if history has elements for every step
        self.failUnless(set(rfe.ca.history)
                        == set(range(len(np.array(rfe.ca.errors)))))

        # Last (the largest number) can be present multiple times even
        # if we remove 1 feature at a time -- just need to stop well
        # in advance when we have more than 1 feature left ;)
        self.failUnless(rfe.ca.nfeatures[-1]
                        == len(np.where(rfe.ca.history
                                        == max(rfe.ca.history))[0]))
Code example #6
    def test_sensitivity_based_feature_selection(self, clf):

        # sensitivity analyser and transfer error quantifier use the SAME clf!
        sens_ana = clf.get_sensitivity_analyzer(postproc=maxofabs_sample())

        # number of features to remove
        Nremove = 2

        # use absolute values of the clf weights as sensitivity
        fe = SensitivityBasedFeatureSelection(sens_ana,
                feature_selector=FixedNElementTailSelector(Nremove),
                enable_ca=["sensitivity", "selected_ids"])

        wdata = self.get_data()
        tdata = self.get_data_t()
        # XXX for now convert to numeric labels, but should better be taken
        # care of during clf refactoring
        am = AttributeMap()
        wdata.targets = am.to_numeric(wdata.targets)
        tdata.targets = am.to_numeric(tdata.targets)

        wdata_nfeatures = wdata.nfeatures
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = fe(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # silly check that exactly Nremove features got removed
        self.failUnlessEqual(wdata.nfeatures, sdata.nfeatures+Nremove,
            msg="We had to remove just %d features" % Nremove)

        self.failUnlessEqual(tdata.nfeatures, stdata.nfeatures+Nremove,
            msg="We had to remove just %d features in testing as well" % Nremove)

        self.failUnlessEqual(fe.ca.sensitivity.nfeatures, wdata_nfeatures,
            msg="Sensitivity has to have # of features equal to the original")

        self.failUnlessEqual(len(fe.ca.selected_ids), sdata.nfeatures,
            msg="# of selected features must equal the one in the result dataset")
Code example #7
    def test_feature_selection_pipeline(self):
        sens_ana = SillySensitivityAnalyzer()

        wdata = self.get_data()
        wdata_nfeatures = wdata.nfeatures
        tdata = self.get_data_t()
        tdata_nfeatures = tdata.nfeatures

        # test silly one first ;-)
        self.failUnlessEqual(sens_ana(wdata).samples[0,0], -int(wdata_nfeatures/2))

        # OLD: first remove 25% == 6, and then 4, total removing 10
        # NOW: the test should be independent of the actual number of features
        feature_selections = [SensitivityBasedFeatureSelection(
                                sens_ana,
                                FractionTailSelector(0.25)),
                              SensitivityBasedFeatureSelection(
                                sens_ana,
                                FixedNElementTailSelector(4))
                              ]

        # create a FeatureSelection pipeline
        feat_sel_pipeline = FeatureSelectionPipeline(
            feature_selections=feature_selections,
            enable_ca=['nfeatures', 'selected_ids'])

        sdata, stdata = feat_sel_pipeline(wdata, tdata)

        self.failUnlessEqual(len(feat_sel_pipeline.feature_selections),
                             len(feature_selections),
                             msg="Test the property feature_selections")

        desired_nfeatures = int(np.ceil(wdata_nfeatures*0.75))
        self.failUnlessEqual(feat_sel_pipeline.ca.nfeatures,
                             [wdata_nfeatures, desired_nfeatures],
                             msg="Test if nfeatures get assigned properly."
                             " Got %s!=%s" % (feat_sel_pipeline.ca.nfeatures,
                                              [wdata_nfeatures, desired_nfeatures]))

        self.failUnlessEqual(list(feat_sel_pipeline.ca.selected_ids),
                             range(int(wdata_nfeatures*0.25)+4, wdata_nfeatures))
Code example #8
    def __test_fspipeline_with_split_classifier(self, basic_clf):
        #basic_clf = LinearNuSVMC()
        multi_clf = MulticlassClassifier(clf=basic_clf)
        #svm_weights = LinearSVMWeights(svm)

        # Proper RFE: aggregate sensitivities across multiple splits,
        # but also due to multi class those need to be aggregated
        # somehow. Transfer error here should be 'leave-1-out' error
        # of split classifier itself
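        # NB: trans_error below is assumed to be defined in the enclosing
        #     scope -- it is not constructed in this (disabled) helper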
        sclf = SplitClassifier(clf=basic_clf)
        rfe = RFE(sensitivity_analyzer=sclf.get_sensitivity_analyzer(
            enable_ca=["sensitivities"]),
                  transfer_error=trans_error,
                  feature_selector=FeatureSelectionPipeline([
                      FractionTailSelector(0.5),
                      FixedNElementTailSelector(1)
                  ]),
                  train_clf=True)

        # and we get a sensitivity analyzer which works on splits and uses
        # the aggregated sensitivities
        selected_features = rfe(self.dataset)
Code example #9
    def test_ifs(self, svm):

        # data measure and transfer error quantifier use the SAME clf!
        trans_error = TransferError(svm)
        data_measure = CrossValidatedTransferError(trans_error,
                                                   NFoldSplitter(1),
                                                   postproc=mean_sample())

        ifs = IFS(data_measure,
                  trans_error,
                  # go for lower tail selection as data_measure will return
                  # errors -> low is good
                  feature_selector=FixedNElementTailSelector(1, tail='lower',
                                                             mode='select'))
        wdata = self.get_data()
        wdata_nfeatures = wdata.nfeatures
        tdata = self.get_data()
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = ifs(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # check that the feature set with the least error is selected
        self.failUnless(len(ifs.ca.errors))
        e = np.array(ifs.ca.errors)
        self.failUnless(sdata.nfeatures == e.argmin() + 1)

        # repeat with dataset where selection order is known
        signal = datasets['dumb2']
        sdata, stdata = ifs(signal, signal)
        self.failUnless((sdata.samples[:, 0] == signal.samples[:, 0]).all())
Code example #10
    def test_feature_selector(self):
        """Test feature selector"""
        # remove the 10% weakest
        selector = FractionTailSelector(0.1)
        data = np.array([3.5, 10, 7, 5, -0.4, 0, 0, 2, 10, 9])
        # == rank [4, 5, 6, 7, 0, 3, 2, 9, 1, 8]
        target10 = np.array([0, 1, 2, 3, 5, 6, 7, 8, 9])
        target30 = np.array([0, 1, 2, 3, 7, 8, 9])

        self.failUnlessRaises(UnknownStateError,
                              selector.ca.__getattribute__, 'ndiscarded')
        self.failUnless((selector(data) == target10).all())
        selector.felements = 0.30      # discard 30%
        self.failUnless(selector.felements == 0.3)
        self.failUnless((selector(data) == target30).all())
        self.failUnless(selector.ca.ndiscarded == 3) # 3 were discarded

        selector = FixedNElementTailSelector(1)
        #                   0   1   2  3   4    5  6  7  8   9
        data = np.array([3.5, 10, 7, 5, -0.4, 0, 0, 2, 10, 9])
        self.failUnless((selector(data) == target10).all())

        selector.nelements = 3
        self.failUnless(selector.nelements == 3)
        self.failUnless((selector(data) == target30).all())
        self.failUnless(selector.ca.ndiscarded == 3)

        # test range selector
        # simple range 'above'
        self.failUnless((RangeElementSelector(lower=0)(data) == \
                         np.array([0,1,2,3,7,8,9])).all())

        self.failUnless((RangeElementSelector(lower=0,
                                              inclusive=True)(data) == \
                         np.array([0,1,2,3,5,6,7,8,9])).all())

        self.failUnless((RangeElementSelector(lower=0, mode='discard',
                                              inclusive=True)(data) == \
                         np.array([4])).all())

        # simple range 'below'
        self.failUnless((RangeElementSelector(upper=2)(data) == \
                         np.array([4,5,6])).all())

        self.failUnless((RangeElementSelector(upper=2,
                                              inclusive=True)(data) == \
                         np.array([4,5,6,7])).all())

        self.failUnless((RangeElementSelector(upper=2, mode='discard',
                                              inclusive=True)(data) == \
                         np.array([0,1,2,3,8,9])).all())


        # ranges
        self.failUnless((RangeElementSelector(lower=2, upper=9)(data) == \
                         np.array([0,2,3])).all())

        self.failUnless((RangeElementSelector(lower=2, upper=9,
                                              inclusive=True)(data) == \
                         np.array([0,2,3,7,9])).all())

        self.failUnless((RangeElementSelector(upper=2, lower=9, mode='discard',
                                              inclusive=True)(data) ==
                         RangeElementSelector(lower=2, upper=9,
                                              inclusive=False)(data)).all())

        # non-0 elements -- should be equivalent to np.nonzero()[0]
        self.failUnless((RangeElementSelector()(data) == \
                         np.nonzero(data)[0]).all())
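For intuition, a plain-NumPy sketch of the discard-mode tail selection exercised above (standalone and illustrative, not PyMVPA API):

import numpy as np

# What FixedNElementTailSelector(3, mode='discard') effectively computes:
# drop the ids of the 3 smallest values, return the remaining ids sorted.
data = np.array([3.5, 10, 7, 5, -0.4, 0, 0, 2, 10, 9])
order = np.argsort(data)     # ascending: [4, 5, 6, 7, 0, 3, 2, 9, 1, 8]
kept = np.sort(order[3:])    # discard the lower tail of 3 elements
print kept                   # -> [0 1 2 3 7 8 9], i.e. target30 above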
Code example #11
File: warehouse.py Project: heqing-psychology/PyMVPA
        descr="kNN on SMLR(lm=1) non-0")

clfswh += \
    FeatureSelectionClassifier(
        kNN(),
        SensitivityBasedFeatureSelection(
           OneWayAnova(),
           FractionTailSelector(0.05, mode='select', tail='upper')),
        descr="kNN on 5%(ANOVA)")

clfswh += \
    FeatureSelectionClassifier(
        kNN(),
        SensitivityBasedFeatureSelection(
           OneWayAnova(),
           FixedNElementTailSelector(50, mode='select', tail='upper')),
        descr="kNN on 50(ANOVA)")

# GNB
clfswh += GNB(descr="GNB()")
clfswh += GNB(common_variance=True, descr="GNB(common_variance=True)")
clfswh += GNB(prior='uniform', descr="GNB(prior='uniform')")
clfswh += \
    FeatureSelectionClassifier(
        GNB(),
        SensitivityBasedFeatureSelection(
           OneWayAnova(),
           FractionTailSelector(0.05, mode='select', tail='upper')),
        descr="GNB on 5%(ANOVA)")

# GPR
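The warehouse accumulates classifiers via += so they can later be retrieved by tag. A hedged usage sketch (the tag name is an assumption -- check the actual __tags__ of the warehouse entries):

from mvpa.clfs.warehouse import clfswh

# Hypothetical lookup: iterate over warehouse classifiers carrying a tag.
for clf in clfswh['gnb']:
    print clf.descr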
Code example #12
def svms_for_CombinedClassifier():
    """For my iEEG study, I use a CombinedClassifier. The components are defined here"""
    clfrs = []
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                #SVM(descr = "libsvm.LinSVM(C=def)", probability = 1).getSensitivityAnalyzer(transformer=mvpa.misc.transformers.Absolute),
                OneWayAnova(),
                FixedNElementTailSelector(500, mode='select', tail='upper')),
            descr="LinSVM on 500 (Anova)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                #SVM(descr = "libsvm.LinSVM(C=def)", probability = 1).getSensitivityAnalyzer(transformer=mvpa.misc.transformers.Absolute),
                OneWayAnova(),
                FixedNElementTailSelector(300, mode='select', tail='upper')),
            descr="LinSVM on 300 (Anova)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                #SVM(descr = "libsvm.LinSVM(C=def)", probability = 1).getSensitivityAnalyzer(transformer=mvpa.misc.transformers.Absolute),
                OneWayAnova(),
                FixedNElementTailSelector(200, mode='select', tail='upper')),
            descr="LinSVM on 200 (Anova)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                #SVM(descr = "libsvm.LinSVM(C=def)", probability = 1).getSensitivityAnalyzer(transformer=mvpa.misc.transformers.Absolute),
                OneWayAnova(),
                FixedNElementTailSelector(100, mode='select', tail='upper')),
            descr="LinSVM on 100 (Anova)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                SVM(descr="libsvm.LinSVM(C=def)",
                    probability=1).getSensitivityAnalyzer(
                        transformer=mvpa.misc.transformers.Absolute),
                #OneWayAnova(),
                FixedNElementTailSelector(500, mode='select', tail='upper')),
            descr="LinSVM on 500 (SVM)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                SVM(descr="libsvm.LinSVM(C=def)",
                    probability=1).getSensitivityAnalyzer(
                        transformer=mvpa.misc.transformers.Absolute),
                #OneWayAnova(),
                FixedNElementTailSelector(300, mode='select', tail='upper')),
            descr="LinSVM on 300 (SVM)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                SVM(descr="libsvm.LinSVM(C=def)",
                    probability=1).getSensitivityAnalyzer(
                        transformer=mvpa.misc.transformers.Absolute),
                #OneWayAnova(),
                FixedNElementTailSelector(200, mode='select', tail='upper')),
            descr="LinSVM on 200 (SVM)"))
    clfrs.append(
        FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                SVM(descr="libsvm.LinSVM(C=def)",
                    probability=1).getSensitivityAnalyzer(
                        transformer=mvpa.misc.transformers.Absolute),
                #OneWayAnova(),
                FixedNElementTailSelector(100, mode='select', tail='upper')),
            descr="LinSVM on 100 (SVM)"))
    return clfrs
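Since the eight classifiers above differ only in the number of selected features and the sensitivity measure, the same list can be built in a loop; a sketch under the same imports as the snippet:

def svms_for_CombinedClassifier_compact():
    """Equivalent construction of the eight classifiers above, in a loop."""
    clfrs = []
    for n in (500, 300, 200, 100):
        # ANOVA-based feature selection
        clfrs.append(FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                OneWayAnova(),
                FixedNElementTailSelector(n, mode='select', tail='upper')),
            descr="LinSVM on %d (Anova)" % n))
    for n in (500, 300, 200, 100):
        # SVM-weight-based feature selection
        clfrs.append(FeatureSelectionClassifier(
            SVM(descr="libsvm.LinSVM(C=def)", probability=1),
            SensitivityBasedFeatureSelection(
                SVM(descr="libsvm.LinSVM(C=def)",
                    probability=1).getSensitivityAnalyzer(
                        transformer=mvpa.misc.transformers.Absolute),
                FixedNElementTailSelector(n, mode='select', tail='upper')),
            descr="LinSVM on %d (SVM)" % n))
    return clfrs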
Code example #13
    def test_analyzer_with_split_classifier(self, clfds):
        """Test analyzers in split classifier
        """
        clf, ds = clfds  # unroll the tuple
        # We need to skip some LARSes here
        _sclf = str(clf)
        if 'LARS(' in _sclf and "type='stepwise'" in _sclf:
            # ADD KnownToFail thingie from NiPy
            return

        # To avoid wasting too much time testing, let's limit it to 3 splits
        nsplits = 3
        splitter = NFoldSplitter(count=nsplits)
        mclf = SplitClassifier(clf=clf,
                               splitter=splitter,
                               enable_ca=['training_confusion', 'confusion'])
        sana = mclf.get_sensitivity_analyzer(  # postproc=absolute_features(),
            enable_ca=["sensitivities"])

        ulabels = ds.uniquetargets
        nlabels = len(ulabels)
        # Can't rely on splitcfg since count-limit is done in __call__
        assert (nsplits == len(list(splitter(ds))))
        sens = sana(ds)

        # It should return either ...
        #  nlabels * nsplits
        req_nsamples = [nlabels * nsplits]
        if nlabels == 2:
            # A single sensitivity in case of binary
            req_nsamples += [nsplits]
        else:
            # and for pairs in case of multiclass
            req_nsamples += [(nlabels * (nlabels - 1) / 2) * nsplits]
            # and for 1-vs-1 embedded within Multiclass operating on
            # pairs (e.g. SMLR)
            req_nsamples += [req_nsamples[-1] * 2]

            # Also for regression_based -- they can do multiclass
            # but only 1 sensitivity is provided
            if 'regression_based' in clf.__tags__:
                req_nsamples += [nsplits]

        # # of features should correspond
        self.failUnlessEqual(sens.shape[1], ds.nfeatures)
        # # of samples/sensitivities should also be reasonable
        self.failUnless(sens.shape[0] in req_nsamples)

        # Check if labels are present
        self.failUnless('splits' in sens.sa)
        self.failUnless('targets' in sens.sa)
        # should be 1D -- otherwise dtype object
        self.failUnless(sens.sa.targets.ndim == 1)

        sens_ulabels = sens.sa['targets'].unique
        # Some labels might be pairs(tuples) so ndarray would be of
        # dtype object and we would need to get them all
        if sens_ulabels.dtype is np.dtype('object'):
            sens_ulabels = np.unique(
                reduce(lambda x, y: x + y, [list(x) for x in sens_ulabels]))

        assert_array_equal(sens_ulabels, ds.sa['targets'].unique)

        errors = [x.percent_correct for x in sana.clf.ca.confusion.matrices]

        # lets go through all sensitivities and see if we selected the right
        # features
        #if 'meta' in clf.__tags__ and len(sens.samples[0].nonzero()[0])<2:
        if '5%' in clf.descr \
               or (nlabels > 2 and 'regression_based' in clf.__tags__):
            # Some meta classifiers (5% of ANOVA) are too harsh ;-)
            # if we get fewer than 2 features with non-zero sensitivities we
            # cannot really test
            # Also -- regression-based classifiers' performance for multiclass
            # is expected to suck in general
            return

        if cfg.getboolean('tests', 'labile', default='yes'):
            for conf_matrix in [sana.clf.ca.training_confusion] \
                              + sana.clf.ca.confusion.matrices:
                self.failUnless(
                    conf_matrix.percent_correct>=70,
                    msg="We must have trained on each one more or " \
                    "less correctly. Got %f%% correct on %d labels" %
                    (conf_matrix.percent_correct,
                     nlabels))

        # Since now we have per split and possibly per label -- let's just
        # aggregate (sum) per feature per label across splits
        sensm = FxMapper('samples', lambda x: np.sum(x),
                         uattrs=['targets'])(sens)
        sensgm = maxofabs_sample()(sensm)  # global max of abs of means

        assert_equal(sensgm.shape[0], 1)
        assert_equal(sensgm.shape[1], ds.nfeatures)

        selected = FixedNElementTailSelector(len(ds.a.bogus_features))(
            sensgm.samples[0])

        if cfg.getboolean('tests', 'labile', default='yes'):

            self.failUnlessEqual(
                set(selected),
                set(ds.a.nonbogus_features),
                msg="At the end we should have selected the right features. "
                "Chose %s whereas nonbogus are %s" %
                (selected, ds.a.nonbogus_features))

            # Now test each one per label
            # TODO: collect all failures and spit them out at once --
            #       that would make it easy to see if the sensitivity
            #       just has incorrect order of labels assigned
            for sens1 in sensm:
                labels1 = sens1.targets  # labels (1) for this sensitivity
                lndim = labels1.ndim
                label = labels1[0]  # current label

                # XXX whole lndim comparison should be gone after
                #     things get fixed and we arrive here with a tuple!
                if lndim == 1:  # just a single label
                    self.failUnless(label in ulabels)

                    ilabel_all = np.where(ds.fa.targets == label)[0]
                    # should have just 1 feature for the label
                    self.failUnlessEqual(len(ilabel_all), 1)
                    ilabel = ilabel_all[0]

                    maxsensi = np.argmax(sens1)  # index of max sensitivity
                    self.failUnlessEqual(
                        maxsensi, ilabel,
                        "Maximal sensitivity for %s was found in %i whereas"
                        " the original feature was %i for nonbogus features %s" %
                        (labels1, maxsensi, ilabel, ds.a.nonbogus_features))
                elif lndim == 2 and labels1.shape[1] == 2:  # pair of labels
                    # we should have highest (in abs) coefficients in
                    # those two labels
                    maxsensi2 = np.argsort(np.abs(sens1))[0][-2:]
                    ilabel2 = [
                        np.where(ds.fa.targets == l)[0][0] for l in label
                    ]
                    self.failUnlessEqual(
                        set(maxsensi2), set(ilabel2),
                        "Maximal sensitivity for %s was found in %s whereas"
                        " the original features were %s for nonbogus features %s" %
                        (labels1, maxsensi2, ilabel2, ds.a.nonbogus_features))
                    # Now test for the sign of each one in the pair ;) In
                    # all binary problems L1 (-1) -> L2 (+1), so the weights
                    # for L2 should be positive; to test for L1 -- invert the
                    # sign.  We already know (if we haven't failed the
                    # previous test) that those 2 were the strongest -- so
                    # check only the signs
                    self.failUnless(
                        sens1.samples[0, ilabel2[0]] < 0,
                        "With %i classes in pair %s got feature %i for %r >= 0"
                        % (nlabels, label, ilabel2[0], label[0]))
                    self.failUnless(
                        sens1.samples[0, ilabel2[1]] > 0,
                        "With %i classes in pair %s got feature %i for %r <= 0"
                        % (nlabels, label, ilabel2[1], label[1]))
                else:
                    # yoh could be wrong at this assumption... time will show
                    self.fail("Got unknown number of labels per sensitivity:"
                              " %s. Should be either a single label or a pair"
                              % labels1)