Example #1
def test_attrmap_conflicts():
    am_n = AttributeMap({'a':1, 'b':2, 'c':1})
    am_t = AttributeMap({'a':1, 'b':2, 'c':1}, collisions_resolution='tuple')
    am_l = AttributeMap({'a':1, 'b':2, 'c':1}, collisions_resolution='lucky')
    q_f = ['a', 'b', 'a', 'c']
    # should have no effect on forward mapping
    ok_(np.all(am_n.to_numeric(q_f) == am_t.to_numeric(q_f)))
    ok_(np.all(am_t.to_numeric(q_f) == am_l.to_numeric(q_f)))

    assert_raises(ValueError, am_n.to_literal, [2])
    r_t = am_t.to_literal([2, 1])
    r_l = am_l.to_literal([2, 1])
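For orientation, a hedged sketch of what the two collision-resolution modes above are expected to return (assumed semantics, not taken from the test itself): 'tuple' collects every literal that shares a numeric value, while 'lucky' simply picks one of them.

am_t = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
am_l = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
am_t.to_literal([2, 1])  # expected: ['b', ('a', 'c')] -- colliding literals come back as a tuple
am_l.to_literal([2, 1])  # expected: ['b', 'a'] or ['b', 'c'] -- one colliding literal picked arbitrarily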
Example #2
    def test_sensitivity_based_feature_selection(self, clf):

        # sensitivity analyser and transfer error quantifier use the SAME clf!
        sens_ana = clf.get_sensitivity_analyzer(postproc=maxofabs_sample())

        # of features to remove
        Nremove = 2

        # because the clf is already trained when computing the sensitivity
        # map, prevent retraining for transfer error calculation
        # Use the absolute values of the SVM weights as sensitivity
        fe = SensitivityBasedFeatureSelection(
            sens_ana, feature_selector=FixedNElementTailSelector(2), enable_ca=["sensitivity", "selected_ids"]
        )

        wdata = self.get_data()
        tdata = self.get_data_t()
        # XXX for now convert to numeric labels, but should better be taken
        # care of during clf refactoring
        am = AttributeMap()
        wdata.targets = am.to_numeric(wdata.targets)
        tdata.targets = am.to_numeric(tdata.targets)

        wdata_nfeatures = wdata.nfeatures
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = fe(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # sanity check that exactly Nremove features got removed
        self.failUnlessEqual(wdata.nfeatures, sdata.nfeatures + Nremove, msg="Had to remove exactly %d features" % Nremove)

        self.failUnlessEqual(
            tdata.nfeatures, stdata.nfeatures + Nremove, msg="Had to remove exactly %d features in testing as well" % Nremove
        )

        self.failUnlessEqual(
            fe.ca.sensitivity.nfeatures, wdata_nfeatures, msg="Sensitivity has to have # of features equal to the original"
        )

        self.failUnlessEqual(
            len(fe.ca.selected_ids),
            sdata.nfeatures,
            msg="# of selected features must equal that of the result dataset",
        )
Example #3
    def test_null_dist_prob_any(self):
        """Test 'any' tail statistics estimation"""
        skip_if_no_external('scipy')

        # test 'any' mode
        from mvpa.measures.corrcoef import CorrCoef
        ds = datasets['uni2small']

        null = MCNullDist(permutations=10, tail='any')

        assert_raises(ValueError, null.fit, CorrCoef(), ds)
        # cheat and map to numeric for this test
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)
        null.fit(CorrCoef(), ds)

        # 100 and -100 should both have zero probability on their respective
        # tails
        pm100 = null.p([-100, 0, 0, 0, 0, 0])
        p100 = null.p([100, 0, 0, 0, 0, 0])
        assert_array_almost_equal(pm100, p100)

        # With only 10 samples it isn't that easy to get reliable sampling
        # for the non-parametric estimate, so we can allow somewhat low
        # significance ;-)
        self.failUnless(pm100[0] <= 0.1)
        self.failUnless(p100[0] <= 0.1)

        self.failUnless(np.all(pm100[1:] >= 0.1))
        self.failUnless(np.all(p100[1:] >= 0.1))
        # same test with just scalar measure/feature
        null.fit(CorrCoef(), ds[:, 0])
        p_100 = null.p(100)
        self.failUnlessAlmostEqual(null.p(-100), p_100)
        self.failUnlessAlmostEqual(p100[0], p_100)
Example #4
    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = None
        """Stores number of features for which classifier was trained.
        If None -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)
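A minimal round-trip sketch of what the default AttributeMap created above provides (the concrete numeric codes it assigns are treated as an implementation detail and not asserted here):

am = AttributeMap()
num = am.to_numeric(['face', 'house', 'face'])  # literal labels -> numeric codes
lit = am.to_literal(num)                        # ... and back: ['face', 'house', 'face']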
Example #5
    def test_sensitivity_based_feature_selection(self, clf):

        # sensitivity analyser and transfer error quantifier use the SAME clf!
        sens_ana = clf.get_sensitivity_analyzer(postproc=maxofabs_sample())

        # of features to remove
        Nremove = 2

        # because the clf is already trained when computing the sensitivity
        # map, prevent retraining for transfer error calculation
        # Use the absolute values of the SVM weights as sensitivity
        fe = SensitivityBasedFeatureSelection(sens_ana,
                feature_selector=FixedNElementTailSelector(2),
                enable_ca=["sensitivity", "selected_ids"])

        wdata = self.get_data()
        tdata = self.get_data_t()
        # XXX for now convert to numeric labels, but should better be taken
        # care of during clf refactoring
        am = AttributeMap()
        wdata.targets = am.to_numeric(wdata.targets)
        tdata.targets = am.to_numeric(tdata.targets)

        wdata_nfeatures = wdata.nfeatures
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = fe(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # sanity check that exactly Nremove features got removed
        self.failUnlessEqual(wdata.nfeatures, sdata.nfeatures+Nremove,
            msg="Had to remove exactly %d features" % Nremove)

        self.failUnlessEqual(tdata.nfeatures, stdata.nfeatures+Nremove,
            msg="Had to remove exactly %d features in testing as well" % Nremove)

        self.failUnlessEqual(fe.ca.sensitivity.nfeatures, wdata_nfeatures,
            msg="Sensitivity has to have # of features equal to the original")

        self.failUnlessEqual(len(fe.ca.selected_ids), sdata.nfeatures,
            msg="# of selected features must equal that of the result dataset")
Example #6
    def _call(self, dataset):
        sens = super(self.__class__, self)._call(dataset)
        clf = self.clf
        targets_attr = clf.params.targets_attr
        if targets_attr in sens.sa:
            # if labels are present -- transform them into meaningful tuples
            # (or not if just a single beast)
            am = AttributeMap(dict([(l, -1) for l in clf.neglabels] +
                                   [(l, +1) for l in clf.poslabels]))

            # XXX here we still can get a sensitivity per each label
            # (e.g. with SMLR as the slave clf), so I guess we should
            # tune up Multiclass...Analyzer to add an additional sa
            # And here we might need to check if asobjarray call is necessary
            # and should be actually done
            #asobjarray(
            sens.sa[targets_attr] = \
                am.to_literal(sens.sa[targets_attr].value, recurse=True)
        return sens
Example #7
    def _call(self, dataset):
        sens = super(self.__class__, self)._call(dataset)
        clf = self.clf
        targets_attr = clf.get_space()
        if targets_attr in sens.sa:
            # if labels are present -- transform them into meaningful tuples
            # (or not if just a single beast)
            am = AttributeMap(dict([(l, -1) for l in clf.neglabels] +
                                   [(l, +1) for l in clf.poslabels]))

            # XXX here we still can get a sensitivity per each label
            # (e.g. with SMLR as the slave clf), so I guess we should
            # tune up Multiclass...Analyzer to add an additional sa
            # And here we might need to check if asobjarray call is necessary
            # and should be actually done
            #asobjarray(
            sens.sa[targets_attr] = \
                am.to_literal(sens.sa[targets_attr].value, recurse=True)
        return sens
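To make the remapping above concrete, a hedged sketch (assumed behaviour) of an explicit literal-to-numeric map like the one built from neglabels/poslabels, including the recurse=True round-trip over nested sequences:

am = AttributeMap({'rest': -1, 'scrambled': +1})
am.to_numeric(['scrambled', 'rest'])             # expected: [ 1, -1]
am.to_literal([(-1, 1), (1, -1)], recurse=True)  # expected: [('rest', 'scrambled'), ('scrambled', 'rest')]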
Example #8
    def test_regressions_classifiers(self, clf):
        """Simple tests on regressions being used as classifiers
        """
        # check if we get values set correctly
        clf.ca.change_temporarily(enable_ca=['estimates'])
        self.failUnlessRaises(UnknownStateError, clf.ca['estimates']._get)
        cv = CrossValidatedTransferError(
            TransferError(clf),
            NFoldSplitter(),
            enable_ca=['confusion', 'training_confusion'])
        ds = datasets['uni2small'].copy()
        # we want numeric labels to maintain the previous behavior, especially
        # since we deal with regressions here
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)
        cverror = cv(ds)

        self.failUnless(len(clf.ca.estimates) == ds[ds.chunks == 1].nsamples)
        clf.ca.reset_changed_temporarily()
Example #9
    def test_degenerate_usage(self, clf):
        """Test how clf handles degenerate cases
        """
        # Whenever we have only 1 feature with only 0s in it
        ds1 = datasets['uni2small'][:, [0]]
        # XXX this very line breaks LARS in many other unittests --
        # very interesting effect. but screw it -- for now it will be
        # this way
        ds1.samples[:] = 0.0  # all 0s
        # For regression we need numbers
        if clf.__is_regression__:
            ds1.targets = AttributeMap().to_numeric(ds1.targets)
        #ds2 = datasets['uni2small'][[0], :]
        #ds2.samples[:] = 0.0             # all 0s

        clf.ca.change_temporarily(
            enable_ca=['estimates', 'training_confusion'])

        # Good pukes are good ;-)
        # TODO XXX add
        #  - ", ds2):" to test degenerate ds with 1 sample
        #  - ds1 but without 0s -- just 1 feature... feature selections
        #    might lead to 'surprises' due to magic in combiners etc
        for ds in (ds1, ):
            try:
                try:
                    clf.train(ds)  # should not crash or stall
                except (ValueError), e:
                    self.fail(
                        "Failed to train on degenerate data. Error was %r" % e)
                # could we still get those?
                _ = clf.summary()
                cm = clf.ca.training_confusion
                # If succeeded to train/predict (due to
                # training_confusion) without error -- results better be
                # at "chance"
                # NB this 'continue' leaves the chance-level checks below disabled
                continue
                if 'ACC' in cm.stats:
                    self.failUnlessEqual(cm.stats['ACC'], 0.5)
                else:
                    self.failUnless(np.isnan(cm.stats['CCe']))
            except tuple(_degenerate_allowed_exceptions):
                pass
Example #10
class SVM(_SVM):
    """Support Vector Machine Classifier(s) based on Shogun

    This is a simple base interface
    """
    __default_kernel_class__ = _default_kernel_class_
    num_threads = Parameter(1, min=1, doc='Number of threads to utilize')

    _KNOWN_PARAMS = ['epsilon']

    __tags__ = _SVM.__tags__ + ['sg', 'retrainable']

    # Some words of wisdom from shogun author:
    # XXX remove after proper comments added to implementations
    """
    If you'd like to train linear SVMs use SGD or OCAS. These are (I am
    serious) the fastest linear SVM-solvers to date. (OCAS cannot do SVMs
    with standard additive bias, but will L2 regularize it - though it
    should not matter much in practice (although it will give slightly
    different solutions)). Note that SGD has no stopping criterion (you
    simply have to specify the number of iterations) and that OCAS has a
    different stopping condition than svmlight for example which may be more
    tight or more loose depending on the problem - I suggest 1e-2 or 1e-3
    for epsilon.

    If you would like to train kernel SVMs use libsvm/gpdt/svmlight -
    depending on the problem one is faster than the other (hard to say when,
    I *think* when your dataset is very unbalanced chunking methods like
    svmlight/gpdt are better), for smaller problems definitely libsvm.

    If you use string kernels then gpdt/svmlight have a special 'linadd'
    speedup for this (requires sg 0.6.2 - there was some inefficiency in the
    code for python-modular before that). This is effective for big datasets
    (I trained on 10 million strings based on this).

    And yes currently we only implemented parallel training for svmlight,
    however all SVMs can be evaluated in parallel.
    """
    _KNOWN_SENSITIVITIES = {
        'linear': LinearSVMWeights,
    }
    _KNOWN_IMPLEMENTATIONS = {}
    if externals.exists('shogun', raise_=True):
        _KNOWN_IMPLEMENTATIONS = {
            "libsvm" : (shogun.Classifier.LibSVM, ('C',),
                       ('multiclass', 'binary'),
                        "LIBSVM's C-SVM (L2 soft-margin SVM)"),
            "gmnp" : (shogun.Classifier.GMNPSVM, ('C',),
                     ('multiclass', 'binary'),
                      "Generalized Nearest Point Problem SVM"),
            # XXX should have been GPDT, shogun has it fixed since some version
            "gpbt" : (shogun.Classifier.GPBTSVM, ('C',), ('binary',),
                      "Gradient Projection Decomposition Technique for " \
                      "large-scale SVM problems"),
            "gnpp" : (shogun.Classifier.GNPPSVM, ('C',), ('binary',),
                      "Generalized Nearest Point Problem SVM"),

            ## TODO: Needs sparse features...
            # "svmlin" : (shogun.Classifier.SVMLin, ''),
            # "liblinear" : (shogun.Classifier.LibLinear, ''),
            # "subgradient" : (shogun.Classifier.SubGradientSVM, ''),
            ## good 2-class linear SVMs
            # "ocas" : (shogun.Classifier.SVMOcas, ''),
            # "sgd" : ( shogun.Classifier.SVMSGD, ''),

            # regressions
            "libsvr": (shogun.Regression.LibSVR, ('C', 'tube_epsilon',),
                      ('regression',),
                       "LIBSVM's epsilon-SVR"),
            }

    def __init__(self, **kwargs):
        """Interface class to Shogun's classifiers and regressions.

        Default implementation is 'libsvm'.
        """

        svm_impl = kwargs.get('svm_impl', 'libsvm').lower()
        kwargs['svm_impl'] = svm_impl

        # init base class
        _SVM.__init__(self, **kwargs)

        self.__svm = None
        """Holds the trained svm."""

        # Need to store original data...
        # TODO: keep 1 of them -- just __traindata or __traindataset
        # For now it is needed for computing sensitivities
        self.__traindataset = None

        # internal SG swig proxies
        self.__traindata = None
        self.__kernel = None
        self.__kernel_test = None
        self.__testdata = None

    # TODO: integrate with kernel framework
    #def __condition_kernel(self, kernel):
    ## XXX I thought that it is needed only for retrainable classifier,
    ##     but then krr gets confused, and svrlight needs it to provide
    ##     meaningful results even without 'retraining'
    #if self._svm_impl in ['svrlight', 'lightsvm']:
    #try:
    #kernel.set_precompute_matrix(True, True)
    #except Exception, e:
    ## N/A in shogun 0.9.1... TODO: RF
    #if __debug__:
    #debug('SG_', "Failed call to set_precompute_matrix for %s: %s"
    #% (self, e))

    def _train(self, dataset):
        """Train SVM
        """

        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable

        targets_sa_name = params.targets_attr  # name of targets sa
        targets_sa = dataset.sa[targets_sa_name]  # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset

        # OK -- we have to map labels since
        #  binary ones expect -1/+1
        #  Multiclass expect labels starting with 0, otherwise they puke
        #   when run from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([(ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)

            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')

        # KERNEL

        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get(
                'kernel_params', False)

        if not retrainable \
               or _changedData['traindata'] or _changedData['kernel_params']:
            # If needed compute or just collect arguments for SVM and for
            # the kernel

            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug(
                        "SG",
                        "Re-Creating kernel since training data has changed")

                if _changedData['kernel_params']:
                    debug(
                        "SG",
                        "Re-Creating kernel since params %s has changed" %
                        _changedData['kernel_params'])

            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')

            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, ie without recreating
        # whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not jump over the head and leave it up to the user
                #     ie do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitely if to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accomodate the "
                #                  "difference in number of training samples" %
                #                  Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % ` svm_impl_class `)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions constructor a bit different
                self.__svm = svm_impl_class(Cs[0], self.params.tube_epsilon,
                                            self.__kernel, labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau, self.__kernel,
                                            labels)
            else:
                self.__svm = svm_impl_class(Cs[0], self.__kernel, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug(
                        "SG_",
                        "Since multiple Cs are provided: %s, assign them" % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')
            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:  # labels were changed
                if __debug__: debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:  # kernel was replaced
                if __debug__: debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert (_changedData['params'] is False
                    )  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug(
                "SG", "%sTraining %s on data%s" %
                (("", "Re-")[retrainable and self.ca.retrained], self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_confusion'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm.classify().get_labels()

        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug(
                "SG__", "Original labels: %s, Trained labels: %s" %
                (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets' and
        #     use it within base Classifier._posttrain to assign predictions
        #     instead of duplicating code here
        # XXX For now it can be done only for regressions since labels need to
        #     be remapped and that becomes even worse if we use regression
        #     as a classifier so mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_confusion'):
            self.ca.training_confusion = self.__summary_class__(
                targets=targets_sa.value, predictions=trained_targets)

    # XXX actually this is the beast which started this evil conversion
    #     so -- make use of dataset here! ;)
    @accepts_samples_as_dataset
    def _predict(self, data):
        """Predict values for the data
        """

        retrainable = self.params.retrainable

        if retrainable:
            changed_testdata = self._changedData['testdata'] or \
                               self.__kernel_test is None

        if not retrainable:
            if __debug__:
                debug(
                    "SG__",
                    "Initializing SVMs kernel of %s with training/testing samples"
                    % self)
            self.params.kernel.compute(self.__traindataset, data)
            self.__kernel_test = self.params.kernel.as_sg()._k
            # We can just reuse kernel used for training
            #self.__condition_kernel(self.__kernel)

        else:
            if changed_testdata:
                #if __debug__:
                #debug("SG__",
                #"Re-creating testing kernel of %s giving "
                #"arguments %s" %
                #(`self._kernel_type`, self.__kernel_args))
                self.params.kernel.compute(self.__traindataset, data)

                #_setdebug(kernel_test, 'Kernels')

                #_setdebug(kernel_test_custom, 'Kernels')
                self.__kernel_test = self.params.kernel.as_raw_sg()

            elif __debug__:
                debug("SG__", "Re-using testing kernel")

        assert (self.__kernel_test is not None)
        self.__svm.set_kernel(self.__kernel_test)

        if __debug__:
            debug("SG_", "Classifying testing data")

        # doesn't do any good imho although on unittests helps tiny bit... hm
        #self.__svm.init_kernel_optimization()
        values_ = self.__svm.classify()
        if values_ is None:
            raise RuntimeError, "We got empty list of values from %s" % self

        values = values_.get_labels()

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.repredicted = repredicted = not changed_testdata
            if __debug__:
                debug(
                    "SG__", "Re-assigning learing kernel. Repredicted is %s" %
                    repredicted)
            # return back original kernel
            self.__svm.set_kernel(self.__kernel)

        if __debug__:
            debug("SG__", "Got values %s" % values)

        if (self.__is_regression__):
            predictions = values
        else:
            if len(self._attrmap.keys()) == 2:
                predictions = np.sign(values)
            else:
                predictions = values

            # remap labels back adjusting their type
            # XXX YOH: This is done by topclass now (needs RF)
            #predictions = self._attrmap.to_literal(predictions)

            if __debug__:
                debug("SG__", "Tuned predictions %s" % predictions)

        # store conditional attribute
        # TODO: extract values properly for multiclass SVMs --
        #       ie 1 value per label or pairs for all 1-vs-1 classifications
        self.ca.estimates = values

        ## to avoid leaks with not yet properly fixed shogun
        if not retrainable:
            try:
                # NB 'testdata' is not defined in this code path, so the call
                #    below raises NameError, which the bare except swallows
                testdata.free_features()
            except:
                pass

        return predictions

    def untrain(self):
        super(SVM, self).untrain()
        # untrain/clean the kernel -- we might not allow to drag SWIG
        # instance around BUT XXX -- make it work fine with
        # CachedKernel -- we might not want to fully "untrain" in such
        # case
        self.params.kernel.cleanup()  # XXX unify naming
        if not self.params.retrainable:
            if __debug__:
                debug("SG__",
                      "Untraining %(clf)s and destroying sg's SVM",
                      msgargs={'clf': self})

            # to avoid leaks with not yet properly fixed shogun
            # XXX make it nice... now it is just stable ;-)
            if True:  # not self.__traindata is None:
                if True:
                    # try:
                    if self.__kernel is not None:
                        del self.__kernel
                        self.__kernel = None

                    if self.__kernel_test is not None:
                        del self.__kernel_test
                        self.__kernel_test = None

                    if self.__svm is not None:
                        del self.__svm
                        self.__svm = None

                    if self.__traindata is not None:
                        # Let in for easy demonstration of the memory leak in shogun
                        #for i in xrange(10):
                        #    debug("SG__", "cachesize pre free features %s" %
                        #          (self.__svm.get_kernel().get_cache_size()))
                        self.__traindata.free_features()
                        del self.__traindata
                        self.__traindata = None

                    self.__traindataset = None

                #except:
                #    pass

            if __debug__:
                debug("SG__",
                      "Done untraining %(self)s and destroying sg's SVM",
                      msgargs=locals())
        elif __debug__:
            debug("SG__",
                  "Not untraining %(self)s since it is retrainable",
                  msgargs=locals())

    def __get_implementation(self, ul):
        if self.__is_regression__ or len(ul) == 2:
            svm_impl_class = SVM._KNOWN_IMPLEMENTATIONS[self._svm_impl][0]
        else:
            if self._svm_impl == 'libsvm':
                svm_impl_class = shogun.Classifier.LibSVMMultiClass
            elif self._svm_impl == 'gmnp':
                svm_impl_class = shogun.Classifier.GMNPSVM
            else:
                raise RuntimeError, \
                      "Shogun: Implementation %s doesn't handle multiclass " \
                      "data. Got labels %s. Use some other classifier" % \
                      (self._svm_impl,
                       self.__traindataset.sa[self.params.targets_attr].unique)
            if __debug__:
                debug(
                    "SG_", "Using %s for multiclass data of %s" %
                    (svm_impl_class, self._svm_impl))

        return svm_impl_class

    svm = property(fget=lambda self: self.__svm)
    """Access to the SVM model."""

    traindataset = property(fget=lambda self: self.__traindataset)
    """Dataset which was used for training
Example #11
def plot_decision_boundary_2d(dataset, clf=None,
                              targets=None, regions=None, maps=None,
                              maps_res=50, vals=[-1, 0, 1],
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was trained on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier
    targets : string, optional
      Which samples attribute to use for targets.  If None and clf is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  A higher number will yield smoother decision lines but
      comes at the cost of O(maps_res**2) classification time/memory.
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'
    data_callback : callable, optional
      Callable object to preprocess the new data points.
      Classified points are of the form samples = data_callback(xysamples).
      I.e. this can be a function to normalize them, or cache them
      before they are classified.
    """

    if False:
        ## from mvpa.misc.data_generators import *
        ## from mvpa.clfs.svm import *
        ## from mvpa.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2, nchunks=5,
                                         snr=10, nlabels=4, means=[ [0,1], [1,0], [1,1], [0,0] ])
        dataset.samples += dataset.sa.chunks[:, None]*0.1 # slight shifts for chunks ;)
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3, means=[ [0,1], [1,0], [1,1] ])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2, means=[ [0,1], [1,0] ])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)#LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback=None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')

    Pioff()
    a = pl.gca() # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')

        if targets is None:
            targets = clf.params.targets_attr
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets           # bad Yarik -- will rebind targets to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None
            and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn                  # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets),), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets==l
        s = dataset[targets_mask]
        targets_colors[l] = c \
            = cmap((l-vmin)/float(vmax-vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)
        scatter_kwargs = dict(
            c=[c], zorder=10+(l-vmin))

        if sum(hits):
            a.scatter(s.samples[hits, 0], s.samples[hits, 1], marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0], s.samples[misses, 1], marker='x',
                      label='%s [%d] (miss)' % (attrmap.to_literal(l), sum(misses)),
                      edgecolor=[c], **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
    (x,y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                    ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError: # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
            zorder=1,
            aspect='auto',
            interpolation='bilinear', alpha=0.9, cmap=cmap,
            vmin=vmin, vmax=vmax,
            extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError, \
                  "Please provide classifier for plotting maps of %s" % maps
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets)==2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3 # Gives a nice tonal range ;)
            map_ = 'estimates' # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:])/2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x, y, map_values, vals, zorder=6,
                           linestyles=linestyles, extent=extent, colors='k')
        except ValueError, e:
            print "Sorry - plotting of estimates isn't full supported for %s. " \
                  "Got exception %s" % (clf, e)
Example #12
        except ValueError, e:
            print "Sorry - plotting of estimates isn't full supported for %s. " \
                  "Got exception %s" % (clf, e)
    elif maps == 'targets':
        map_values = attrmap.to_numeric(predictions_new).reshape(x.shape)
        a.imshow(map_values.T, **imshow_kwargs)
        #CS = a.contour(x, y, map_values, vals, zorder=6,
        #               linestyles=linestyles, extent=extent, colors='k')

    # Plot regions belonging to the same pair of attribute given
    # (e.g. chunks) and targets attribute
    if regions:
        chunks_sa = dataset.sa[regions]
        chunks_lit = chunks_sa.value
        uchunks_lit = chunks_sa.unique
        chunks_attrmap = AttributeMap(mapnumeric=True)
        chunks = chunks_attrmap.to_numeric(chunks_lit)
        uchunks = chunks_attrmap.to_numeric(uchunks_lit)

        from matplotlib.delaunay.triangulate import Triangulation
        from matplotlib.patches import Polygon
        # Lets figure out convex halls for each chunk/label pair
        for target in utargets:
            t_mask = targets == target
            for chunk in uchunks:
                tc_mask = np.logical_and(t_mask,
                                        chunk == chunks)
                tc_samples = dataset.samples[tc_mask]
                tr = Triangulation(tc_samples[:, 0],
                                   tc_samples[:, 1])
                poly = pl.fill(tc_samples[tr.hull, 0],
Example #13
def to_lightsvm_format(dataset, out, targets_attr='targets',
                       domain=None, am=None):
    """Export dataset into LightSVM format

    Parameters
    ----------
    dataset : Dataset
    out
      Anything understanding .write(string), such as `File`
    targets_attr : string, optional
      Name of the samples attribute to be output
    domain : {None, 'regression', 'binary', 'multiclass'}, optional
      What domain the dataset belongs to.  If `None`, it is deduced from
      the datatype ('regression' if float, classification in case of int
      or string, with 'binary'/'multiclass' depending on the number of
      unique targets)
    am : `AttributeMap` or None, optional
      Which mapping to use for storing the non-conformant targets.  If
      None is provided, a new one is generated automagically for the
      given/deduced domain.

    Returns
    -------
    am

    LightSVM format is an ASCII representation with a single sample per
    line::

      output featureIndex:featureValue ... featureIndex:featureValue

    where ``output`` is specific for a given domain:

    regression
      float number
    binary
      integer labels from {-1, 1}
    multiclass
      integer labels from {1..ds.targets_attr.nunique}

    """
    targets_a = dataset.sa[targets_attr]
    targets = targets_a.value

    # XXX this all below
    #  * might become cleaner
    #  * might be RF to become more generic to be used may be elsewhere as well

    if domain is None:
        if targets.dtype.kind in ['S', 'i']:
            if len(targets_a.unique) == 2:
                domain = 'binary'
            else:
                domain = 'multiclass'
        else:
            domain = 'regression'

    if domain in ['multiclass', 'binary']:
        # check if labels are appropriate and provide mapping if necessary
        utargets = targets_a.unique
        if domain == 'binary' and set(utargets) != set([-1, 1]):
            # need mapping
            if len(utargets) != 2:
                raise ValueError, \
                      "We need 2 unique targets in %s of %s. Got targets " \
                      "from set %s" % (targets_attr, dataset, utargets)
            if am is None:
                am = AttributeMap(dict(zip(utargets, [-1, 1])))
            elif set(am.values()) != set([-1, 1]):
                raise ValueError, \
                      "Provided %s doesn't map into binary " \
                      "labels -1,+1" % (am,)
        elif domain == 'multiclass' \
                 and set(utargets) != set(range(1, len(utargets)+1)):
            if am is None:
                am = AttributeMap(dict(zip(utargets,
                                           range(1, len(utargets) + 1))))
            elif set(am.values()) != set(range(1, len(utargets) + 1)):
                raise ValueError, \
                      "Provided %s doesn't map into multiclass " \
                      "range 1..N" % (am, )

    if am is not None:
        # map the targets
        targets = am.to_numeric(targets)

    for t, s in zip(targets, dataset.samples):
        out.write('%g %s\n'
                  % (t,
                     ' '.join(
                         '%i:%.8g' % (i, v)
                         for i,v in zip(range(1, dataset.nfeatures+1), s))))

    out.flush()                # push it out
    return am
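A hedged usage sketch for the exporter above; 'ds' is a placeholder for a PyMVPA dataset with literal targets, and StringIO is used only to show the produced text:

from StringIO import StringIO

out = StringIO()
am = to_lightsvm_format(ds, out, targets_attr='targets')
print out.getvalue()  # lines like '1 1:0.5 2:-1.25 ...' -- mapped target, then index:value pairs
print am              # AttributeMap recording how the targets were mapped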
Example #14
    def _train(self, dataset):
        """Train SVM
        """

        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable

        targets_sa_name = self.get_space()    # name of targets sa
        targets_sa = dataset.sa[targets_sa_name] # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset


        # OK -- we have to map labels since
        #  binary ones expect -1/+1
        #  Multiclass expect labels starting with 0, otherwise they puke
        #   when run from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]:-1.0, ul[1]:+1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([ (ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)

            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')


        # KERNEL

        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get('kernel_params', False)

        # TODO: big RF to move non-kernel classifiers away
        if 'kernel-based' in self.__tags__ and (not retrainable
               or _changedData['traindata'] or _changedData['kernel_params']):
            # If needed compute or just collect arguments for SVM and for
            # the kernel

            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug("SG",
                          "Re-Creating kernel since training data has changed")

                if _changedData['kernel_params']:
                    debug("SG",
                          "Re-Creating kernel since params %s has changed" %
                          _changedData['kernel_params'])


            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')

            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, ie without recreating
        # whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not jump over the head and leave it up to the user
                #     ie do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitely if to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accomodate the "
                #                  "difference in number of training samples" %
                #                  Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % `svm_impl_class`)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions constructor a bit different
                self.__svm = svm_impl_class(Cs[0], self.params.tube_epsilon, self.__kernel, labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau, self.__kernel, labels)
            elif 'kernel-based' in self.__tags__:
                self.__svm = svm_impl_class(Cs[0], self.__kernel, labels)
                self.__svm.set_epsilon(self.params.epsilon)
            else:
                traindata_sg = _tosg(dataset.samples)
                self.__svm = svm_impl_class(Cs[0], traindata_sg, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug("SG_", "Since multiple Cs are provided: %s, assign them" % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')
            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:          # labels were changed
                if __debug__: debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:               # kernel was replaced
                if __debug__: debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert(_changedData['params'] is False)  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug("SG", "%sTraining %s on data%s" %
                  (("","Re-")[retrainable and self.ca.retrained],
                   self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_stats'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm.classify().get_labels()

        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug("SG__", "Original labels: %s, Trained labels: %s" %
                  (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets' and
        #     use it within base Classifier._posttrain to assign predictions
        #     instead of duplicating code here
        # XXX For now it can be done only for regressions since labels need to
        #     be remapped and that becomes even worse if we use regression
        #     as a classifier so mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_stats'):
            self.ca.training_stats = self.__summary_class__(
                targets=targets_sa.value,
                predictions=trained_targets)
Example #15
class SVM(_SVM):
    """Support Vector Machine Classifier(s) based on Shogun

    This is a simple base interface
    """
    __default_kernel_class__ = _default_kernel_class_
    num_threads = Parameter(1,
                            min=1,
                            doc='Number of threads to utilize')

    _KNOWN_PARAMS = [ 'epsilon' ]

    __tags__ = _SVM.__tags__ + [ 'sg', 'retrainable' ]

    # Some words of wisdom from shogun author:
    # XXX remove after proper comments added to implementations
    """
    If you'd like to train linear SVMs use SGD or OCAS. These are (I am
    serious) the fastest linear SVM-solvers to date. (OCAS cannot do SVMs
    with standard additive bias, but will L2 regularize it - though it
    should not matter much in practice (although it will give slightly
    different solutions)). Note that SGD has no stopping criterion (you
    simply have to specify the number of iterations) and that OCAS has a
    different stopping condition than svmlight for example which may be more
    tight or more loose depending on the problem - I suggest 1e-2 or 1e-3
    for epsilon.

    If you would like to train kernel SVMs use libsvm/gpdt/svmlight -
    depending on the problem one is faster than the other (hard to say when,
    I *think* when your dataset is very unbalanced chunking methods like
    svmlight/gpdt are better), for smaller problems definitely libsvm.

    If you use string kernels then gpdt/svmlight have a special 'linadd'
    speedup for this (requires sg 0.6.2 - there was some inefficiency in the
    code for python-modular before that). This is effective for big datasets
    (I trained on 10 million strings based on this).

    And yes currently we only implemented parallel training for svmlight,
    however all SVMs can be evaluated in parallel.
    """
    _KNOWN_SENSITIVITIES={'linear':LinearSVMWeights,
                          }
    _KNOWN_IMPLEMENTATIONS = {}
    if externals.exists('shogun', raise_=True):
        _KNOWN_IMPLEMENTATIONS = {
            "libsvm" : (shogun.Classifier.LibSVM, ('C',),
                       ('multiclass', 'binary'),
                        "LIBSVM's C-SVM (L2 soft-margin SVM)"),
            "gmnp" : (shogun.Classifier.GMNPSVM, ('C',),
                     ('multiclass', 'binary'),
                      "Generalized Nearest Point Problem SVM"),
            # XXX should have been GPDT, shogun has it fixed since some version
            "gpbt" : (shogun.Classifier.GPBTSVM, ('C',), ('binary',),
                      "Gradient Projection Decomposition Technique for " \
                      "large-scale SVM problems"),
            "gnpp" : (shogun.Classifier.GNPPSVM, ('C',), ('binary',),
                      "Generalized Nearest Point Problem SVM"),

            ## TODO: Needs sparse features...
            # "svmlin" : (shogun.Classifier.SVMLin, ''),
            # "liblinear" : (shogun.Classifier.LibLinear, ''),
            # "subgradient" : (shogun.Classifier.SubGradientSVM, ''),
            ## good 2-class linear SVMs
            # "ocas" : (shogun.Classifier.SVMOcas, ''),
            # "sgd" : ( shogun.Classifier.SVMSGD, ''),

            # regressions
            "libsvr": (shogun.Regression.LibSVR, ('C', 'tube_epsilon',),
                      ('regression',),
                       "LIBSVM's epsilon-SVR"),
            }


    def __init__(self, **kwargs):
        """Interface class to Shogun's classifiers and regressions.

        Default implementation is 'libsvm'.
        """


        svm_impl = kwargs.get('svm_impl', 'libsvm').lower()
        kwargs['svm_impl'] = svm_impl

        # init base class
        _SVM.__init__(self, **kwargs)

        self.__svm = None
        """Holds the trained svm."""

        # Need to store original data...
        # TODO: keep 1 of them -- just __traindata or __traindataset
        # For now it is needed for computing sensitivities
        self.__traindataset = None

        # internal SG swig proxies
        self.__traindata = None
        self.__kernel = None
        self.__kernel_test = None
        self.__testdata = None

        # remove kernel-based for some
        # TODO RF: provide separate handling for non-kernel machines
        if svm_impl in ['svmocas']:
            if not (self.__kernel is None
                    or self.__kernel.__kernel_name__ == 'linear'):
                raise ValueError(
                    "%s is inherently linear, thus provided kernel %s "
                    "is of no effect" % (svm_impl, self.__kernel))
            self.__tags__.pop(self.__tags__.index('kernel-based'))
            self.__tags__.pop(self.__tags__.index('retrainable'))

    # TODO: integrate with kernel framework
    #def __condition_kernel(self, kernel):
        ## XXX I thought that it is needed only for retrainable classifier,
        ##     but then krr gets confused, and svrlight needs it to provide
        ##     meaningful results even without 'retraining'
        #if self._svm_impl in ['svrlight', 'lightsvm']:
            #try:
                #kernel.set_precompute_matrix(True, True)
            #except Exception, e:
                ## N/A in shogun 0.9.1... TODO: RF
                #if __debug__:
                    #debug('SG_', "Failed call to set_precompute_matrix for %s: %s"
                          #% (self, e))


    def _train(self, dataset):
        """Train SVM
        """

        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable

        targets_sa_name = self.get_space()    # name of targets sa
        targets_sa = dataset.sa[targets_sa_name] # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset


        # OK -- we have to map labels since
        #  binary ones expect -1/+1
        #  multiclass ones expect labels starting with 0, otherwise they puke
        #   when run from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]:-1.0, ul[1]:+1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([ (ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)
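            # Illustration (hypothetical labels): for binary targets such as
            # ('neg', 'pos'), _labels_dict above is {'neg': -1.0, 'pos': +1.0},
            # so self._attrmap.to_numeric(['pos', 'neg', 'pos']) would yield
            # [ 1., -1.,  1.]; with more than two classes the labels map to
            # 0, 1, 2, ... instead.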

            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')


        # KERNEL

        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get('kernel_params', False)

        # TODO: big RF to move non-kernel classifiers away
        if 'kernel-based' in self.__tags__ and (not retrainable
               or _changedData['traindata'] or _changedData['kernel_params']):
            # If needed compute or just collect arguments for SVM and for
            # the kernel

            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug("SG",
                          "Re-Creating kernel since training data has changed")

                if _changedData['kernel_params']:
                    debug("SG",
                          "Re-Creating kernel since params %s has changed" %
                          _changedData['kernel_params'])


            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')

            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, ie without recreating
        # whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not jump over the head and leave it up to the user
                #     ie do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitly whether to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accomodate the "
                #                  "difference in number of training samples" %
                #                  Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % `svm_impl_class`)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions the constructor is a bit different
                self.__svm = svm_impl_class(Cs[0], self.params.tube_epsilon, self.__kernel, labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau, self.__kernel, labels)
            elif 'kernel-based' in self.__tags__:
                self.__svm = svm_impl_class(Cs[0], self.__kernel, labels)
                self.__svm.set_epsilon(self.params.epsilon)
            else:
                traindata_sg = _tosg(dataset.samples)
                self.__svm = svm_impl_class(Cs[0], traindata_sg, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug("SG_", "Since multiple Cs are provided: %s, assign them" % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')
            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:          # labels were changed
                if __debug__: debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:               # kernel was replaced
                if __debug__: debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert(_changedData['params'] is False)  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug("SG", "%sTraining %s on data%s" %
                  (("","Re-")[retrainable and self.ca.retrained],
                   self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_stats'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm.classify().get_labels()
        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug("SG__", "Original labels: %s, Trained labels: %s" %
                  (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets' and
        #     use it within base Classifier._posttrain to assign predictions
        #     instead of duplicating code here
        # XXX For now it can be done only for regressions, since labels need to
        #     be remapped, and that becomes even worse if we use a regression
        #     as a classifier, so the mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_stats'):
            self.ca.training_stats = self.__summary_class__(
                targets=targets_sa.value,
                predictions=trained_targets)


    # XXX actually this is the beast which started this evil conversion
    #     so -- make use of dataset here! ;)
    @accepts_samples_as_dataset
    def _predict(self, dataset):
        """Predict values for the data
        """

        retrainable = self.params.retrainable

        if retrainable:
            changed_testdata = self._changedData['testdata'] or \
                               self.__kernel_test is None

        if not retrainable:
            if __debug__:
                debug("SG__",
                      "Initializing SVMs kernel of %s with training/testing samples"
                      % self)
            self.params.kernel.compute(self.__traindataset, dataset)
            self.__kernel_test = self.params.kernel.as_sg()._k
            # We can just reuse kernel used for training
            #self.__condition_kernel(self.__kernel)

        else:
            if changed_testdata:
                #if __debug__:
                    #debug("SG__",
                          #"Re-creating testing kernel of %s giving "
                          #"arguments %s" %
                          #(`self._kernel_type`, self.__kernel_args))
                self.params.kernel.compute(self.__traindataset, dataset)

                #_setdebug(kernel_test, 'Kernels')

                #_setdebug(kernel_test_custom, 'Kernels')
                self.__kernel_test = self.params.kernel.as_raw_sg()

            elif __debug__:
                debug("SG__", "Re-using testing kernel")

        assert(self.__kernel_test is not None)

        if 'kernel-based' in self.__tags__:
            self.__svm.set_kernel(self.__kernel_test)
            # doesn't do any good imho, although it helps a tiny bit on unittests... hm
            #self.__svm.init_kernel_optimization()
            values_ = self.__svm.classify()
        else:
            testdata_sg = _tosg(dataset.samples)
            self.__svm.set_features(testdata_sg)
            values_ = self.__svm.classify()

        if __debug__:
            debug("SG_", "Classifying testing data")

        if values_ is None:
            raise RuntimeError, "We got empty list of values from %s" % self

        values = values_.get_labels()

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.repredicted = repredicted = not changed_testdata
            if __debug__:
                debug("SG__", "Re-assigning learing kernel. Repredicted is %s"
                      % repredicted)
            # return back original kernel
            if 'kernel-based' in self.__tags__:
                self.__svm.set_kernel(self.__kernel)

        if __debug__:
            debug("SG__", "Got values %s" % values)

        if (self.__is_regression__):
            predictions = values
        else:
            if len(self._attrmap.keys()) == 2:
                predictions = np.sign(values)
                # since np.sign(0) == 0
                predictions[predictions==0] = 1
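                # e.g. raw values [-0.3, 0.0, 2.1] -> predictions [-1., 1., 1.]
                # (illustrative numbers only)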
            else:
                predictions = values

            # remap labels back adjusting their type
            # XXX YOH: This is done by topclass now (needs RF)
            #predictions = self._attrmap.to_literal(predictions)

            if __debug__:
                debug("SG__", "Tuned predictions %s" % predictions)

        # store conditional attribute
        # TODO: extract values properly for multiclass SVMs --
        #       ie 1 value per label or pairs for all 1-vs-1 classifications
        self.ca.estimates = values

        ## to avoid leaks with not yet properly fixed shogun
        if not retrainable:
            try:
                # free the test features if we created them above
                # (only the non-kernel path binds testdata_sg)
                testdata_sg.free_features()
            except:
                pass

        return predictions


    def _untrain(self):
        super(SVM, self)._untrain()
        # untrain/clean the kernel -- we might not be allowed to drag the SWIG
        # instance around, BUT XXX -- make it work fine with
        # CachedKernel -- we might not want to fully "untrain" in such
        # a case
        self.params.kernel.cleanup()    # XXX unify naming
        if not self.params.retrainable:
            if __debug__:
                debug("SG__", "Untraining %(clf)s and destroying sg's SVM",
                      msgargs={'clf':self})

            # to avoid leaks with not yet properly fixed shogun
            # XXX make it nice... now it is just stable ;-)
            if True: # not self.__traindata is None:
                if True:
                # try:
                    if self.__kernel is not None:
                        del self.__kernel
                        self.__kernel = None

                    if self.__kernel_test is not None:
                        del self.__kernel_test
                        self.__kernel_test = None

                    if self.__svm is not None:
                        del self.__svm
                        self.__svm = None

                    if self.__traindata is not None:
                        # Left in for easy demonstration of the memory leak in shogun
                        #for i in xrange(10):
                        #    debug("SG__", "cachesize pre free features %s" %
                        #          (self.__svm.get_kernel().get_cache_size()))
                        self.__traindata.free_features()
                        del self.__traindata
                        self.__traindata = None

                    self.__traindataset = None


                #except:
                #    pass

            if __debug__:
                debug("SG__",
                      "Done untraining %(self)s and destroying sg's SVM",
                      msgargs=locals())
        elif __debug__:
            debug("SG__", "Not untraining %(self)s since it is retrainable",
                  msgargs=locals())


    def __get_implementation(self, ul):
        if self.__is_regression__ or len(ul) == 2:
            svm_impl_class = SVM._KNOWN_IMPLEMENTATIONS[self._svm_impl][0]
        else:
            if self._svm_impl == 'libsvm':
                svm_impl_class = shogun.Classifier.LibSVMMultiClass
            elif self._svm_impl == 'gmnp':
                svm_impl_class = shogun.Classifier.GMNPSVM
            else:
                raise RuntimeError, \
                      "Shogun: Implementation %s doesn't handle multiclass " \
                      "data. Got labels %s. Use some other classifier" % \
                      (self._svm_impl,
                       self.__traindataset.sa[self.get_space()].unique)
            if __debug__:
                debug("SG_", "Using %s for multiclass data of %s" %
                      (svm_impl_class, self._svm_impl))

        return svm_impl_class


    svm = property(fget=lambda self: self.__svm)
    """Access to the SVM model."""

    traindataset = property(fget=lambda self: self.__traindataset)
    """Dataset which was used for training
Example #17
0
class Classifier(ClassWithCollections):
    """Abstract classifier class to be inherited by all classifiers
    """

    # Kept separate from the docstring so as not to pollute help(clf),
    # especially if we include help for the parent class
    _DEV__doc__ = """
    Required behavior:

    Every classifier has to be instantiable without having to specify the
    training pattern.

    Repeated calls to the train() method with different training data have to
    result in a valid classifier, trained for the particular dataset.

    It must be possible to specify all classifier parameters as keyword
    arguments to the constructor.

    Recommended behavior:

    Derived classifiers should provide access to *estimates* -- i.e. that
    information that is finally used to determine the predicted class label.

    Michael: Maybe it works well if each classifier provides an 'estimates'
             state member. This variable is a list as long as, and in the same
             order as, Dataset.uniquetargets (training data). Each item in the
             list corresponds to the likelihood of a sample belonging to the
             respective class. However the semantics might differ between
             classifiers, e.g. kNN would probably store distances to class
             neighbors, whereas PLR would store the raw function value of the
             logistic function. So in the case of kNN low is predictive and for
             PLR high is predictive. Don't know if there is a need to unify
             that.

             As the storage and/or computation of this information might be
             demanding, its collection should be switchable and off by default.

    Nomenclature
     * predictions  : result of the last call to .predict()
     * estimates : might be different from predictions if a classifier's predict()
                   makes a decision based on some internal value such as
                   probability or a distance.
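
    (A toy subclass illustrating this contract follows at the end of this
    example.)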
    """
    # Dict that contains the parameters of a classifier.
    # This shall provide an interface to plug generic parameter optimizer
    # on all classifiers (e.g. grid- or line-search optimizer)
    # A dictionary is used because Michael thinks that access by name is nicer.
    # Additionally Michael thinks ATM that additional information might be
    # necessary in some situations (e.g. reasonably predefined parameter range,
    # minimal iteration stepsize, ...), therefore the value to each key should
    # also be a dict, or we should use mvpa.misc.param.Parameter...

    trained_targets = ConditionalAttribute(enabled=True,
        doc="Set of unique targets it has been trained on")

    trained_nsamples = ConditionalAttribute(enabled=True,
        doc="Number of samples it has been trained on")

    trained_dataset = ConditionalAttribute(enabled=False,
        doc="The dataset it has been trained on")

    training_confusion = ConditionalAttribute(enabled=False,
        doc="Confusion matrix of learning performance")

    predictions = ConditionalAttribute(enabled=True,
        doc="Most recent set of predictions")

    estimates = ConditionalAttribute(enabled=True,
        doc="Internal classifier estimates the most recent " +
            "predictions are based on")

    training_time = ConditionalAttribute(enabled=True,
        doc="Time (in seconds) which took classifier to train")

    predicting_time = ConditionalAttribute(enabled=True,
        doc="Time (in seconds) which took classifier to predict")

    feature_ids = ConditionalAttribute(enabled=False,
        doc="Feature IDS which were used for the actual training.")

    __tags__ = []
    """Describes some specifics about the classifier -- is that it is
    doing regression for instance...."""

    targets_attr = Parameter('targets', allowedtype='str',  # ro=True,
        doc="""What samples attribute to use as targets.""",
        index=999)


    # TODO: make it available only for actually retrainable classifiers
    retrainable = Parameter(False, allowedtype='bool',
        doc="""Either to enable retraining for 'retrainable' classifier.""",
        index=1002)


    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = None
        """Stores number of features for which classifier was trained.
        If None -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)

        # deprecate
        #self.__trainedidhash = None
        #"""Stores id of the dataset on which it was trained to signal
        #in trained() if it was trained already on the same dataset"""

    @property
    def __summary_class__(self):
        if 'regression' in self.__tags__:
            return RegressionStatistics
        else:
            return ConfusionMatrix

    @property
    def __is_regression__(self):
        return 'regression' in self.__tags__

    def __str__(self):
        if __debug__ and 'CLF_' in debug.active:
            return "%s / %s" % (repr(self), super(Classifier, self).__str__())
        else:
            return repr(self)

    def __repr__(self, prefixes=[]):
        return super(Classifier, self).__repr__(prefixes=prefixes)


    def _pretrain(self, dataset):
        """Functionality prior to training
        """
        # So we reset all conditional attributes and maybe free up some memory
        # explicitly
        params = self.params
        if not params.retrainable:
            self.untrain()
        else:
            # just reset the ca, do not untrain
            self.ca.reset()
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                __idhashes = self.__idhashes
                __invalidatedChangedData = self.__invalidatedChangedData

                # if we don't know what was changed, we need to figure
                # it out
                if __debug__:
                    debug('CLF_', "IDHashes are %s" % (__idhashes))

                # Look at the data if any was changed
                for key, data_ in (('traindata', dataset.samples),
                                   ('targets', dataset.sa[params.targets_attr].value)):
                    _changedData[key] = self.__was_data_changed(key, data_)
                    # if those idhashes were invalidated by retraining
                    # we need to adjust _changedData accordingly
                    if __invalidatedChangedData.get(key, False):
                        if __debug__ and not _changedData[key]:
                            debug('CLF_', 'Found that idhash for %s was '
                                  'invalidated by retraining' % key)
                        _changedData[key] = True

                # Look at the parameters
                for col in self._paramscols:
                    changedParams = self._collections[col].which_set()
                    if len(changedParams):
                        _changedData[col] = changedParams

                self.__invalidatedChangedData = {} # reset it on training

                if __debug__:
                    debug('CLF_', "Obtained _changedData is %s"
                          % (self._changedData))


    def _posttrain(self, dataset):
        """Functionality post training

        For instance -- computing confusion matrix.

        Parameters
        ----------
        dataset : Dataset
          Data which was used for training
        """
        ca = self.ca
        if ca.is_enabled('trained_targets'):
            ca.trained_targets = dataset.sa[self.params.targets_attr].unique

        ca.trained_dataset = dataset
        ca.trained_nsamples = dataset.nsamples

        # needs to be assigned first since below we use predict
        self.__trainednfeatures = dataset.nfeatures

        if __debug__ and 'CHECK_TRAINED' in debug.active:
            self.__trainedidhash = dataset.idhash

        if self.ca.is_enabled('training_confusion') and \
               not self.ca.is_set('training_confusion'):
            # we should not store predictions for training data,
            # it is confusing imho (yoh)
            self.ca.change_temporarily(
                disable_ca=["predictions"])
            if self.params.retrainable:
                # we would need to recheck if data is the same,
                # XXX think if there is a way to make this all
                # efficient. For now, probably, retrainable
                # classifiers have no choice but to not use
                # training_confusion... sad
                self.__changedData_isset = False
            predictions = self.predict(dataset)
            self.ca.reset_changed_temporarily()
            self.ca.training_confusion = self.__summary_class__(
                targets=dataset.sa[self.params.targets_attr].value,
                predictions=predictions)

        if self.ca.is_enabled('feature_ids'):
            self.ca.feature_ids = self._get_feature_ids()


    ##REF: Name was automagically refactored
    def _get_feature_ids(self):
        """Virtual method to return feature_ids used while training

        Is not intended to be called anywhere but from _posttrain,
        thus classifier is assumed to be trained at this point
        """
        # By default all features are used
        return range(self.__trainednfeatures)


    def summary(self):
        """Providing summary over the classifier"""

        s = "Classifier %s" % self
        ca = self.ca
        ca_enabled = ca.enabled

        if self.trained:
            s += "\n trained"
            if ca.is_set('training_time'):
                s += ' in %.3g sec' % ca.training_time
            s += ' on data with'
            if ca.is_set('trained_targets'):
                s += ' targets:%s' % list(ca.trained_targets)

            nsamples, nchunks = None, None
            if ca.is_set('trained_nsamples'):
                nsamples = ca.trained_nsamples
            if ca.is_set('trained_dataset'):
                td = ca.trained_dataset
                nsamples, nchunks = td.nsamples, len(td.sa['chunks'].unique)
            if nsamples is not None:
                s += ' #samples:%d' % nsamples
            if nchunks is not None:
                s += ' #chunks:%d' % nchunks

            s += " #features:%d" % self.__trainednfeatures
            if ca.is_set('feature_ids'):
                s += ", used #features:%d" % len(ca.feature_ids)
            if ca.is_set('training_confusion'):
                s += ", training error:%.3g" % ca.training_confusion.error
        else:
            s += "\n not yet trained"

        if len(ca_enabled):
            s += "\n enabled ca:%s" % ', '.join([str(ca[x])
                                                     for x in ca_enabled])
        return s


    def clone(self):
        """Create full copy of the classifier.

        It might require the classifier to be untrained first due to
        the present SWIG bindings.

        TODO: think about proper re-implementation, without enrollment of deepcopy
        """
        if __debug__:
            debug("CLF", "Cloning %s#%s" % (self, id(self)))
        try:
            return deepcopy(self)
        except:
            self.untrain()
            return deepcopy(self)


    def _train(self, dataset):
        """Function to be actually overridden in derived classes
        """
        raise NotImplementedError


    def train(self, dataset):
        """Train classifier on a dataset

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so
        """
        if dataset.nfeatures == 0 or dataset.nsamples == 0:
            raise DegenerateInputError, \
                  "Cannot train classifier on degenerate data %s" % dataset
        if __debug__:
            debug("CLF", "Training classifier %(clf)s on dataset %(dataset)s",
                  msgargs={'clf':self, 'dataset':dataset})

        self._pretrain(dataset)

        # remember the time when started training
        t0 = time.time()

        if dataset.nfeatures > 0:

            result = self._train(dataset)
        else:
            warning("Trying to train on dataset with no features present")
            if __debug__:
                debug("CLF",
                      "No features present for training, no actual training " \
                      "is called")
            result = None

        self.ca.training_time = time.time() - t0
        self._posttrain(dataset)
        return result


    def _prepredict(self, dataset):
        """Functionality prior prediction
        """
        if not ('notrain2predict' in self.__tags__):
            # check if classifier was trained if that is needed
            if not self.trained:
                raise ValueError, \
                      "Classifier %s wasn't yet trained, therefore can't " \
                      "predict" % self
            nfeatures = dataset.nfeatures #data.shape[1]
            # check if number of features is the same as in the data
            # it was trained on
            if nfeatures != self.__trainednfeatures:
                raise ValueError, \
                      "Classifier %s was trained on data with %d features, " % \
                      (self, self.__trainednfeatures) + \
                      "thus can't predict for %d features" % nfeatures


        if self.params.retrainable:
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                data = np.asanyarray(dataset.samples)
                _changedData['testdata'] = \
                                        self.__was_data_changed('testdata', data)
                if __debug__:
                    debug('CLF_', "prepredict: Obtained _changedData is %s"
                          % (_changedData))


    def _postpredict(self, dataset, result):
        """Functionality after prediction is computed
        """
        self.ca.predictions = result
        if self.params.retrainable:
            self.__changedData_isset = False

    def _predict(self, dataset):
        """Actual prediction
        """
        raise NotImplementedError

    @accepts_samples_as_dataset
    def predict(self, dataset):
        """Predict classifier on data

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so. Also, subclasses trying to call the superclass's predict
        from within _predict should call _predict instead of predict(),
        since otherwise it would loop
        """
        ## ??? yoh: changed to asany from as without exhaustive check
        data = np.asanyarray(dataset.samples)
        if __debug__:
            debug("CLF", "Predicting classifier %(clf)s on ds %(dataset)s",
                msgargs={'clf':self, 'dataset':dataset})

        # remember the time when started computing predictions
        t0 = time.time()

        ca = self.ca
        # to assure that those are reset (could be set due to testing
        # post-training)
        ca.reset(['estimates', 'predictions'])

        self._prepredict(dataset)

        if self.__trainednfeatures > 0 \
               or 'notrain2predict' in self.__tags__:
            result = self._predict(dataset)
        else:
            warning("Trying to predict using classifier trained on no features")
            if __debug__:
                debug("CLF",
                      "No features were present for training, prediction is " \
                      "bogus")
            result = [None]*data.shape[0]

        ca.predicting_time = time.time() - t0

        # with labels mapping in-place, we also need to go back to the
        # literal labels
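        # e.g. with a hypothetical map {'neg': 0, 'pos': 1}, numeric
        # predictions [0, 1, 0] are mapped back to ['neg', 'pos', 'neg']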
        if self._attrmap:
            try:
                result = self._attrmap.to_literal(result)
            except KeyError, e:
                raise FailedToPredictError, \
                      "Failed to convert predictions from numeric into " \
                      "literals: %s" % e

        self._postpredict(dataset, result)
        return result
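

# A minimal sketch of the contract described in _DEV__doc__ above: a toy
# classifier that can be instantiated without training data, implements only
# _train()/_predict(), and exposes its internal `estimates`.  The class name
# and the nearest-mean rule are illustrative only (not part of PyMVPA).
import numpy as np  # already imported in the original module; repeated here

class NearestMeanToyClassifier(Classifier):
    """Toy classifier: predict the class whose mean training sample is closest."""

    __tags__ = ['toy', 'binary', 'multiclass']

    def _train(self, dataset):
        targets = dataset.sa[self.params.targets_attr].value
        # per-class mean of the training samples
        self._class_means = dict(
            (t, dataset.samples[targets == t].mean(axis=0))
            for t in dataset.sa[self.params.targets_attr].unique)

    def _predict(self, dataset):
        labels = list(self._class_means.keys())
        # distance of every sample to every class mean
        dists = np.array([[np.linalg.norm(s - self._class_means[l])
                           for l in labels]
                          for s in dataset.samples])
        # expose the internal estimates, as recommended above
        self.ca.estimates = dists
        return [labels[i] for i in dists.argmin(axis=1)]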
Example #18
0
def plot_decision_boundary_2d(dataset,
                              clf=None,
                              targets=None,
                              regions=None,
                              maps=None,
                              maps_res=50,
                              vals=[-1, 0, 1],
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was trained on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier
    targets : string, optional
      Which samples attribute to use for targets.  If None and clf is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  A higher number yields smoother decision lines, but
      classification time/memory grows quadratically (maps_res**2 grid points).
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'
    data_callback : callable, optional
      Callable object to preprocess the new data points.
      Classified points are of the form samples = data_callback(xysamples),
      i.e. this can be a function to normalize them, or cache them
      before they are classified.
    """

    if False:
        ## from mvpa.misc.data_generators import *
        ## from mvpa.clfs.svm import *
        ## from mvpa.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2,
                                         nchunks=5,
                                         snr=10,
                                         nlabels=4,
                                         means=[[0, 1], [1, 0], [1, 1], [0,
                                                                         0]])
        dataset.samples += dataset.sa.chunks[:,
                                             None] * 0.1  # slight shifts for chunks ;)
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3, means=[ [0,1], [1,0], [1,1] ])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2, means=[ [0,1], [1,0] ])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)  #LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback = None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')

    Pioff()
    a = pl.gca()  # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')

        if targets is None:
            targets = clf.params.targets_attr
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets  # bad Yarik -- will rebind targets to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn  # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets), ), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets == l
        s = dataset[targets_mask]
        targets_colors[l] = c \
            = cmap((l-vmin)/float(vmax-vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)
        scatter_kwargs = dict(c=[c], zorder=10 + (l - vmin))

        if sum(hits):
            a.scatter(s.samples[hits, 0],
                      s.samples[hits, 1],
                      marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0],
                      s.samples[misses, 1],
                      marker='x',
                      label='%s [%d] (miss)' %
                      (attrmap.to_literal(l), sum(misses)),
                      edgecolor=[c],
                      **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
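    # (np.mgrid with a complex "step" of maps_res*1j -- here spelled
    # np.complex(0, maps_res) -- yields maps_res evenly spaced points between
    # the axis limits, inclusive, i.e. a maps_res x maps_res evaluation grid)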
    (x, y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                      ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError:  # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
                         zorder=1,
                         aspect='auto',
                         interpolation='bilinear',
                         alpha=0.9,
                         cmap=cmap,
                         vmin=vmin,
                         vmax=vmax,
                         extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError, \
                  "Please provide classifier for plotting maps of %s" % maps
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets) == 2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3  # Gives a nice tonal range ;)
            map_ = 'estimates'  # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:]) / 2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x,
                           y,
                           map_values,
                           vals,
                           zorder=6,
                           linestyles=linestyles,
                           extent=extent,
                           colors='k')
        except ValueError, e:
            print "Sorry - plotting of estimates isn't full supported for %s. " \
                  "Got exception %s" % (clf, e)
Example #19
0
    def test_regressions(self, regr):
        """Simple tests on regressions
        """
        ds = datasets['chirp_linear']
        # we want numeric labels to maintain the previous behavior, especially
        # since we deal with regressions here
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)

        cve = CrossValidatedTransferError(
            TransferError(regr),
            splitter=NFoldSplitter(),
            postproc=mean_sample(),
            enable_ca=['training_confusion', 'confusion'])
        # check the default
        self.failUnless(isinstance(cve.transerror.errorfx, CorrErrorFx))
        corr = np.asscalar(cve(ds).samples)

        # Our CorrErrorFx should never return NaN
        self.failUnless(not np.isnan(corr))
        self.failUnless(corr == cve.ca.confusion.stats['CCe'])

        splitregr = SplitClassifier(
            regr,
            splitter=OddEvenSplitter(),
            enable_ca=['training_confusion', 'confusion'])
        splitregr.train(ds)
        split_corr = splitregr.ca.confusion.stats['CCe']
        split_corr_tr = splitregr.ca.training_confusion.stats['CCe']

        for confusion, error in (
            (cve.ca.confusion, corr),
            (splitregr.ca.confusion, split_corr),
            (splitregr.ca.training_confusion, split_corr_tr),
        ):
            #TODO: test confusion statistics
            # Part of it for now -- CCe
            for conf in confusion.summaries:
                stats = conf.stats
                if cfg.getboolean('tests', 'labile', default='yes'):
                    self.failUnless(stats['CCe'] < 0.5)
                self.failUnlessEqual(stats['CCe'], stats['Summary CCe'])

            s0 = confusion.as_string(short=True)
            s1 = confusion.as_string(short=False)

            for s in [s0, s1]:
                self.failUnless(len(s) > 10,
                                msg="We should get some string representation "
                                "of regression summary. Got %s" % s)
            if cfg.getboolean('tests', 'labile', default='yes'):
                self.failUnless(
                    error < 0.2,
                    msg="Regressions should perform well on a simple "
                    "dataset. Got correlation error of %s " % error)

            # Test access to summary statistics
            # YOH: lets start making testing more reliable.
            #      the p-value for such an accident is verrrry tiny,
            #      so if the regression works -- it had better have at least 0.5 ;)
            #      otherwise fix it! ;)
            # YOH: not now -- issues with libsvr in SG and linear kernel
            if cfg.getboolean('tests', 'labile', default='yes'):
                self.failUnless(confusion.stats['CCe'] < 0.5)

        # just to check if it works fine
        split_predictions = splitregr.predict(ds.samples)
Example #20
0
class Classifier(ClassWithCollections):
    """Abstract classifier class to be inherited by all classifiers
    """

    # Kept separate from the docstring so as not to pollute help(clf),
    # especially if we include help for the parent class
    _DEV__doc__ = """
    Required behavior:

    Every classifier has to be instantiable without having to specify the
    training pattern.

    Repeated calls to the train() method with different training data have to
    result in a valid classifier, trained for the particular dataset.

    It must be possible to specify all classifier parameters as keyword
    arguments to the constructor.

    Recommended behavior:

    Derived classifiers should provide access to *estimates* -- i.e. that
    information that is finally used to determine the predicted class label.

    Michael: Maybe it works well if each classifier provides an 'estimates'
             state member. This variable is a list as long as, and in the same
             order as, Dataset.uniquetargets (training data). Each item in the
             list corresponds to the likelihood of a sample belonging to the
             respective class. However the semantics might differ between
             classifiers, e.g. kNN would probably store distances to class
             neighbors, whereas PLR would store the raw function value of the
             logistic function. So in the case of kNN low is predictive and for
             PLR high is predictive. Don't know if there is a need to unify
             that.

             As the storage and/or computation of this information might be
             demanding, its collection should be switchable and off by default.

    Nomenclature
     * predictions  : result of the last call to .predict()
     * estimates : might be different from predictions if a classifier's predict()
                   makes a decision based on some internal value such as
                   probability or a distance.
    """
    # Dict that contains the parameters of a classifier.
    # This shall provide an interface to plug generic parameter optimizer
    # on all classifiers (e.g. grid- or line-search optimizer)
    # A dictionary is used because Michael thinks that access by name is nicer.
    # Additionally Michael thinks ATM that additional information might be
    # necessary in some situations (e.g. reasonably predefined parameter range,
    # minimal iteration stepsize, ...), therefore the value to each key should
    # also be a dict, or we should use mvpa.misc.param.Parameter...

    trained_targets = ConditionalAttribute(
        enabled=True, doc="Set of unique targets it has been trained on")

    trained_nsamples = ConditionalAttribute(
        enabled=True, doc="Number of samples it has been trained on")

    trained_dataset = ConditionalAttribute(
        enabled=False, doc="The dataset it has been trained on")

    training_confusion = ConditionalAttribute(
        enabled=False, doc="Confusion matrix of learning performance")

    predictions = ConditionalAttribute(enabled=True,
                                       doc="Most recent set of predictions")

    estimates = ConditionalAttribute(
        enabled=True,
        doc="Internal classifier estimates the most recent " +
        "predictions are based on")

    training_time = ConditionalAttribute(
        enabled=True, doc="Time (in seconds) which took classifier to train")

    predicting_time = ConditionalAttribute(
        enabled=True, doc="Time (in seconds) which took classifier to predict")

    feature_ids = ConditionalAttribute(
        enabled=False,
        doc="Feature IDS which were used for the actual training.")

    __tags__ = []
    """Describes some specifics about the classifier -- is that it is
    doing regression for instance...."""

    targets_attr = Parameter(
        'targets',
        allowedtype='str',  # ro=True,
        doc="""What samples attribute to use as targets.""",
        index=999)

    # TODO: make it available only for actually retrainable classifiers
    retrainable = Parameter(
        False,
        allowedtype='bool',
        doc="""Either to enable retraining for 'retrainable' classifier.""",
        index=1002)

    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = None
        """Stores number of features for which classifier was trained.
        If None -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)

        # deprecate
        #self.__trainedidhash = None
        #"""Stores id of the dataset on which it was trained to signal
        #in trained() if it was trained already on the same dataset"""

    @property
    def __summary_class__(self):
        if 'regression' in self.__tags__:
            return RegressionStatistics
        else:
            return ConfusionMatrix

    @property
    def __is_regression__(self):
        return 'regression' in self.__tags__

    def __str__(self):
        if __debug__ and 'CLF_' in debug.active:
            return "%s / %s" % (repr(self), super(Classifier, self).__str__())
        else:
            return repr(self)

    def __repr__(self, prefixes=[]):
        return super(Classifier, self).__repr__(prefixes=prefixes)

    def _pretrain(self, dataset):
        """Functionality prior to training
        """
        # So we reset all conditional attributes and maybe free up some memory
        # explicitly
        params = self.params
        if not params.retrainable:
            self.untrain()
        else:
            # just reset the ca, do not untrain
            self.ca.reset()
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                __idhashes = self.__idhashes
                __invalidatedChangedData = self.__invalidatedChangedData

                # if we don't know what was changed, we need to figure
                # it out
                if __debug__:
                    debug('CLF_', "IDHashes are %s" % (__idhashes))

                # Look at the data if any was changed
                for key, data_ in (('traindata', dataset.samples),
                                   ('targets',
                                    dataset.sa[params.targets_attr].value)):
                    _changedData[key] = self.__was_data_changed(key, data_)
                    # if those idhashes were invalidated by retraining
                    # we need to adjust _changedData accordingly
                    if __invalidatedChangedData.get(key, False):
                        if __debug__ and not _changedData[key]:
                            debug(
                                'CLF_', 'Found that idhash for %s was '
                                'invalidated by retraining' % key)
                        _changedData[key] = True

                # Look at the parameters
                for col in self._paramscols:
                    changedParams = self._collections[col].which_set()
                    if len(changedParams):
                        _changedData[col] = changedParams

                self.__invalidatedChangedData = {}  # reset it on training

                if __debug__:
                    debug('CLF_',
                          "Obtained _changedData is %s" % (self._changedData))

    def _posttrain(self, dataset):
        """Functionality post training

        For instance -- computing confusion matrix.

        Parameters
        ----------
        dataset : Dataset
          Data which was used for training
        """
        ca = self.ca
        if ca.is_enabled('trained_targets'):
            ca.trained_targets = dataset.sa[self.params.targets_attr].unique

        ca.trained_dataset = dataset
        ca.trained_nsamples = dataset.nsamples

        # needs to be assigned first since below we use predict
        self.__trainednfeatures = dataset.nfeatures

        if __debug__ and 'CHECK_TRAINED' in debug.active:
            self.__trainedidhash = dataset.idhash

        if self.ca.is_enabled('training_confusion') and \
               not self.ca.is_set('training_confusion'):
            # we should not store predictions for training data,
            # it is confusing imho (yoh)
            self.ca.change_temporarily(disable_ca=["predictions"])
            if self.params.retrainable:
                # we would need to recheck if data is the same,
                # XXX think if there is a way to make this all
                # efficient. For now, probably, retrainable
                # classifiers have no choice but to not use
                # training_confusion... sad
                self.__changedData_isset = False
            predictions = self.predict(dataset)
            self.ca.reset_changed_temporarily()
            self.ca.training_confusion = self.__summary_class__(
                targets=dataset.sa[self.params.targets_attr].value,
                predictions=predictions)

        if self.ca.is_enabled('feature_ids'):
            self.ca.feature_ids = self._get_feature_ids()

    ##REF: Name was automagically refactored
    def _get_feature_ids(self):
        """Virtual method to return feature_ids used while training

        Is not intended to be called anywhere but from _posttrain,
        thus classifier is assumed to be trained at this point
        """
        # By default all features are used
        return range(self.__trainednfeatures)

    def summary(self):
        """Providing summary over the classifier"""

        s = "Classifier %s" % self
        ca = self.ca
        ca_enabled = ca.enabled

        if self.trained:
            s += "\n trained"
            if ca.is_set('training_time'):
                s += ' in %.3g sec' % ca.training_time
            s += ' on data with'
            if ca.is_set('trained_targets'):
                s += ' targets:%s' % list(ca.trained_targets)

            nsamples, nchunks = None, None
            if ca.is_set('trained_nsamples'):
                nsamples = ca.trained_nsamples
            if ca.is_set('trained_dataset'):
                td = ca.trained_dataset
                nsamples, nchunks = td.nsamples, len(td.sa['chunks'].unique)
            if nsamples is not None:
                s += ' #samples:%d' % nsamples
            if nchunks is not None:
                s += ' #chunks:%d' % nchunks

            s += " #features:%d" % self.__trainednfeatures
            if ca.is_set('feature_ids'):
                s += ", used #features:%d" % len(ca.feature_ids)
            if ca.is_set('training_confusion'):
                s += ", training error:%.3g" % ca.training_confusion.error
        else:
            s += "\n not yet trained"

        if len(ca_enabled):
            s += "\n enabled ca:%s" % ', '.join(
                [str(ca[x]) for x in ca_enabled])
        return s

    def clone(self):
        """Create full copy of the classifier.

        It might require the classifier to be untrained first due to
        the present SWIG bindings.

        TODO: think about proper re-implementation, without enrollment of deepcopy
        """
        if __debug__:
            debug("CLF", "Cloning %s#%s" % (self, id(self)))
        try:
            return deepcopy(self)
        except:
            self.untrain()
            return deepcopy(self)

    def _train(self, dataset):
        """Function to be actually overridden in derived classes
        """
        raise NotImplementedError

    def train(self, dataset):
        """Train classifier on a dataset

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so
        """
        if dataset.nfeatures == 0 or dataset.nsamples == 0:
            raise DegenerateInputError, \
                  "Cannot train classifier on degenerate data %s" % dataset
        if __debug__:
            debug("CLF",
                  "Training classifier %(clf)s on dataset %(dataset)s",
                  msgargs={
                      'clf': self,
                      'dataset': dataset
                  })

        self._pretrain(dataset)

        # remember the time when started training
        t0 = time.time()

        if dataset.nfeatures > 0:
            result = self._train(dataset)
        else:
            warning("Trying to train on dataset with no features present")
            if __debug__:
                debug("CLF",
                      "No features present for training, no actual training " \
                      "is called")
            result = None

        self.ca.training_time = time.time() - t0
        self._posttrain(dataset)
        return result

    def _prepredict(self, dataset):
        """Functionality prior prediction
        """
        if not ('notrain2predict' in self.__tags__):
            # check if classifier was trained if that is needed
            if not self.trained:
                raise ValueError, \
                      "Classifier %s wasn't yet trained, therefore can't " \
                      "predict" % self
            nfeatures = dataset.nfeatures  #data.shape[1]
            # check if number of features is the same as in the data
            # it was trained on
            if nfeatures != self.__trainednfeatures:
                raise ValueError, \
                      "Classifier %s was trained on data with %d features, " % \
                      (self, self.__trainednfeatures) + \
                      "thus can't predict for %d features" % nfeatures

        if self.params.retrainable:
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                data = np.asanyarray(dataset.samples)
                _changedData['testdata'] = \
                                        self.__was_data_changed('testdata', data)
                if __debug__:
                    debug(
                        'CLF_', "prepredict: Obtained _changedData is %s" %
                        (_changedData))

    def _postpredict(self, dataset, result):
        """Functionality after prediction is computed
        """
        self.ca.predictions = result
        if self.params.retrainable:
            self.__changedData_isset = False

    def _predict(self, dataset):
        """Actual prediction
        """
        raise NotImplementedError

    @accepts_samples_as_dataset
    def predict(self, dataset):
        """Predict classifier on data

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so. Also, subclasses that want to invoke the super class's
        prediction from within their own _predict should call _predict,
        not predict(), since the latter would recurse indefinitely
        """
        ## ??? yoh: changed to asany from as without exhaustive check
        data = np.asanyarray(dataset.samples)
        if __debug__:
            debug("CLF",
                  "Predicting classifier %(clf)s on ds %(dataset)s",
                  msgargs={
                      'clf': self,
                      'dataset': dataset
                  })

        # remember the time when started computing predictions
        t0 = time.time()

        ca = self.ca
        # ensure that these are reset (they could have been set while
        # testing post-training)
        ca.reset(['estimates', 'predictions'])

        self._prepredict(dataset)

        if self.__trainednfeatures > 0 \
               or 'notrain2predict' in self.__tags__:
            result = self._predict(dataset)
        else:
            warning(
                "Trying to predict using classifier trained on no features")
            if __debug__:
                debug("CLF",
                      "No features were present for training, prediction is " \
                      "bogus")
            result = [None] * data.shape[0]

        ca.predicting_time = time.time() - t0

        # with labels mapping in-place, we also need to go back to the
        # literal labels
        if self._attrmap:
            try:
                result = self._attrmap.to_literal(result)
            except KeyError, e:
                raise FailedToPredictError, \
                      "Failed to convert predictions from numeric into " \
                      "literals: %s" % e

        self._postpredict(dataset, result)
        return result
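
For orientation, here is a minimal, hypothetical sketch of how a concrete classifier plugs into the train/predict protocol above: only _train and _predict are overridden, while the inherited train()/predict() wrappers handle timing, conditional attributes and the numeric/literal label mapping. The import path and the exact base-class behaviour are assumptions based on the code shown here, not on a particular PyMVPA release.

import numpy as np
from mvpa.clfs.base import Classifier   # assumed import path

class MajorityClassifier(Classifier):
    """Toy classifier: always predicts the most frequent training target."""

    def _train(self, dataset):
        # targets_attr is the same parameter consulted in _posttrain above
        targets = dataset.sa[self.params.targets_attr].value
        uniq = np.unique(targets)
        counts = [np.sum(targets == u) for u in uniq]
        self._majority = uniq[np.argmax(counts)]

    def _predict(self, dataset):
        # one prediction per sample; the inherited predict() takes care of
        # _prepredict/_postpredict and any attrmap back-conversion
        return [self._majority] * dataset.nsamples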
Example #21
0
    def _train(self, dataset):
        """Train SVM
        """

        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable

        targets_sa_name = params.targets_attr  # name of targets sa
        targets_sa = dataset.sa[targets_sa_name]  # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset

        # OK -- we have to map labels since
        #  binary ones expect -1/+1
        #  multiclass ones expect labels starting with 0, otherwise they puke
        #   when run from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([(ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)

            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')

        # KERNEL

        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get(
                'kernel_params', False)

        if not retrainable \
               or _changedData['traindata'] or _changedData['kernel_params']:
            # If needed compute or just collect arguments for SVM and for
            # the kernel

            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug(
                        "SG",
                        "Re-Creating kernel since training data has changed")

                if _changedData['kernel_params']:
                    debug(
                        "SG",
                        "Re-Creating kernel since params %s has changed" %
                        _changedData['kernel_params'])

            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')

            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, i.e. without
        # recreating the whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not get ahead of ourselves and leave it up to the user,
                #     i.e. do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitly whether to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accomodate the "
                #                  "difference in number of training samples" %
                #                  Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % ` svm_impl_class `)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions the constructor is a bit different
                self.__svm = svm_impl_class(Cs[0], self.params.tube_epsilon,
                                            self.__kernel, labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau, self.__kernel,
                                            labels)
            else:
                self.__svm = svm_impl_class(Cs[0], self.__kernel, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug(
                        "SG_",
                        "Since multiple Cs are provided: %s, assign them" % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')
            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:  # labels were changed
                if __debug__: debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:  # kernel was replaced
                if __debug__: debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert _changedData['params'] is False  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug(
                "SG", "%sTraining %s on data%s" %
                (("", "Re-")[retrainable and self.ca.retrained], self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_confusion'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm.classify().get_labels()
        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug(
                "SG__", "Original labels: %s, Trained labels: %s" %
                (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets' and
        #     use it within base Classifier._posttrain to assign predictions
        #     instead of duplicating code here
        # XXX For now it can be done only for regressions, since labels need
        #     to be remapped, which gets even worse if we use a regression
        #     as a classifier, so the mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_confusion'):
            self.ca.training_confusion = self.__summary_class__(
                targets=targets_sa.value, predictions=trained_targets)
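
The binary label handling in _train above can be exercised in isolation. A minimal sketch with made-up target names; the AttributeMap import path is an assumption, while the -1/+1 dict mirrors _labels_dict from the code above.

import numpy as np
from mvpa.misc.attrmap import AttributeMap   # assumed import path

targets = np.array(['rest', 'task', 'task', 'rest'])   # hypothetical targets
ul = np.unique(targets)
# binary SVMs expect -1/+1, hence the explicit mapping (cf. _labels_dict)
labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
am = AttributeMap(labels_dict, mapnumeric=True)

labels_ = am.to_numeric(targets).astype(float)   # -> [-1., 1., 1., -1.]
back = am.to_literal(labels_)                    # -> ['rest', 'task', 'task', 'rest']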
Example #22
0
def test_attrmap():
    map_default = {'eins': 0, 'zwei': 2, 'sieben': 1}
    map_custom = {'eins': 11, 'zwei': 22, 'sieben': 33}
    literal = ['eins', 'zwei', 'sieben', 'eins', 'sieben', 'eins']
    literal_nonmatching = ['uno', 'dos', 'tres']
    num_default = [0, 2, 1, 0, 1, 0]
    num_custom = [11, 22, 33, 11, 33, 11]

    # no custom mapping given
    am = AttributeMap()
    assert_false(am)
    ok_(len(am) == 0)
    assert_array_equal(am.to_numeric(literal), num_default)
    assert_array_equal(am.to_literal(num_default), literal)
    ok_(am)
    ok_(len(am) == 3)

    #
    # Tests for recursive mapping + preserving datatype
    class myarray(np.ndarray):
        pass

    assert_raises(KeyError, am.to_literal, [(1, 2), 2, 0])
    literal_fancy = [(1, 2), 2, [0], np.array([0, 1]).view(myarray)]
    literal_fancy_tuple = tuple(literal_fancy)
    literal_fancy_array = np.array(literal_fancy, dtype=object)

    for l in (literal_fancy, literal_fancy_tuple,
              literal_fancy_array):
        res = am.to_literal(l, recurse=True)
        assert_equal(res[0], ('sieben', 'zwei'))
        assert_equal(res[1], 'zwei')
        assert_equal(res[2], ['eins'])
        assert_array_equal(res[3], ['eins', 'sieben'])

        # types of result and subsequences should be preserved
        ok_(isinstance(res, l.__class__))
        ok_(isinstance(res[0], tuple))
        ok_(isinstance(res[1], str))
        ok_(isinstance(res[2], list))
        ok_(isinstance(res[3], myarray))

    # yet another example
    a = np.empty(1, dtype=object)
    a[0] = (0, 1)
    res = am.to_literal(a, recurse=True)
    ok_(isinstance(res[0], tuple))

    #
    # with custom mapping
    am = AttributeMap(map=map_custom)
    assert_array_equal(am.to_numeric(literal), num_custom)
    assert_array_equal(am.to_literal(num_custom), literal)

    # if the input is already numeric, nothing gets mapped
    assert_array_equal(am.to_numeric(num_custom), num_custom)
    # even if the map doesn't fit
    assert_array_equal(am.to_numeric(num_default), num_default)

    # to_literal needs a prior to_numeric call to build the mapping
    am = AttributeMap()
    assert_raises(RuntimeError, am.to_literal, [1,2,3])
    # stupid args
    assert_raises(ValueError, AttributeMap, map=num_custom)

    # map mismatch
    am = AttributeMap(map=map_custom)
    if __debug__:
        # checked only in __debug__
        assert_raises(KeyError, am.to_numeric, literal_nonmatching)
    # needs reset and should work afterwards
    am.clear()
    assert_array_equal(am.to_numeric(literal_nonmatching), [2, 0, 1])
    # and now reverse
    am = AttributeMap(map=map_custom)
    assert_raises(KeyError, am.to_literal, num_default)

    # dict-like interface
    am = AttributeMap()

    ok_([(k, v) for k, v in am.iteritems()] == [])
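
Beyond the unit test above, a common use of mapnumeric=True is to turn an arbitrary literal sample attribute into small consecutive integers, much like the region-plotting fragment below does for chunks. A minimal sketch; the import path and the exact numeric ids are assumptions.

import numpy as np
from mvpa.misc.attrmap import AttributeMap   # assumed import path

chunks_lit = ['run1', 'run2', 'run1', 'run3']   # hypothetical chunk labels
# mapnumeric=True lets the map also cover values that are already numeric
# (by default plain numerics pass through unmapped, as tested above)
am = AttributeMap(mapnumeric=True)
chunks = am.to_numeric(chunks_lit)              # e.g. [0, 1, 0, 2]
uchunks = np.unique(chunks)                     # one numeric id per chunk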
Example #23
0
        except ValueError, e:
            print "Sorry - plotting of estimates isn't full supported for %s. " \
                  "Got exception %s" % (clf, e)
    elif maps == 'targets':
        map_values = attrmap.to_numeric(predictions_new).reshape(x.shape)
        a.imshow(map_values.T, **imshow_kwargs)
        #CS = a.contour(x, y, map_values, vals, zorder=6,
        #               linestyles=linestyles, extent=extent, colors='k')

    # Plot regions belonging to the same pair of the given attribute
    # (e.g. chunks) and the targets attribute
    if regions:
        chunks_sa = dataset.sa[regions]
        chunks_lit = chunks_sa.value
        uchunks_lit = chunks_sa.unique
        chunks_attrmap = AttributeMap(mapnumeric=True)
        chunks = chunks_attrmap.to_numeric(chunks_lit)
        uchunks = chunks_attrmap.to_numeric(uchunks_lit)

        from matplotlib.delaunay.triangulate import Triangulation
        from matplotlib.patches import Polygon
        # Let's figure out convex hulls for each chunk/label pair
        for target in utargets:
            t_mask = targets == target
            for chunk in uchunks:
                tc_mask = np.logical_and(t_mask, chunk == chunks)
                tc_samples = dataset.samples[tc_mask]
                tr = Triangulation(tc_samples[:, 0], tc_samples[:, 1])
                poly = pl.fill(
                    tc_samples[tr.hull, 0],
                    tc_samples[tr.hull, 1],