Example #1
        class MyNode(Node):
            some_sa = ConditionalAttribute(enabled=True)
            some_fa = ConditionalAttribute(enabled=True)
            some_complex = ConditionalAttribute(enabled=True)

            def _call(self, ds):
                return Dataset(np.zeros(ds.shape))
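
A minimal usage sketch for the class above (hedged: `ds` is assumed to be an existing Dataset; the enable_ca / is_enabled calls follow the patterns shown in the later examples):

# hypothetical usage of MyNode; `ds` is an assumed mvpa2 Dataset
node = MyNode(enable_ca=['some_sa'])     # enable selected attributes at construction
node.ca.enable('some_complex', False)    # or toggle them later (same call as in Example #12)
if node.ca.is_enabled('some_fa'):
    pass                                 # guard any work that would charge self.ca.some_fa
result = node(ds)                        # _call() runs and returns Dataset(np.zeros(ds.shape))
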
Example #2
 def test_deep_copying_state_variable(self):
     for v in (True, False):
         sv = ConditionalAttribute(enabled=v, doc="Testing")
         sv.enabled = not v
         sv_dc = copy.deepcopy(sv)
         if not (__debug__ and 'ENFORCE_CA_ENABLED' in debug.active):
             self.assertEqual(sv.enabled, sv_dc.enabled)
         self.assertEqual(sv.name, sv_dc.name)
         self.assertEqual(sv._instance_index, sv_dc._instance_index)
Example #3
 def test_deep_copying_state_variable(self):
     for v in (True, False):
         sv = ConditionalAttribute(enabled=v,
                            doc="Testing")
         sv.enabled = not v
         sv_dc = copy.deepcopy(sv)
         self.failUnlessEqual(sv.enabled, sv_dc.enabled)
         self.failUnlessEqual(sv.name, sv_dc.name)
         self.failUnlessEqual(sv._instance_index, sv_dc._instance_index)
Example #4
 def test_deep_copying_state_variable(self):
     for v in (True, False):
         sv = ConditionalAttribute(enabled=v, doc="Testing")
         sv.enabled = not v
         sv_dc = copy.deepcopy(sv)
         if not (__debug__ and "ENFORCE_CA_ENABLED" in debug.active):
             self.assertEqual(sv.enabled, sv_dc.enabled)
         self.assertEqual(sv.name, sv_dc.name)
         self.assertEqual(sv._instance_index, sv_dc._instance_index)
Example #5
    def _set_retrainable(self, value, force=False):
        """Assign value of retrainable parameter

        If the retrainable flag is to be changed, the classifier has to be
        untrained.  Also, internal attributes such as _changedData,
        __changedData_isset, and __idhashes should be initialized if
        it becomes retrainable.
        """
        pretrainable = self.params['retrainable']
        if (force or value != pretrainable.value) \
               and 'retrainable' in self.__tags__:
            if __debug__:
                debug("CLF_", "Setting retrainable to %s" % value)
            if 'meta' in self.__tags__:
                warning("Retrainability is not yet crafted/tested for "
                        "meta classifiers. Unpredictable behavior might occur")
            # assure that we don't drag anything behind
            if self.trained:
                self.untrain()
            ca = self.ca
            if not value and ca.has_key('retrained'):
                ca.pop('retrained')
                ca.pop('repredicted')
            if value:
                if not 'retrainable' in self.__tags__:
                    warning("Setting of flag retrainable for %s has no effect"
                            " since classifier has no such capability. It would"
                            " just lead to resources consumption and slowdown"
                            % self)
                ca['retrained'] = ConditionalAttribute(enabled=True,
                        doc="Whether the retrainable classifier was retrained")
                ca['repredicted'] = ConditionalAttribute(enabled=True,
                        doc="Whether the retrainable classifier was repredicted")

            pretrainable.value = value

            # if retrainable we need to keep track of things
            if value:
                self.__idhashes = {'traindata': None, 'targets': None,
                                   'testdata': None} #, 'testtraindata': None}
                if __debug__ and 'CHECK_RETRAIN' in debug.active:
                    # ??? it is not clear though if idhash is faster than
                    # simple comparison of (dataset != __traineddataset).any(),
                    # but if we like to get rid of __traineddataset then we
                    # should use idhash anyways
                    self.__trained = self.__idhashes.copy() # just same Nones
                self.__reset_changed_data()
                self.__invalidatedChangedData = {}
            elif 'retrainable' in self.__tags__:
                #self.__reset_changed_data()
                self.__changedData_isset = False
                self._changedData = None
                self.__idhashes = None
                if __debug__ and 'CHECK_RETRAIN' in debug.active:
                    self.__trained = None
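
The method above shows the runtime pattern of registering conditional attributes directly in a collection (`ca['retrained'] = ConditionalAttribute(...)`). A small, hedged sketch of that pattern in isolation, using the collection class imported in Example #8:

from mvpa2.base.state import ConditionalAttributesCollection, ConditionalAttribute

col = ConditionalAttributesCollection([], name='demo')   # empty collection (assumed to be allowed)
# register an attribute on the fly, as _set_retrainable() does for 'retrained'
col['retrained'] = ConditionalAttribute(
    enabled=True, doc="Whether the retrainable classifier was retrained")
col.retrained = True     # charge it with a value
col.pop('retrained')     # and drop it again, mirroring the cleanup branch above
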
Example #6
class ProxyClassifierSensitivityAnalyzer(Sensitivity):
    """Set sensitivity analyzer output just to pass through"""

    clf_sensitivities = ConditionalAttribute(
        enabled=False, doc="Stores sensitivities of the proxied classifier")

    @group_kwargs(prefixes=['slave_'], assign=True)
    def __init__(self, clf, analyzer=None, **kwargs):
        """Initialize Sensitivity Analyzer for `BoostedClassifier`
        """
        Sensitivity.__init__(self, clf, **kwargs)
        # _slave_kwargs is assigned due to assign=True in @group_kwargs
        if analyzer is not None and len(self._slave_kwargs):
            raise ValueError, \
                  "Provide either analyzer of slave_* arguments, not both"

        # Do not force_train slave sensitivity since the dataset might
        # be inappropriate -- rely on the classifier being trained by
        # the extraction by the meta classifier itself
        self._slave_kwargs = _dont_force_slaves(self._slave_kwargs)

        self.__analyzer = analyzer
        """Analyzer to use for basic classifiers within boosted classifier"""

    def _untrain(self):
        super(ProxyClassifierSensitivityAnalyzer, self)._untrain()
        if self.__analyzer is not None:
            self.__analyzer.untrain()

    def _call(self, dataset):
        # OPT: local bindings
        clfclf = self.clf.clf
        analyzer = self.__analyzer

        if analyzer is None:
            analyzer = clfclf.get_sensitivity_analyzer(**(self._slave_kwargs))
            if analyzer is None:
                raise ValueError, \
                      "Wasn't able to figure basic analyzer for clf %s" % \
                      `clfclf`
            if __debug__:
                debug("SA", "Selected analyzer %s for clf %s" % \
                      (analyzer, clfclf))
            # bind to the instance finally
            self.__analyzer = analyzer

        # TODO "remove" unnecessary things below on each call...
        # assign corresponding classifier
        analyzer.clf = clfclf

        # if clf was trained already - don't train again
        if clfclf.trained:
            analyzer._force_train = False

        result = analyzer._call(dataset)
        self.ca.clf_sensitivities = result

        return result

    analyzer = property(fget=lambda x: x.__analyzer)
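
A hedged sketch of how this analyzer is typically reached and how the stored attribute is read back (the proxy classifier, the dataset, and passing enable_ca through get_sensitivity_analyzer() are assumptions):

# `proxy_clf` is an assumed, already constructed proxy classifier; `ds` an assumed Dataset
sens_ana = proxy_clf.get_sensitivity_analyzer(enable_ca=['clf_sensitivities'])
proxy_clf.train(ds)
sens = sens_ana(ds)                          # runs _call() shown above
raw_sens = sens_ana.ca.clf_sensitivities     # sensitivities of the proxied classifier
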
Example #7
class TestClassParametrized(TestClassProper, ClassWithCollections):
    p1 = Parameter(0)
    state0 = ConditionalAttribute(enabled=False)

    def __init__(self, **kwargs):
        # XXX make such example when we actually need to invoke
        # constructor
        # TestClassProper.__init__(self, **kwargs)
        ClassWithCollections.__init__(self, **kwargs)
Example #8
def test_ca_col(f, backend):
    from mvpa2.base.state import ConditionalAttributesCollection, ConditionalAttribute
    c1 = ConditionalAttribute(name='ca1', enabled=True)
    #c2 = ConditionalAttribute(name='test2', enabled=True)
    col = ConditionalAttributesCollection([c1], name='whoknows')
    col.ca1 = col  # {0: c1, 1: [None, col]}  # nest badly
    assert_true(col.ca1 is col)
    col_ = saveload(col, f, backend=backend)
    # seems to work niceish with pickle
    #print col_, col_.ca1, col_.ca1.ca1, col_.ca1.ca1.ca1
    assert_true(col_.ca1.ca1 is col_.ca1)
    # but even there top-level assignment test fails, which means it creates two
    # instances
    if backend != 'pickle':
        assert_true(col_.ca1 is col_)
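
For contrast with the deliberately self-referencing collection above, a plain construction and value assignment might look like this hedged sketch:

from mvpa2.base.state import ConditionalAttributesCollection, ConditionalAttribute

c1 = ConditionalAttribute(name='ca1', enabled=True)
col = ConditionalAttributesCollection([c1], name='simple')
col.ca1 = 42                 # charge the attribute with an ordinary value
assert col.ca1 == 42         # attribute access returns the stored value
assert col['ca1'].enabled    # item access returns the attribute object itself
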
Example #9
class GPRLinearWeights(Sensitivity):
    """`SensitivityAnalyzer` that reports the weights GPR trained
    on a given `Dataset`.

    In case of LinearKernel compute explicitly the coefficients
    of the linear regression, together with their variances (if
    requested).

    Note that the intercept is not computed.
    """

    variances = ConditionalAttribute(enabled=False,
        doc="Variances of the weights (for GeneralizedLinearKernel)")

    _LEGAL_CLFS = [ GPR ]


    def _call(self, dataset):
        """Extract weights from GPR
        """

        clf = self.clf
        kernel = clf.kernel
        train_fv = clf._train_fv
        if isinstance(kernel, LinearKernel):
            Sigma_p = 1.0
        else:
            Sigma_p = kernel.params.Sigma_p

        weights = Ndot(Sigma_p,
                        Ndot(train_fv.T, clf._alpha))

        if self.ca.is_enabled('variances'):
            # super ugly formulas that can be quite surely improved:
            tmp = np.linalg.inv(clf._L)
            Kyinv = Ndot(tmp.T, tmp)
            # XXX in such lengthy matrix manipulations you might better off
            #     using np.matrix where * is a matrix product
            self.ca.variances = Ndiag(
                Sigma_p -
                Ndot(Sigma_p,
                      Ndot(train_fv.T,
                            Ndot(Kyinv,
                                  Ndot(train_fv, Sigma_p)))))
        return Dataset(np.atleast_2d(weights))
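
A hedged sketch of requesting the optional variances (the GPR import path and the training dataset are assumptions; the enable_ca pattern follows Example #21):

from mvpa2.clfs.gpr import GPR               # assumed import path

clf = GPR()                                  # default kernel assumed
clf.train(ds)                                # `ds` is an assumed regression Dataset
sens_ana = clf.get_sensitivity_analyzer(enable_ca=['variances'])
weights = sens_ana(ds)                       # Dataset of weights from _call() above
variances = sens_ana.ca.variances            # filled only because 'variances' was enabled
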
Example #10
class SensitivityBasedFeatureSelection(FeatureSelection):
    """Feature elimination.

    A `FeaturewiseMeasure` is used to compute sensitivity maps given a certain
    dataset. These sensitivity maps are in turn used to discard unimportant
    features.
    """

    sensitivity = ConditionalAttribute(enabled=False)

    def __init__(self,
                 sensitivity_analyzer,
                 feature_selector=FractionTailSelector(0.05),
                 train_analyzer=True,
                 **kwargs
                 ):
        """Initialize feature selection

        Parameters
        ----------
        sensitivity_analyzer : FeaturewiseMeasure
          sensitivity analyzer to come up with sensitivity
        feature_selector : Functor
          Given a sensitivity map it has to return the ids of those
          features that should be kept.
        train_analyzer : bool
          Flag whether to train the sensitivity analyzer on the input dataset
          during train(). If False, the employed sensitivity measure has to be
          already trained before.
        """

        # base init first
        FeatureSelection.__init__(self, **kwargs)

        self.__sensitivity_analyzer = sensitivity_analyzer
        """Sensitivity analyzer to use once"""

        self.__feature_selector = feature_selector
        """Functor which takes care about removing some features."""

        self.__train_analyzer = train_analyzer


    def _train(self, dataset):
        """Select the most important features

        Parameters
        ----------
        dataset : Dataset
          used to compute sensitivity maps
        """
        # optionally train the analyzer first
        if self.__train_analyzer:
            self.__sensitivity_analyzer.train(dataset)

        sensitivity = self.__sensitivity_analyzer(dataset)
        """Compute the sensitivity map."""

        self.ca.sensitivity = sensitivity

        # Select features to preserve
        selected_ids = self.__feature_selector(sensitivity)

        if __debug__:
            debug("FS_", "Sensitivity: %s Selected ids: %s" %
                  (sensitivity, selected_ids))

        # XXX not sure if it really has to be sorted
        selected_ids.sort()
        # announce desired features to the underlying slice mapper
        self._safe_assign_slicearg(selected_ids)
        # and perform its own training
        super(SensitivityBasedFeatureSelection, self)._train(dataset)


    def _untrain(self):
        if __debug__:
            debug("FS_", "Untraining sensitivity-based FS: %s" % self)
        self.__sensitivity_analyzer.untrain()
        # ask base class to do its untrain
        super(SensitivityBasedFeatureSelection, self)._untrain()

    # make it accessible from outside
    sensitivity_analyzer = property(fget=lambda self:self.__sensitivity_analyzer,
                                    doc="Measure which was used to do selection")
Example #11
class IterativeFeatureSelection(FeatureSelection):
    """
    """
    errors = ConditionalAttribute(
        doc="History of errors")
    nfeatures = ConditionalAttribute(
        doc="History of # of features left")

    def __init__(self,
                 fmeasure,
                 pmeasure,
                 splitter,
                 fselector,
                 stopping_criterion=NBackHistoryStopCrit(BestDetector()),
                 bestdetector=BestDetector(),
                 train_pmeasure=True,
                 # XXX should we may be guard splitter so we do not end up
                 # with inappropriate one for the use, i.e. which
                 # generates more than 2 splits
                 # guard_splitter=True,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        fmeasure : Measure
          Computed for each candidate feature selection. The measure has
          to compute a scalar value.
        pmeasure : Measure
          Compute against a test dataset for each incremental feature
          set.
        splitter: Splitter
          This splitter instance has to generate at least one dataset split
          when called with the input dataset that is used to compute the
          per-feature criterion for feature selection.
        bestdetector : Functor
          Given a list of error values it has to return a boolean that
          signals whether the latest error value is the total minimum.
        stopping_criterion : Functor
          Given a list of error values it has to return whether the
          criterion is fulfilled.
        fselector : Functor
        train_pmeasure : bool
          Flag whether `pmeasure` should be trained before computing the
          error. In general this is required, but if `fmeasure` and
          `pmeasure` share and make use of the same classifier it
          can be switched off to save CPU cycles.
        """
        # bases init first
        FeatureSelection.__init__(self, **kwargs)

        self._fmeasure = fmeasure
        self._pmeasure = pmeasure
        self._splitter = splitter
        self._fselector = fselector
        self._stopping_criterion = stopping_criterion
        self._bestdetector = bestdetector
        self._train_pmeasure = train_pmeasure


    def _untrain(self):
        if __debug__:
            debug("FS_", "Untraining Iterative FS: %s" % self)
        self._fmeasure.untrain()
        self._pmeasure.untrain()
        # ask base class to do its untrain
        super(IterativeFeatureSelection, self)._untrain()


    def _evaluate_pmeasure(self, train, test):
        # local binding
        pmeasure = self._pmeasure
        # might save some cycles by not training the measure, but only
        # the user can know whether this is sensible or possible
        if self._train_pmeasure:
            pmeasure.train(train)
        # actually run the performance measure to estimate "quality" of
        # selection
        return pmeasure(test)


    def _get_traintest_ds(self, ds):
        # activate the dataset splitter
        dsgen = self._splitter.generate(ds)
        # and derived the dataset part that is used for computing the selection
        # criterion
        trainds = dsgen.next()
        testds = dsgen.next()
        return trainds, testds

    # access properties
    fmeasure = property(fget=lambda self: self._fmeasure)
    pmeasure = property(fget=lambda self: self._pmeasure)
    splitter = property(fget=lambda self: self._splitter)
    fselector = property(fget=lambda self: self._fselector)
    stopping_criterion = property(fget=lambda self: self._stopping_criterion)
    bestdetector = property(fget=lambda self: self._bestdetector)
    train_pmeasure = property(fget=lambda self: self._train_pmeasure)
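
IterativeFeatureSelection itself is abstract; a hedged sketch of how the two history attributes are typically read from a concrete subclass such as the RFE configured in Example #21 (`rfe` and `ds` are assumptions):

# `rfe` is an assumed, already constructed concrete subclass (e.g. an RFE instance),
# `ds` an assumed Dataset
rfe.ca.enable('errors')
rfe.ca.enable('nfeatures')
rfe.train(ds)
print(rfe.ca.errors)       # error history, one entry per elimination step
print(rfe.ca.nfeatures)    # matching history of how many features were left
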
Example #12
class BLR(Classifier):
    """Bayesian Linear Regression (BLR).

    """

    predicted_variances = ConditionalAttribute(
        enabled=False, doc="Variance per each predicted value")

    log_marginal_likelihood = ConditionalAttribute(
        enabled=False, doc="Log Marginal Likelihood")

    __tags__ = ['blr', 'regression', 'linear']

    def __init__(self, sigma_p=None, sigma_noise=1.0, **kwargs):
        """Initialize a BLR regression analysis.

        Parameters
        ----------
        sigma_noise : float
          the standard deviation of the Gaussian noise.
          (Defaults to 1.0)

        """
        # init base class first
        Classifier.__init__(self, **kwargs)

        # pylint happiness
        self.w = None

        # It does not make sense to calculate a confusion matrix for a
        # BLR:
        self.ca.enable('training_stats', False)

        # set the prior on w: N(0,sigma_p) , specifying the covariance
        # sigma_p on w:
        self.sigma_p = sigma_p

        # set noise level:
        self.sigma_noise = sigma_noise

        self.ca.predicted_variances = None
        self.ca.log_marginal_likelihood = None
        # Yarik: what was those about??? just for future in
        #        compute_log_marginal_likelihood ?
        # self.targets = None
        pass

    def __repr__(self):
        """String summary of the object
        """
        return """BLR(w=%s, sigma_p=%s, sigma_noise=%f, enable_ca=%s)""" % \
               (self.w, self.sigma_p, self.sigma_noise, str(self.ca.enabled))

    def compute_log_marginal_likelihood(self):
        """
        Compute log marginal likelihood using self.train_fv and self.targets.
        """
        # log_marginal_likelihood = None
        # return log_marginal_likelihood
        raise NotImplementedError

    def _train(self, data):
        """Train regression using `data` (`Dataset`).
        """
        # BLR relies on numerical labels
        train_labels = self._attrmap.to_numeric(
            data.sa[self.get_space()].value)
        # provide a basic (i.e. identity matrix) and correct prior
        # sigma_p, if not provided before or not compliant to 'data':
        if self.sigma_p == None:  # case: not provided
            self.sigma_p = np.eye(data.samples.shape[1] + 1)
        elif self.sigma_p.shape[1] != (data.samples.shape[1] +
                                       1):  # case: wrong dimensions
            self.sigma_p = np.eye(data.samples.shape[1] + 1)
        else:
            # ...then everything is OK :)
            pass

        # add one fake column of '1.0' to model the intercept:
        self.samples_train = np.hstack(
            [data.samples, np.ones((data.samples.shape[0], 1))])
        if type(self.sigma_p) != type(
                self.samples_train):  # if sigma_p is a number...
            self.sigma_p = np.eye(self.samples_train.shape[1]
                                  ) * self.sigma_p  # convert in matrix
            pass

        self.A_inv = np.linalg.inv(
            1.0 / (self.sigma_noise**2) *
            np.dot(self.samples_train.T, self.samples_train) +
            np.linalg.inv(self.sigma_p))
        self.w = 1.0 / (self.sigma_noise**2) * np.dot(
            self.A_inv, np.dot(self.samples_train.T, train_labels))
        pass

    @accepts_dataset_as_samples
    def _predict(self, data):
        """
        Predict the output for the provided data.
        """

        data = np.hstack([data, np.ones((data.shape[0], 1), dtype=data.dtype)])
        predictions = np.dot(data, self.w)

        if self.ca.is_enabled('predicted_variances'):
            # do computation only if conditional attribute was enabled
            self.ca.predicted_variances = np.dot(
                data, np.dot(self.A_inv, data.T)).diagonal()[:, np.newaxis]
        self.ca.estimates = predictions
        return predictions

    def set_hyperparameters(self, *args):
        """
        Set hyperparameters' values.

        Note that this is a list so the order of the values is
        important.
        """
        args = args[0]
        self.sigma_noise = args[0]
        if len(args) > 1:
            self.sigma_p = np.array(args[1:])  # XXX check if this is ok
            pass
        return

    pass
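
A hedged sketch of enabling the per-prediction variances (the BLR import path and the dataset are assumptions):

from mvpa2.clfs.blr import BLR                   # assumed import path

clf = BLR(sigma_noise=1.0, enable_ca=['predicted_variances'])
clf.train(ds)                                    # `ds` is an assumed regression Dataset
predictions = clf.predict(ds.samples)
variances = clf.ca.predicted_variances           # one variance per predicted value
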
Example #13
class DistPValue(ClassWithCollections):
    """Converts values into p-values under vague and non-scientific assumptions
    """

    nulldist_number = ConditionalAttribute(
        enabled=True,
        doc="Number of features within the estimated null-distribution")

    positives_recovered = ConditionalAttribute(
        enabled=True,
        doc=
        "Number of features considered to be positives and which were recovered"
    )

    def __init__(self,
                 sd=0,
                 distribution='rdist',
                 fpp=None,
                 nbins=400,
                 **kwargs):
        """L2-Norm the values, convert them to p-values of a given distribution.

        Parameters
        ----------
        sd : int
          Samples dimension (if len(x.shape)>1) on which to operate
        distribution : string
          Which distribution to use. Known are: 'rdist' (later normal should
          be there as well)
        fpp : float
          If not None, the p-value (both tails) at which to control for false
          positives. The tails (tentative true positives) are iteratively
          pruned until the empirical p-value becomes less than or equal to
          the requested one.
        nbins : int
          Number of bins for the iterative pruning of positives

        WARNING: Highly experimental/slow/etc: no theoretical grounds have been
        presented in any paper, nor proven
        """
        externals.exists('scipy', raise_=True)
        ClassWithCollections.__init__(self, **kwargs)

        self.sd = sd
        if not (distribution in ['rdist']):
            raise ValueError, "Actually only rdist supported at the moment" \
                  " got %s" % distribution
        self.distribution = distribution
        self.fpp = fpp
        self.nbins = nbins

    def __call__(self, x):
        from mvpa2.support.scipy.stats import scipy
        import scipy.stats as stats

        # some local bindings
        distribution = self.distribution
        sd = self.sd
        fpp = self.fpp
        nbins = self.nbins

        x = np.asanyarray(x)
        shape_orig = x.shape
        ndims = len(shape_orig)

        # (very) old numpy had different format of returned bins --
        # there were not edges but center points.  We care here about
        # center points, so we will transform edge points into center
        # points for newer versions of numpy
        numpy_center_points = externals.versions['numpy'] < (1, 1)

        # XXX May be just utilize OverAxis transformer?
        if ndims > 2:
            raise NotImplementedError, \
                  "TODO: add support for more than 2 dimensions"
        elif ndims == 1:
            x, sd = x[:, np.newaxis], 0

        # lets transpose for convenience
        if sd == 0: x = x.T

        # Output p-values of x in null-distribution
        pvalues = np.zeros(x.shape)
        nulldist_number, positives_recovered = [], []

        # finally go through all data
        nd = x.shape[1]
        if __debug__:
            if nd < x.shape[0]:
                warning("Number of features in DistPValue lower than number of"
                        " items -- may be incorrect sd=%d was provided" % sd)
        for i, xx in enumerate(x):
            dist = stats.rdist(nd - 1, 0, 1)
            xx /= np.linalg.norm(xx)

            if fpp is not None:
                if __debug__:
                    debug('TRAN_', "starting adaptive adjustment i=%d" % i)

                # Adaptive adjustment for false negatives:
                Nxx, xxx, pN_emp_prev = len(xx), xx, 1.0
                Nxxx = Nxx
                indexes = np.arange(Nxx)
                """What features belong to Null-distribution"""
                while True:
                    hist, bins = np.histogram(xxx, bins=nbins, normed=False)
                    pdf = hist.astype(float) / Nxxx
                    if not numpy_center_points:
                        # if we obtain edge points for bins -- take centers
                        bins = 0.5 * (bins[0:-1] + bins[1:])
                    bins_halfstep = (bins[1] - bins[2]) / 2.0

                    # theoretical CDF
                    # was really unstable -- now got better ;)
                    dist_cdf = dist.cdf(bins)

                    # otherwise just recompute manually
                    # dist_pdf = dist.pdf(bins)
                    # dist_pdf /= np.sum(dist_pdf)

                    # XXX can't recall the function... silly
                    #     probably could use np.integrate
                    cdf = np.zeros(nbins, dtype=float)
                    #dist_cdf = cdf.copy()
                    dist_prevv = cdf_prevv = 0.0
                    for j in range(nbins):
                        cdf_prevv = cdf[j] = cdf_prevv + pdf[j]
                        #dist_prevv = dist_cdf[j] = dist_prevv + dist_pdf[j]

                    # what bins fall into theoretical 'positives' in both tails
                    p = (0.5 - np.abs(dist_cdf - 0.5) < fpp / 2.0)

                    # amount in empirical tails -- if we match theoretical, we
                    # should have total >= p

                    pN_emp = np.sum(pdf[p])  # / (1.0 * nbins)

                    if __debug__:
                        debug(
                            'TRAN_',
                            "empirical p=%.3f for theoretical p=%.3f" %
                            (pN_emp, fpp))

                    if pN_emp <= fpp:
                        # we are done
                        break

                    if pN_emp > pN_emp_prev:
                        if __debug__:
                            debug(
                                'TRAN_',
                                "Diverging -- thus keeping last result "
                                "with p=%.3f" % pN_emp_prev)
                        # we better restore previous result
                        indexes, xxx, dist = indexes_prev, xxx_prev, dist_prev
                        break

                    pN_emp_prev = pN_emp
                    # very silly way for now -- just proceed by 1 bin
                    keep = np.logical_and(
                        xxx > bins[0],  # + bins_halfstep,
                        xxx < bins[-1])  # - bins_halfstep)
                    if __debug__:
                        debug(
                            'TRAN_', "Keeping %d out of %d elements" %
                            (np.sum(keep), Nxxx))

                    # Preserve them if we need to "roll back"
                    indexes_prev, xxx_prev, dist_prev = indexes, xxx, dist

                    # we should remove those which we assume to be positives and
                    # which should not belong to Null-dist
                    xxx, indexes = xxx[keep], indexes[keep]
                    # L2 renorm it
                    xxx = xxx / np.linalg.norm(xxx)
                    Nxxx = len(xxx)
                    dist = stats.rdist(Nxxx - 1, 0, 1)

                Nindexes = len(indexes)
                Nrecovered = Nxx - Nindexes

                nulldist_number += [Nindexes]
                positives_recovered += [Nrecovered]

                if __debug__:
                    if distribution == 'rdist':
                        assert (dist.args[0] == Nindexes - 1)
                    debug(
                        'TRAN',
                        "Positives recovery finished with %d out of %d "
                        "entries in Null-distribution, thus %d positives "
                        "were recovered" % (Nindexes, Nxx, Nrecovered))

                # And now we need to perform our duty -- assign p-values
                #dist = stats.rdist(Nindexes-1, 0, 1)
            pvalues[i, :] = dist.cdf(xx)

        # XXX we might add an option to transform it to z-scores?
        result = pvalues

        # charge conditional attributes
        # XXX might want to populate them for non-adaptive handling as well
        self.ca.nulldist_number = nulldist_number
        self.ca.positives_recovered = positives_recovered

        # transpose if needed
        if sd == 0:
            result = result.T

        return result
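
A hedged sketch of converting a vector of scores to p-values with this class (the scores are made up; with fpp=None the two conditional attributes stay empty, as the XXX note above points out):

import numpy as np

dpv = DistPValue(sd=0, distribution='rdist')     # no adaptive pruning (fpp=None)
scores = np.random.normal(size=100)              # hypothetical featurewise scores
pvals = dpv(scores)                              # p-values under the rdist null
print((dpv.ca.nulldist_number, dpv.ca.positives_recovered))   # ([], []) here since fpp is None
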
Example #14
class TestClassProper(ClassWithCollections):

    state1 = ConditionalAttribute(enabled=False, doc="state1 doc")
    state2 = ConditionalAttribute(enabled=True, doc="state2 doc")
Example #15
class TestClassProperChild(TestClassProper):

    state4 = ConditionalAttribute(enabled=False, doc="state4 doc")
Example #16
class MixedClass(ClassWithCollections):
    C = Parameter(1.0, constraints=EnsureRange(min=0), doc="C parameter")
    D = Parameter(3.0, constraints=EnsureRange(min=0), doc="D parameter")
    state1 = ConditionalAttribute(doc="bogus")
Example #17
class BaseSearchlight(Measure):
    """Base class for searchlights.

    The idea for a searchlight algorithm stems from a paper by
    :ref:`Kriegeskorte et al. (2006) <KGB06>`.
    """

    roi_sizes = ConditionalAttribute(enabled=False,
        doc="Number of features in each ROI.")

    roi_feature_ids = ConditionalAttribute(enabled=False,
        doc="Feature IDs for all generated ROIs.")

    roi_center_ids = ConditionalAttribute(enabled=True,
        doc="Center ID for all generated ROIs.")

    is_trained = True
    """Indicate that this measure is always trained."""


    def __init__(self, queryengine, roi_ids=None, nproc=None,
                 **kwargs):
        """
        Parameters
        ----------
        queryengine : QueryEngine
          Engine to use to discover the "neighborhood" of each feature.
          See :class:`~mvpa2.misc.neighborhood.QueryEngine`.
        roi_ids : None or list(int) or str
          List of query engine ids (e.g.,  feature ids, not coordinates, in case
          of `IndexQueryEngine`; and `node_indices` in case of
          `SurfaceQueryEngine`) that shall serve as ROI seeds
          (e.g., sphere centers). Alternatively, this can be the name of a
          feature attribute of the input dataset, whose non-zero values
          determine the feature ids (be careful to use it only with
          `IndexQueryEngine`).  By default all query engine ids will be used.
        nproc : None or int
          How many processes to use for computation.  Requires `pprocess`
          external module.  If None -- all available cores will be used.
        **kwargs
          In addition this class supports all keyword arguments of its
          base-class :class:`~mvpa2.measures.base.Measure`.
      """
        Measure.__init__(self, **kwargs)

        if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
            raise RuntimeError("The 'pprocess' module is required for "
                               "multiprocess searchlights. Please either "
                               "install python-pprocess, or reduce `nproc` "
                               "to 1 (got nproc=%i) or set to default None"
                               % nproc)

        self._queryengine = queryengine
        if roi_ids is not None and not isinstance(roi_ids, str) \
                and not len(roi_ids):
            raise ValueError, \
                  "Cannot run searchlight on an empty list of roi_ids"
        self.__roi_ids = roi_ids
        self.nproc = nproc


    def __repr__(self, prefixes=None):
        """String representation of a `Measure`

        Includes only arguments which differ from default ones
        """
        if prefixes is None:
            prefixes = []
        return super(BaseSearchlight, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['queryengine', 'roi_ids', 'nproc']))


    @due.dcite(
        Doi('10.1073/pnas.0600244103'),
        description="Searchlight analysis approach",
        tags=["implementation"])
    @due.dcite(
        Doi('10.1038/nrn1931'),
        description="Application of the searchlight approach to decoding using classifiers",
        tags=["use"])
    def _call(self, dataset):
        """Perform the ROI search.
        """
        # local binding
        nproc = self.nproc

        if nproc is None and externals.exists('pprocess'):
            import pprocess
            try:
                nproc = pprocess.get_number_of_cores() or 1
            except AttributeError:
                warning("pprocess version %s has no API to figure out maximal "
                        "number of cores. Using 1"
                        % externals.versions['pprocess'])
                nproc = 1
        # train the queryengine
        self._queryengine.train(dataset)

        # decide whether to run on all possible center coords or just a provided
        # subset
        if isinstance(self.__roi_ids, str):
            roi_ids = dataset.fa[self.__roi_ids].value.nonzero()[0]
        elif self.__roi_ids is not None:
            roi_ids = self.__roi_ids
            # safeguard against stupidity
            if __debug__:
                qe_ids = self._queryengine.ids # known to qe
                if not set(qe_ids).issuperset(roi_ids):
                    raise IndexError(
                          "Some roi_ids are not known to the query engine %s: %s"
                          % (self._queryengine,
                             set(roi_ids).difference(qe_ids)))
        else:
            roi_ids = self._queryengine.ids

        # pass to subclass
        results = self._sl_call(dataset, roi_ids, nproc)

        if 'mapper' in dataset.a:
            # since we know the space we can stick the original mapper into the
            # results as well
            if self.__roi_ids is None:
                results.a['mapper'] = copy.copy(dataset.a.mapper)
            else:
                # there is an additional selection step that needs to be
                # expressed by another mapper
                mapper = copy.copy(dataset.a.mapper)

                # NNO if the original mapper has no append (because it's not a
                # chainmapper, for example), we make our own chainmapper.
                #
                # The original code was:
                # mapper.append(StaticFeatureSelection(roi_ids,
                #                                     dshape=dataset.shape[1:]))
                feat_sel_mapper = StaticFeatureSelection(roi_ids,
                                                     dshape=dataset.shape[1:])
                if 'append' in dir(mapper):
                    mapper.append(feat_sel_mapper)
                else:
                    mapper = ChainMapper([dataset.a.mapper,
                                          feat_sel_mapper])

                results.a['mapper'] = mapper

        # charge state
        self.ca.raw_results = results
        # return raw results, base-class will take care of transformations
        return results


    def _sl_call(self, dataset, roi_ids, nproc):
        """Classical generic searchlight implementation
        """
        raise NotImplementedError("Must be implemented in the derived classes")

    queryengine = property(fget=lambda self: self._queryengine)
    roi_ids = property(fget=lambda self: self.__roi_ids)
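
A hedged sketch of enabling the ROI bookkeeping on a concrete searchlight (the sphere_searchlight helper, the cross-validation measure `cv`, and the fMRI dataset `ds` are assumptions; only the enable_ca / ca pattern is the point here):

from mvpa2.measures.searchlight import sphere_searchlight    # assumed import path

sl = sphere_searchlight(cv, radius=3,
                        enable_ca=['roi_sizes', 'roi_feature_ids'])
res = sl(ds)                         # runs BaseSearchlight._call() shown above
sizes = sl.ca.roi_sizes              # number of features in each ROI
feature_ids = sl.ca.roi_feature_ids  # feature ids per generated ROI
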
Example #18
class MCNullDist(NullDist):
    """Null-hypothesis distribution is estimated from randomly permuted data labels.

    The distribution is estimated by calling fit() with an appropriate
    `Measure` or `TransferError` instance and a training and a
    validation dataset (in case of a `TransferError`). For a customizable
    amount of cycles the training data labels are permuted and the
    corresponding measure computed. In case of a `TransferError` this is the
    error when predicting the *correct* labels of the validation dataset.

    The distribution can be queried using the `cdf()` method, which can be
    configured to report probabilities/frequencies from `left` or `right` tail,
    i.e. fraction of the distribution that is lower or larger than some
    critical value.

    This class also supports `FeaturewiseMeasure`. In that case `cdf()`
    returns an array of featurewise probabilities/frequencies.
    """

    _DEV_DOC = """
    TODO: automagically decide on the number of samples/permutations needed.
    Caution should be paid though, since the resultant distributions might be
    quite far from some conventional ones (e.g. Normal) -- they are expected
    to be bimodal (or actually multimodal) in many scenarios.
    """

    dist_samples = ConditionalAttribute(enabled=False,
                                 doc='Samples obtained for each permutation')
    skipped = ConditionalAttribute(enabled=True,
                  doc='# of the samples which were skipped because the '
                      'measure failed to evaluate on them')

    def __init__(self, permutator, dist_class=Nonparametric, measure=None,
                 **kwargs):
        """Initialize Monte-Carlo Permutation Null-hypothesis testing

        Parameters
        ----------
        permutator : Node
          Node instance that generates permuted datasets.
        dist_class : class
          This can be any class which provides parameters estimate
          using `fit()` method to initialize the instance, and
          provides `cdf(x)` method for estimating value of x in CDF.
          All distributions from SciPy's 'stats' module can be used.
        measure : Measure or None
          Optional measure that is used to compute results on permuted
          data. If None, a measure needs to be passed to ``fit()``.
        """
        NullDist.__init__(self, **kwargs)

        self._dist_class = dist_class
        self._dist = []                 # actual distributions
        self._measure = measure

        self.__permutator = permutator

    def __repr__(self, prefixes=[]):
        prefixes_ = ["%s" % self.__permutator]
        if self._dist_class != Nonparametric:
            prefixes_.insert(0, 'dist_class=%r' % (self._dist_class,))
        return super(MCNullDist, self).__repr__(
            prefixes=prefixes_ + prefixes)


    def fit(self, measure, ds):
        """Fit the distribution by performing multiple cycles which repeatedly
        permute labels in the training dataset.

        Parameters
        ----------
        measure: Measure or None
          A measure used to compute the results from shuffled data. Can be None
          if a measure instance has been provided to the constructor.
        ds: `Dataset` which gets permuted and used to compute the
          measure/transfer error multiple times.
        """
        # TODO: place exceptions separately so we could avoid circular imports
        from mvpa2.base.learner import LearnerError

        # prefer the already assigned measure over anything that was passed to
        # the function.
        # XXX that is a bit awkward but is necessary to keep the code changes
        # in the rest of PyMVPA minimal till this behavior becomes mandatory
        if not self._measure is None:
            measure = self._measure
            measure.untrain()

        dist_samples = []
        """Holds the values for randomized labels."""

        # estimate null-distribution
        # TODO this really needs to be more clever! If data samples are
        # shuffled within a class it really makes no difference for the
        # classifier, hence the number of permutations to estimate the
        # null-distribution of transfer errors can be reduced dramatically
        # when the *right* permutations (the ones that matter) are done.
        skipped = 0                     # # of skipped permutations
        for p, permuted_ds in enumerate(self.__permutator.generate(ds)):
            # new permutation all the time
            # but only permute the training data and keep the testdata constant
            #
            if __debug__:
                debug('STATMC', "Doing %i permutations: %i" \
                      % (self.__permutator.count, p+1), cr=True)

            # compute and store the measure of this permutation
            # assume it has `TransferError` interface
            try:
                res = measure(permuted_ds)
                dist_samples.append(res.samples)
            except LearnerError, e:
                if __debug__:
                    debug('STATMC', " skipped", cr=True)
                warning('Failed to obtain value from %s due to %s.  Measurement'
                        ' was skipped, which could lead to unstable and/or'
                        ' incorrect assessment of the null_dist' % (measure, e))
                skipped += 1
                continue

        self.ca.skipped = skipped

        if __debug__:
            debug('STATMC', ' Skipped: %d permutations' % skipped)

        if not len(dist_samples) and skipped > 0:
            raise RuntimeError(
                'Failed to obtain any value from %s. %d measurements were '
                'skipped. Check above warnings, and your code/data'
                % (measure, skipped))
        # store samples as (npermutations x nsamples x nfeatures)
        dist_samples = np.asanyarray(dist_samples)
        # for the ca storage use a dataset with
        # (nsamples x nfeatures x npermutations) to make it compatible with the
        # result dataset of the measure
        self.ca.dist_samples = Dataset(np.rollaxis(dist_samples,
                                       0, len(dist_samples.shape)))

        # fit distribution per each element

        # to decide either it was done on scalars or vectors
        shape = dist_samples.shape
        nshape = len(shape)
        # if just 1 dim, original data was scalar, just create an
        # artif dimension for it
        if nshape == 1:
            dist_samples = dist_samples[:, np.newaxis]

        # fit per each element.
        # XXX could be more elegant? may be use np.vectorize?
        dist_samples_rs = dist_samples.reshape((shape[0], -1))
        dist = []
        for samples in dist_samples_rs.T:
            params = self._dist_class.fit(samples)
            if __debug__ and 'STAT__' in debug.active:
                debug('STAT', 'Estimated parameters for the %s are %s'
                      % (self._dist_class, str(params)))
            dist.append(self._dist_class(*params))
        self._dist = dist
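
A hedged sketch of the usual fitting pattern: a permutator generates label-shuffled datasets, fit() builds the null distribution, and the charged attributes can then be inspected (the AttributePermutator import path, the measure, and the dataset are assumptions):

from mvpa2.generators.permutation import AttributePermutator   # assumed import path

permutator = AttributePermutator('targets', count=100)
null_dist = MCNullDist(permutator, enable_ca=['dist_samples'])
null_dist.fit(measure, ds)                # `measure` and `ds` are assumed to exist
dist_samples = null_dist.ca.dist_samples  # Dataset of per-permutation results
n_skipped = null_dist.ca.skipped          # enabled by default (see above)
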
Example #19
class kNN(Classifier):
    """
    k-Nearest-Neighbour classifier.

    This is a simple classifier that bases its decision on the distances
    between the training dataset samples and the test sample(s). Distances
    are computed using a customizable distance function. A certain number
    (`k`) of nearest neighbors is selected based on the smallest distances,
    and the labels of these neighboring samples are fed into a voting
    function to determine the labels of the test sample.

    Training a kNN classifier is extremely quick, as no actual training
    is performed: the training dataset is simply stored in the
    classifier. All computations are done during classifier prediction.

    Ties
    ----

    If the voting procedure results in a tie, it is broken by
    choosing the class with the minimal mean distance to the corresponding
    k-neighbors.

    Notes
    -----
    If enabled, kNN stores the votes per class in the 'values' state after
    calling predict().

    """

    distances = ConditionalAttribute(enabled=False,
        doc="Distances computed for each sample")


    __tags__ = ['knn', 'non-linear', 'binary', 'multiclass', 'oneclass']

    def __init__(self, k=2, dfx=squared_euclidean_distance,
                 voting='weighted', **kwargs):
        """
        Parameters
        ----------
        k : unsigned integer
          Number of nearest neighbours to be used for voting.
        dfx : functor
          Function to compute the distances between training and test samples.
          Default: squared euclidean distance
        voting : str
          Voting method used to derive predictions from the nearest neighbors.
          Possible values are 'majority' (simple majority of classes
          determines vote) and 'weighted' (votes are weighted according to the
          relative frequencies of each class in the training data).
        **kwargs
          Additional arguments are passed to the base class.
        """

        # init base class first
        Classifier.__init__(self, **kwargs)

        self.__k = k
        self.__dfx = dfx
        self.__voting = voting
        self.__data = None
        self.__weights = None


    def __repr__(self, prefixes=[]): # pylint: disable-msg=W0102
        """Representation of the object
        """
        return super(kNN, self).__repr__(
            ["k=%d" % self.__k, "dfx=%s" % self.__dfx,
             "voting=%s" % repr(self.__voting)]
            + prefixes)


    ## def __str__(self):
    ##     return "%s\n data: %s" % \
    ##         (Classifier.__str__(self), indent_doc(self.__data))


    def _train(self, data):
        """Train the classifier.

        For kNN it is degenerate -- just stores the data.
        """
        self.__data = data
        labels = data.sa[self.get_space()].value
        uniquelabels = data.sa[self.get_space()].unique
        Nuniquelabels = len(uniquelabels)

        if __debug__:
            if str(data.samples.dtype).startswith('uint') \
                or str(data.samples.dtype).startswith('int'):
                warning("kNN: input data is in integers. " + \
                        "Overflow on arithmetic operations might result in"+\
                        " errors. Please convert dataset's samples into" +\
                        " floating datatype if any error is reported.")
        if self.__voting == 'weighted':
            self.__labels = labels.copy()
            Nlabels = len(labels)

            # TODO: To get proper speed up for the next line only,
            #       histogram should be computed
            #       via sorting + counting "same" elements while reducing.
            #       Guaranteed complexity is NlogN whenever now it is N^2
            # compute the relative proportion of samples belonging to each
            # class (do it in one loop to improve speed and reduce readability)
            weights = \
                [ 1.0 - ((labels == label).sum() / Nlabels) \
                    for label in uniquelabels ]
            self.__weights = dict(zip(uniquelabels, weights))
        else:
            self.__weights = None

        # create dictionary with an item for each condition
        self.__votes_init = dict(zip(uniquelabels,
                                     [0] * Nuniquelabels))


    @accepts_dataset_as_samples
    def _predict(self, data):
        """Predict the class labels for the provided data.

        Returns a list of class labels (one for each data sample).
        """
        # make sure we're talking about arrays
        data = np.asanyarray(data)

        targets_sa_name = self.get_space()
        targets_sa = self.__data.sa[targets_sa_name]
        labels = targets_sa.value
        uniquelabels = targets_sa.unique

        # checks only in debug mode
        if __debug__:
            if not data.ndim == 2:
                raise ValueError, "Data array must be two-dimensional."

            if not data.shape[1] == self.__data.nfeatures:
                raise ValueError, "Length of data samples (features) does " \
                                  "not match the classifier."

        # compute the distance matrix between training and test data with
        # distances stored row-wise, i.e. distances between test sample [0]
        # and all training samples will end up in row 0
        dists = self.__dfx(self.__data.samples, data).T
        if self.ca.is_enabled('distances'):
            # .sa.copy() now does deepcopying by default
            self.ca.distances = Dataset(dists, fa=self.__data.sa.copy())

        # determine the k nearest neighbors per test sample
        knns = dists.argsort(axis=1)[:, :self.__k]

        # predictions and votes for all samples
        all_votes, predictions = [], []
        for inns, nns in enumerate(knns):
            votes = self.__votes_init.copy()
            # TODO: optimize!
            for nn in nns:
                votes[labels[nn]] += 1

            # optionally weight votes
            if self.__voting == 'majority':
                pass
            elif self.__voting == 'weighted':
                # TODO: optimize!
                for ul in uniquelabels:
                    votes[ul] *= self.__weights[ul]
            else:
                raise ValueError, "kNN told to perform unknown voting '%s'." \
                      % self.__voting

            # reverse dictionary items and sort them to get the
            # winners
            # It would be more expensive than just to look for
            # the maximum, but this piece should be the least
            # cpu-intensive while distances computation should consume
            # the most. Also it would allow to look and break the ties
            votes_reversed = sorted([(v, k) for k, v in votes.iteritems()],
                                    reverse=True)
            # check for ties
            max_vote, max_vote_label = votes_reversed[0]

            if len(votes_reversed) > 1 and max_vote == votes_reversed[1][0]:
                # figure out all ties and break them based on the mean
                # distance
                # TODO: theoretically we could break out of the loop earlier
                ties = [x[1] for x in votes_reversed if x[0] == max_vote]

                # compute mean distances to the corresponding clouds
                # restrict analysis only to k-nn's
                nns_labels = labels[nns]
                nns_dists = dists[inns][nns]
                ties_dists = [np.mean(nns_dists[nns_labels == t]) for t in ties]
                max_vote_label = ties[np.argmin(ties_dists)]
                if __debug__:
                    debug('KNN',
                          'Ran into the ties: %s with votes: %s, dists: %s, max_vote %r',
                          (ties, votes_reversed, ties_dists, max_vote_label))

            all_votes.append(votes)
            predictions.append(max_vote_label)

        # store the predictions in the state. Relies on State._setitem to do
        # nothing if the relevant state member is not enabled
        self.ca.predictions = predictions
        self.ca.estimates = all_votes # np.array([r[1] for r in results])

        return predictions

    def _untrain(self):
        """Reset trained state"""
        self.__data = None
        self.__weights = None
        super(kNN, self)._untrain()

    dfx = property(fget=lambda self: self.__dfx)
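
A hedged sketch of enabling the per-sample distances on a kNN instance (`ds` is an assumed labeled Dataset):

clf = kNN(k=3, voting='majority', enable_ca=['distances'])
clf.train(ds)
predictions = clf.predict(ds.samples)
dists = clf.ca.distances          # Dataset of test-by-training-sample distances
votes = clf.ca.estimates          # per-sample vote dictionaries (see _predict() above)
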
Example #20
class ElementSelector(ClassWithCollections):
    """Base class to implement functors to select some elements based on a
    sequence of values.
    """

    ndiscarded = ConditionalAttribute(
        enabled=True, doc="Store number of discarded elements.")

    def __init__(self, mode='discard', **kwargs):
        """
        Parameters
        ----------
         mode : {'discard', 'select'}
            Decides whether to `select` or to `discard` features.
        """
        ClassWithCollections.__init__(self, **kwargs)

        self._set_mode(mode)
        """Flag whether to select or to discard elements."""

    ##REF: Name was automagically refactored
    def _set_mode(self, mode):
        """Choose `select` or `discard` mode."""

        if not mode in ['discard', 'select']:
            raise ValueError, "Unkown selection mode [%s]. Can only be one " \
                              "of 'select' or 'discard'." % mode

        self.__mode = mode

    def __call__(self, seq):
        """
        Parameters
        ----------
        seq
           Sequence based on values of which to perform the selection.
           If `Dataset`, then only 1st sample is taken.
        """
        if isinstance(seq, AttrDataset):
            if len(seq) > 1:
                raise ValueError(
                    "Feature selectors cannot handle multiple "
                    "sequences in a Dataset at once.  We got dataset %s "
                    "as input." % (seq, ))
            seq = seq.samples[0]
        elif hasattr(seq, 'shape'):
            shape = seq.shape
            if len(shape) > 1:
                raise ValueError(
                    "Feature selectors cannot handle multidimensional "
                    "inputs (such as ndarrays with more than a single "
                    "dimension.  We got %s with shape %s "
                    "as input." % (seq.__class__, shape))
        return self._call(seq)

    def _call(self, seq):
        """Implementations in derived classed have to return a list of selected
        element IDs based on the given sequence.
        """
        raise NotImplementedError

    mode = property(fget=lambda self: self.__mode, fset=_set_mode)
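
ElementSelector is only the base class; a hedged sketch with the FractionTailSelector subclass used in Examples #10 and #21 (its import path is an assumption):

import numpy as np
from mvpa2.featsel.helpers import FractionTailSelector      # assumed import path

selector = FractionTailSelector(0.10, mode='select', tail='upper')
scores = np.random.rand(50)           # hypothetical per-feature sensitivities
keep_ids = selector(scores)           # ids of the upper 10% of elements
print(selector.ca.ndiscarded)         # enabled by default in ElementSelector above
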
Example #21
class SplitRFE(RFE):
    """RFE with the nested cross-validation to estimate optimal number of features.

    Given a learner (classifier) with a sensitivity analyzer and a
    partitioner, during training SplitRFE first performs a
    cross-validation with RFE to estimate the optimal number of
    features which should survive in RFE.  The optimal number is chosen as
    the mid-point among all minima of the average errors across
    splits.  After deducing the optimal number of features, SplitRFE
    applies regular RFE again on the full training dataset, stopping at
    the estimated optimal number of features.

    Examples
    --------

    Building on the example given for :class:`~mvpa2.featsel.rfe.RFE`, here
    is an implementation using the SplitRFE helper:

    >>> # Lazy import
    >>> from mvpa2.suite import *
    >>> # design an RFE feature selection to be used with a classifier
    >>> rfe = SplitRFE(
    ...           LinearCSVMC(),
    ...           OddEvenPartitioner(),
    ...           # take sensitivities per each split, L2 norm, abs, mean them
    ...           fmeasure_postproc=ChainMapper([
    ...               FxMapper('features', l2_normed),
    ...               FxMapper('samples', np.abs),
    ...               FxMapper('samples', np.mean)]),
    ...           # select 50% of the best on each step
    ...           fselector=FractionTailSelector(
    ...               0.50,
    ...               mode='select', tail='upper'),
    ...           # but we do want to update sensitivities on each step
    ...           update_sensitivity=True)
    >>> clf = FeatureSelectionClassifier(
    ...           LinearCSVMC(),
    ...           # on features selected via RFE
    ...           rfe,
    ...           # custom description
    ...           descr='LinSVM+RFE(splits_avg)' )


    But not only classifiers and their sensitivities can be used for RFE. It
    can be used even with univariate measures (e.g. OnewayAnova).
    """

    # exclude those since we are really an adapter here
    __init__doc__exclude__ = RFE.__init__doc__exclude__ + \
      ['fmeasure', 'pmeasure', 'splitter',
       'train_pmeasure', 'stopping_criterion',
       'bestdetector',   # now it is a diff strategy
       'nfeatures_min'   # will get 'trained'
       ]

    nested_errors = ConditionalAttribute(
        doc="History of errors per each nested split")
    nested_nfeatures = ConditionalAttribute(
        doc="History of # of features left per each nested split")

    def __init__(
            self,
            lrn,
            partitioner,
            fselector,
            errorfx=mean_mismatch_error,
            fmeasure_postproc=None,
            fmeasure=None,
            nproc=1,
            # callback?
            **kwargs):
        """
        Parameters
        ----------
        lrn : Learner
          Learner with a sensitivity analyzer which will be used both
          for the sensitivity analysis and transfer error estimation
        partitioner : Partitioner
          Used to generate cross-validation partitions for cross-validation
          to deduce optimal number of features to maintain
        fselector : Functor
          Given a sensitivity map it has to return the ids of those
          features that should be kept.
        errorfx : func, optional
          Functor to use for estimation of cross-validation error
        fmeasure_postproc : func, optional
          Function to provide to the sensitivity analyzer as postproc.  If no
          fmeasure is provided and the classifier's sensitivity is used, then
          maxofabs_sample() is used for this postproc, unless another
          value is provided.
        fmeasure : Function, optional
          Featurewise measure.  If None is provided, lrn's sensitivity
          analyzer will be used.
        """
        # Initialize itself preparing for the 2nd invocation
        # with determined number of nfeatures_min
        # TODO:  move this into _train since better not to assign anything here
        # to avoid possible problems with copies needing to deal with the same
        # lrn... but then we might like again to reconsider delegation instead
        # of subclassing here....
        if fmeasure is None:
            if __debug__:
                debug(
                    'RFE', 'fmeasure was not provided, will be using the '
                    'sensitivity analyzer for %s' % lrn)
            fmeasure = lrn.get_sensitivity_analyzer(
                postproc=fmeasure_postproc
                if fmeasure_postproc is not None else maxofabs_sample())
            train_pmeasure = False
        else:
            assert fmeasure_postproc is None, "There should be no explicit " \
                    "fmeasure_postproc when fmeasure is specified"
            # if user provided explicit value -- use it! otherwise, we do want
            # to train an arbitrary fmeasure
            train_pmeasure = kwargs.pop('train_pmeasure', True)

        RFE.__init__(self,
                     fmeasure,
                     None,
                     Repeater(2),
                     fselector=fselector,
                     bestdetector=None,
                     train_pmeasure=train_pmeasure,
                     stopping_criterion=None,
                     **kwargs)
        self._lrn = lrn  # should not be modified, thus _
        self.partitioner = partitioner
        self.errorfx = errorfx
        self.fmeasure_postproc = fmeasure_postproc
        self.nproc = nproc

    def __repr__(self, prefixes=None):
        if prefixes is None:
            prefixes = []
        return super(SplitRFE, self).__repr__(
            prefixes=prefixes + _repr_attrs(self, ['lrn', 'partitioner']) +
            _repr_attrs(self, ['errorfx'], default=mean_mismatch_error) +
            _repr_attrs(self, ['fmeasure_postproc'], default=None) +
            _repr_attrs(self, ['nproc'], default=1))

    @property
    def lrn(self):
        return self._lrn

    def _train(self, dataset):
        pmeasure = ProxyMeasure(
            self.lrn,
            postproc=BinaryFxNode(self.errorfx, self.lrn.space),
            # skip training pmeasure when fmeasure training already covers it
            skip_train=not self.train_pmeasure
        )

        # First we need to replicate our RFE construct but this time
        # with pmeasure for the classifier
        rfe = RFE(
            self.fmeasure,
            pmeasure,
            Splitter('partitions'),
            fselector=self.fselector,
            bestdetector=None,
            train_pmeasure=self.train_pmeasure,
            stopping_criterion=None,  # full "track"
            update_sensitivity=self.update_sensitivity,
            enable_ca=['errors', 'nfeatures'])

        errors, nfeatures = [], []

        if __debug__:
            debug("RFEC", "Stage 1: initial nested CV/RFE for %s", (dataset, ))

        if self.nproc != 1 and externals.exists('joblib'):
            nested_results = jl.Parallel(self.nproc)(
                jl.delayed(_process_partition)(rfe, partition)
                for partition in self.partitioner.generate(dataset))
        else:
            nested_results = [
                _process_partition(rfe, partition)
                for partition in self.partitioner.generate(dataset)
            ]

        # unzip
        errors = [x[0] for x in nested_results]
        nfeatures = [x[1] for x in nested_results]

        self.ca.nested_nfeatures = nfeatures
        self.ca.nested_errors = errors

        # mean errors across splits and find optimal number
        errors_mean = np.mean(errors, axis=0)
        nfeatures_mean = np.mean(nfeatures, axis=0)
        # we will take the "mean location" of the min to stay
        # within the most 'stable' choice

        mins_idx = np.where(errors_mean == np.min(errors_mean))[0]
        min_idx = mins_idx[int(len(mins_idx) / 2)]
        min_error = errors_mean[min_idx]
        assert (min_error == np.min(errors_mean))
        nfeatures_min = nfeatures_mean[min_idx]

        if __debug__:
            debug(
                "RFEC", "Choosing among %d choices to have %d features with "
                "mean error=%.2g (initial mean error %.2g)",
                (len(mins_idx), nfeatures_min, min_error, errors_mean[0]))

        self.nfeatures_min = nfeatures_min

        if __debug__:
            debug(
                "RFEC", "Stage 2: running RFE on full training dataset to "
                "obtain the best %d features" % nfeatures_min)

        super(SplitRFE, self)._train(dataset)

    def _untrain(self):
        super(SplitRFE, self)._untrain()
        self.lrn.untrain()
        self.nfeatures_min = 0  # reset the knowledge
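The "mid-point among all minima" rule used in Stage 1 above can be traced with plain numpy. The numbers below are made up for illustration; the variable names mirror those in _train, but this is only a sketch, not PyMVPA code:

import numpy as np

# per-split error curves (rows: splits, columns: RFE steps) -- invented values
errors = np.array([[0.40, 0.30, 0.25, 0.25, 0.35],
                   [0.45, 0.28, 0.25, 0.30, 0.40]])
# number of features surviving at each step (identical across splits here)
nfeatures = np.array([[16, 8, 4, 2, 1],
                      [16, 8, 4, 2, 1]])

errors_mean = errors.mean(axis=0)        # [0.425, 0.29, 0.25, 0.275, 0.375]
nfeatures_mean = nfeatures.mean(axis=0)  # [16., 8., 4., 2., 1.]

mins_idx = np.where(errors_mean == errors_mean.min())[0]  # all minima -> [2]
min_idx = mins_idx[len(mins_idx) // 2]                    # mid-point of the minima
nfeatures_min = nfeatures_mean[min_idx]                   # -> 4.0 features kept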
Beispiel #22
0
class SVM(_SVM):
    """Support Vector Machine Classifier.

    This is a simple interface to the libSVM package.
    """

    # Since this is an internal feature of LibSVM, this conditional attribute
    # is defined here
    probabilities = ConditionalAttribute(
        enabled=False,
        doc="Estimates of samples probabilities as provided by LibSVM")

    # TODO p is specific for SVR
    _KNOWN_PARAMS = [
        'epsilon', 'probability', 'shrinking', 'weight_label', 'weight'
    ]

    #_KNOWN_KERNEL_PARAMS = [ 'cache_size' ]

    _KNOWN_SENSITIVITIES = {
        'linear': LinearSVMWeights,
    }
    _KNOWN_IMPLEMENTATIONS = {
        'C_SVC':
        (_svm.svmc.C_SVC, ('C', ), ('binary', 'multiclass', 'oneclass'),
         'C-SVM classification'),
        'NU_SVC':
        (_svm.svmc.NU_SVC, ('nu', ), ('binary', 'multiclass', 'oneclass'),
         'nu-SVM classification'),
        'ONE_CLASS':
        (_svm.svmc.ONE_CLASS, (), ('oneclass-binary', ), 'one-class-SVM'),
        'EPSILON_SVR': (_svm.svmc.EPSILON_SVR, ('C', 'tube_epsilon'),
                        ('regression', ), 'epsilon-SVM regression'),
        'NU_SVR': (_svm.svmc.NU_SVR, ('nu', 'tube_epsilon'),
                   ('regression', 'oneclass'), 'nu-SVM regression')
    }

    __default_kernel_class__ = LinearLSKernel
    __tags__ = _SVM.__tags__ + ['libsvm']

    def __init__(self, **kwargs):
        # XXX Determine which parameters depend on each other and implement
        # safety/simplifying logic around them
        # already done for: nr_weight
        # thought: weight and weight_label should be a dict
        """Interface class to LIBSVM classifiers and regressions.

        Default implementation (C/nu/epsilon SVM) is chosen depending
        on the given parameters (C/nu/tube_epsilon).
        """

        svm_impl = kwargs.get('svm_impl', None)
        # Depending on given arguments, figure out desired SVM
        # implementation
        if svm_impl is None:
            for arg, impl in [('tube_epsilon', 'EPSILON_SVR'), ('C', 'C_SVC'),
                              ('nu', 'NU_SVC')]:
                if arg in kwargs:
                    svm_impl = impl
                    if __debug__:
                        debug(
                            'SVM', 'No implementation was specified. Since '
                            '%s is given among arguments, assume %s' %
                            (arg, impl))
                    break
            if svm_impl is None:
                svm_impl = 'C_SVC'
                if __debug__:
                    debug('SVM', 'Assign C_SVC "by default"')
        kwargs['svm_impl'] = svm_impl

        # init base class
        _SVM.__init__(self, **kwargs)

        self._svm_type = self._KNOWN_IMPLEMENTATIONS[svm_impl][0]

        if 'nu' in self._KNOWN_PARAMS and 'epsilon' in self._KNOWN_PARAMS:
            # overwrite eps param with a new default value (information
            # taken from the libSVM docs)
            self.params['epsilon']._set_default(0.001)

        self.__model = None
        """Holds the trained SVM."""

    @due.dcite(Doi('10.1145/1961189.1961199'),
               description="LIBSVM: A library for support vector machines",
               path="libsvm",
               tags=["implementation"])
    # TODO: conditioned citations for nu-SVM and one-class
    #    B. Schölkopf, A. Smola, R. Williamson, and P. L. Bartlett. New support vector algorithms. Neural Computation, 12, 2000, 1207-1245.
    #    B. Schölkopf, J. Platt, J. Shawe-Taylor, A. J. Smola, and R. C. Williamson. Estimating the support of a high-dimensional distribution. Neural Computation, 13, 2001, 1443-1471.
    def _train(self, dataset):
        """Train SVM
        """
        super(SVM, self)._train(dataset)
        targets_sa_name = self.get_space()  # name of targets sa
        targets_sa = dataset.sa[targets_sa_name]  # actual targets sa

        # libsvm needs doubles
        src = _data2ls(dataset)

        # libsvm cannot handle literal labels
        labels = self._attrmap.to_numeric(targets_sa.value).tolist()

        svmprob = _svm.SVMProblem(labels, src)

        # Translate few params
        TRANSLATEDICT = {'epsilon': 'eps', 'tube_epsilon': 'p'}
        args = []
        for paramname, param in list(self.params.items()) \
                + list(self.kernel_params.items()):
            if paramname in TRANSLATEDICT:
                argname = TRANSLATEDICT[paramname]
            elif paramname in _svm.SVMParameter.default_parameters:
                argname = paramname
            else:
                if __debug__:
                    debug(
                        "SVM_", "Skipping parameter %s since it is not known "
                        "to libsvm" % paramname)
                continue
            args.append((argname, param.value))

        # ??? All those parameters should be fetched if present from
        # **kwargs and create appropriate parameters within .params or
        # .kernel_params
        libsvm_param = _svm.SVMParameter(
            kernel_type=self.params.kernel.as_raw_ls(),  # Just an integer ID
            svm_type=self._svm_type,
            **dict(args))
        """Store SVM parameters in libSVM compatible format."""

        if 'C' in self.params:  # svm_type in [_svm.svmc.C_SVC]:
            Cs = self._get_cvec(dataset)
            if len(Cs) > 1:
                C0 = abs(Cs[0])
                scale = 1.0 / (C0)  #*np.sqrt(C0))
                # so we got 1 C per label
                uls = self._attrmap.to_numeric(targets_sa.unique)
                if len(Cs) != len(uls):
                    raise ValueError(
                        "SVM was parameterized with %d Cs but there are %d "
                        "labels in the dataset" %
                        (len(Cs), len(targets_sa.unique)))
                weight = [c * scale for c in Cs]
                # All 3 need to be set to take an effect
                libsvm_param._set_parameter('weight', weight)
                libsvm_param._set_parameter('nr_weight', len(weight))
                libsvm_param._set_parameter('weight_label', uls)
            libsvm_param._set_parameter('C', Cs[0])

        try:
            self.__model = _svm.SVMModel(svmprob, libsvm_param)
        except Exception as e:
            raise FailedToTrainError(str(e))

    @accepts_samples_as_dataset
    def _predict(self, data):
        """Predict values for the data
        """
        # libsvm needs doubles
        src = _data2ls(data)
        ca = self.ca

        predictions = [self.model.predict(p) for p in src]

        if ca.is_enabled('estimates'):
            if self.__is_regression__:
                estimates = [self.model.predict_values_raw(p)[0] for p in src]
            else:
                # if 'trained_targets' are literal they have to be mapped
                if (np.issubdtype(self.ca.trained_targets.dtype, 'c')
                        or np.issubdtype(self.ca.trained_targets.dtype, 'U')):
                    trained_targets = self._attrmap.to_numeric(
                        self.ca.trained_targets)
                else:
                    trained_targets = self.ca.trained_targets
                nlabels = len(trained_targets)
                # XXX We do duplicate work. model.predict calls
                # predict_values_raw internally and then does voting or
                # thresholding. So if speed becomes a factor we might
                # want to move out logic from libsvm over here to base
                # predictions on obtained values, or adjust libsvm to
                # spit out values from predict() as well
                if nlabels == 2 and self._svm_impl != 'ONE_CLASS':
                    # Apparently libsvm reorders labels, so we need to
                    # track (1,0) values instead of (0,1); thus just
                    # take the pair in reversed order
                    estimates = [
                        self.model.predict_values(p)[(trained_targets[1],
                                                      trained_targets[0])]
                        for p in src
                    ]
                    if len(estimates) > 0:
                        if __debug__:
                            debug(
                                "SVM",
                                "Forcing estimates to be ndarray and reshaping"
                                " them into 1D vector")
                        estimates = np.asarray(estimates).reshape(
                            len(estimates))
                else:
                    # In multiclass we return dictionary for all pairs
                    # of labels, since libsvm does 1-vs-1 pairs
                    estimates = [self.model.predict_values(p) for p in src]
            ca.estimates = estimates

        if ca.is_enabled("probabilities"):
            # XXX Is this really necessary? yoh doesn't think so since
            # assignment to ca is doing the same
            #self.probabilities = [ self.model.predict_probability(p)
            #                       for p in src ]
            try:
                ca.probabilities = [
                    self.model.predict_probability(p) for p in src
                ]
            except TypeError:
                warning("Current SVM %s doesn't support probability " % self +
                        " estimation.")
        return predictions

    def summary(self):
        """Provide quick summary over the SVM classifier
        """
        s = super(SVM, self).summary()
        if self.trained:
            s += '\n #SVs:%d' % self.__model.get_total_n_sv()
            try:
                param = self.__model.model.param
                C = param.C
                # extract information of how many SVs sit inside the margin,
                # i.e. so called 'bounded SVs'
                inside_margin = np.sum(
                    # take 0.99 to avoid rounding issues
                    np.abs(self.__model.get_sv_coef()) >= 0.99 * param.C)
                s += ' #bounded_SVs:%d' % inside_margin
                s += (' used_C:%-5g' % C).rstrip()
            except:
                pass
        return s

    def _untrain(self):
        """Untrain libsvm's SVM: forget the model
        """
        if __debug__ and "SVM" in debug.active:
            debug("SVM", "Untraining %s and destroying libsvm model" % self)
        super(SVM, self)._untrain()
        del self.__model
        self.__model = None

    model = property(fget=lambda self: self.__model)
    """Access to the SVM model."""
Beispiel #23
0
class RFE(IterativeFeatureSelection):
    """Recursive feature elimination.

    A `FeaturewiseMeasure` is used to compute sensitivity maps given a
    certain dataset. These sensitivity maps are in turn used to discard
    unimportant features. For each feature selection step the transfer error
    on some test dataset is computed. This procedure is repeated until a given
    `StoppingCriterion` is reached.

    References
    ----------
    Such a strategy, after
      Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene
      selection for cancer classification using support vector
      machines. Mach. Learn., 46(1-3), 389--422.
    was applied to SVM-based analysis of fMRI data in
      Hanson, S. J. & Halchenko, Y. O. (2008). Brain reading using
      full brain support vector machines for object recognition:
      there is no "face identification area". Neural Computation, 20,
      486--503.

    Examples
    --------

    There are multiple possible ways to design an RFE.  Here is one
    example which would rely on a SplitClassifier to extract
    sensitivities and provide estimate of performance (error)

    >>> # Lazy import
    >>> from mvpa2.suite import *
    >>> rfesvm_split = SplitClassifier(LinearCSVMC(), OddEvenPartitioner())
    >>> # design an RFE feature selection to be used with a classifier
    >>> rfe = RFE(rfesvm_split.get_sensitivity_analyzer(
    ...              # take sensitivities per each split, L2 norm, mean, abs them
    ...              postproc=ChainMapper([ FxMapper('features', l2_normed),
    ...                                     FxMapper('samples', np.mean),
    ...                                     FxMapper('samples', np.abs)])),
    ...           # use the error stored in the confusion matrix of split classifier
    ...           ConfusionBasedError(rfesvm_split, confusion_state='stats'),
    ...           # we just extract error from confusion, so need to split dataset
    ...           Repeater(2),
    ...           # select 50% of the best on each step
    ...           fselector=FractionTailSelector(
    ...               0.50,
    ...               mode='select', tail='upper'),
    ...           # and stop whenever error didn't improve for up to 10 steps
    ...           stopping_criterion=NBackHistoryStopCrit(BestDetector(), 10),
    ...           # we just extract it from existing confusion
    ...           train_pmeasure=False,
    ...           # but we do want to update sensitivities on each step
    ...           update_sensitivity=True)
    >>> clf = FeatureSelectionClassifier(
    ...           LinearCSVMC(),
    ...           # on features selected via RFE
    ...           rfe,
    ...           # custom description
    ...           descr='LinSVM+RFE(splits_avg)' )

    """

    history = ConditionalAttribute(
        doc="Last step # when each feature was still present")
    sensitivities = ConditionalAttribute(
        enabled=False,
        doc="History of sensitivities (might consume too much memory")

    def __init__(self,
                 fmeasure,
                 pmeasure,
                 splitter,
                 fselector=FractionTailSelector(0.05),
                 update_sensitivity=True,
                 **kwargs):
        # XXX Allow for multiple stopping criterions, e.g. error not decreasing
        # anymore OR number of features less than threshold
        """Initialize recursive feature elimination

        Parameters
        ----------
        fmeasure : FeaturewiseMeasure
        pmeasure : Measure
          used to compute the transfer error of a classifier based on a
          certain feature set on the test dataset.
          NOTE: If sensitivity analyzer is based on the same
          classifier as transfer_error is using, make sure you
          initialize transfer_error with train=False, otherwise
          it would train classifier twice without any necessity.
        splitter : Splitter
          This splitter instance has to generate at least two dataset splits
          when called with the input dataset. The first split serves as the
          training dataset and the second as the evaluation dataset.
        fselector : Functor
          Given a sensitivity map it has to return the ids of those
          features that should be kept.
        update_sensitivity : bool
          If False the sensitivity map is only computed once and reused
          for each iteration. Otherwise the sensitivities are
          recomputed at each selection step.
        """
        # bases init first
        IterativeFeatureSelection.__init__(self, fmeasure, pmeasure, splitter,
                                           fselector, **kwargs)

        self.__update_sensitivity = update_sensitivity
        """Flag whether sensitivity map is recomputed for each step."""

    def _train(self, ds):
        """Proceed and select the features recursively eliminating less
        important ones.

        Parameters
        ----------
        ds : Dataset
          dataset which is split (using the configured splitter) into a
          training part, used to compute sensitivity maps and to train the
          classifier, and a testing part, used to test the trained classifier
          and determine the transfer error

        At the end the conditional attributes ``errors``, ``history`` and
        ``selected_ids`` are charged, and the feature subset with the lowest
        transfer error encountered before the stopping criterion was reached
        is announced to the underlying slice mapper.
        """
        # get the initial split into train and test
        dataset, testdataset = self._get_traintest_ds(ds)

        if __debug__:
            debug('RFEC',
                  "Initiating RFE with training on %s and testing using %s",
                  (dataset, testdataset))
        errors = []
        """Computed error for each tested features set."""

        ca = self.ca
        ca.nfeatures = []
        """Number of features at each step. Since it is not used by the
        algorithm it is stored directly in the conditional attribute"""

        ca.history = np.arange(dataset.nfeatures)
        """Store the last step # when the feature was still present
        """

        ca.sensitivities = []

        stop = False
        """Flag when RFE should be stopped."""

        results = None
        """Will hold the best feature set ever."""

        wdataset = dataset
        """Operate on working dataset initially identical."""

        wtestdataset = testdataset
        """Same feature selection has to be performs on test dataset as well.
        This will hold the current testdataset."""

        step = 0
        """Counter how many selection step where done."""

        orig_feature_ids = np.arange(dataset.nfeatures)
        """List of feature Ids as per original dataset remaining at any given
        step"""

        sensitivity = None
        """Contains the latest sensitivity map."""

        result_selected_ids = orig_feature_ids
        """Resultant ids of selected features. Since the best is not
        necessarily is the last - we better keep this one around. By
        default -- all features are there"""
        selected_ids = result_selected_ids

        while wdataset.nfeatures > 0:

            if __debug__:
                debug('RFEC',
                      "Step %d: nfeatures=%d" % (step, wdataset.nfeatures))

            # mark the features which are present at this step
            # if it brings any mentionable computational burden in the future,
            # only mark the removed features at each step
            ca.history[orig_feature_ids] = step

            # Compute sensitivity map
            if self.__update_sensitivity or sensitivity is None:
                sensitivity = self._fmeasure(wdataset)
                if len(sensitivity) > 1:
                    raise ValueError(
                        "RFE cannot handle multiple sensitivities at once. "
                        "'%s' returned %i sensitivities." %
                        (self._fmeasure.__class__.__name__, len(sensitivity)))

            if ca.is_enabled("sensitivities"):
                ca.sensitivities.append(sensitivity)

            # get error for current feature set (handles optional retraining)
            error = self._evaluate_pmeasure(wdataset, wtestdataset)
            # Record the error
            errors.append(np.asscalar(error))

            # Check if it is time to stop and if we got
            # the best result
            stop = self._stopping_criterion(errors)
            isthebest = self._bestdetector(errors)

            nfeatures = wdataset.nfeatures

            if ca.is_enabled("nfeatures"):
                ca.nfeatures.append(wdataset.nfeatures)

            # store result
            if isthebest:
                result_selected_ids = orig_feature_ids

            if __debug__:
                debug(
                    'RFEC',
                    "Step %d: nfeatures=%d error=%.4f best/stop=%d/%d " %
                    (step, nfeatures, np.asscalar(error), isthebest, stop))

            # stop if it is time to finish
            if nfeatures == 1 or stop:
                break

            # Select features to preserve
            selected_ids = self._fselector(sensitivity)

            if __debug__:
                debug(
                    'RFEC_',
                    "Sensitivity: %s, nfeatures_selected=%d, selected_ids: %s"
                    % (sensitivity, len(selected_ids), selected_ids))

            # Create a dataset only with selected features
            wdataset = wdataset[:, selected_ids]

            # select corresponding sensitivity values if they are not
            # recomputed
            if not self.__update_sensitivity:
                sensitivity = sensitivity[selected_ids]

            # need to update the test dataset as well
            # XXX why should it ever become None?
            # yoh: because we can have __transfer_error computed
            #      using wdataset. See xia-generalization estimate
            #      in lightsvm. Or for god's sake leave-one-out
            #      on a wdataset
            # TODO: document these cases in this class
            if testdataset is not None:
                wtestdataset = wtestdataset[:, selected_ids]

            step += 1

            # WARNING: THIS MUST BE THE LAST THING TO DO ON selected_ids
            selected_ids.sort()
            if self.ca.is_enabled("history") \
                   or self.ca.is_enabled('selected_ids'):
                orig_feature_ids = orig_feature_ids[selected_ids]

            # we already have the initial sensitivities, so even for a shared
            # classifier we can cleanup here
            self._pmeasure.untrain()
        # charge conditional attributes
        self.ca.errors = errors
        self.ca.selected_ids = result_selected_ids
        if __debug__:
            debug('RFEC', "Selected %d features: %s",
                  (len(result_selected_ids), result_selected_ids))

        # announce desired features to the underlying slice mapper
        # do copy to survive later selections
        self._safe_assign_slicearg(copy(result_selected_ids))
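Stripped of re-training, stopping criteria and test-set handling, the elimination schedule itself (keep the strongest fraction of features by absolute sensitivity at each step, tracked against the original ids) can be sketched with numpy alone. Names and numbers below are illustrative, not the PyMVPA API:

import numpy as np

def rfe_schedule(sensitivity, fraction=0.5, min_nfeatures=1):
    """Yield the surviving original feature ids after each elimination step."""
    ids = np.arange(len(sensitivity))
    sens = np.asarray(sensitivity, dtype=float)
    while len(ids) > min_nfeatures:
        n_keep = max(min_nfeatures, int(len(ids) * fraction))
        keep = np.argsort(np.abs(sens))[-n_keep:]  # strongest features
        keep.sort()                                # preserve original ordering
        ids, sens = ids[keep], sens[keep]
        yield ids.copy()

# list(rfe_schedule([0.1, -0.9, 0.2, 0.8, -0.05, 0.3]))
# -> [array([1, 3, 5]), array([1])]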
Beispiel #24
0
class S12(S1__, S2):
    v12 = ConditionalAttribute()
Beispiel #25
0
class BaseSearchlight(Measure):
    """Base class for searchlights.

    The idea for a searchlight algorithm stems from a paper by
    :ref:`Kriegeskorte et al. (2006) <KGB06>`.
    """

    roi_sizes = ConditionalAttribute(enabled=False,
        doc="Number of features in each ROI.")

    roi_feature_ids = ConditionalAttribute(enabled=False,
        doc="Feature IDs for all generated ROIs.")

    roi_center_ids = ConditionalAttribute(enabled=True,
        doc="Center ID for all generated ROIs.")

    is_trained = True
    """Indicate that this measure is always trained."""


    def __init__(self, queryengine, roi_ids=None, nproc=None,
                 **kwargs):
        """
        Parameters
        ----------
        queryengine : QueryEngine
          Engine to use to discover the "neighborhood" of each feature.
          See :class:`~mvpa2.misc.neighborhood.QueryEngine`.
        roi_ids : None or list(int) or str
          List of feature ids (not coordinates) the shall serve as ROI seeds
          (e.g. sphere centers). Alternatively, this can be the name of a
          feature attribute of the input dataset, whose non-zero values
          determine the feature ids. By default all features will be used.
        nproc : None or int
          How many processes to use for computation.  Requires `pprocess`
          external module.  If None -- all available cores will be used.
        **kwargs
          In addition this class supports all keyword arguments of its
          base-class :class:`~mvpa2.measures.base.Measure`.
      """
        Measure.__init__(self, **kwargs)

        if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
            raise RuntimeError("The 'pprocess' module is required for "
                               "multiprocess searchlights. Please either "
                               "install python-pprocess, or reduce `nproc` "
                               "to 1 (got nproc=%i) or set to default None"
                               % nproc)

        self._queryengine = queryengine
        if roi_ids is not None and not isinstance(roi_ids, str) \
                and not len(roi_ids):
            raise ValueError(
                  "Cannot run searchlight on an empty list of roi_ids")
        self.__roi_ids = roi_ids
        self.nproc = nproc


    def __repr__(self, prefixes=[]):
        """String representation of a `Measure`

        Includes only arguments which differ from default ones
        """
        return super(BaseSearchlight, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['queryengine', 'roi_ids', 'nproc']))


    def _call(self, dataset):
        """Perform the ROI search.
        """
        # local binding
        nproc = self.nproc

        if nproc is None and externals.exists('pprocess'):
            import pprocess
            try:
                nproc = pprocess.get_number_of_cores() or 1
            except AttributeError:
                warning("pprocess version %s has no API to figure out maximal "
                        "number of cores. Using 1"
                        % externals.versions['pprocess'])
                nproc = 1
        # train the queryengine
        self._queryengine.train(dataset)

        # decide whether to run on all possible center coords or just a provided
        # subset
        if isinstance(self.__roi_ids, str):
            roi_ids = dataset.fa[self.__roi_ids].value.nonzero()[0]
        elif self.__roi_ids is not None:
            roi_ids = self.__roi_ids
            # safeguard against stupidity
            if __debug__:
                qe_ids = self._queryengine.ids # known to qe
                if not set(qe_ids).issuperset(roi_ids):
                    raise IndexError(
                          "Some roi_ids are not known to the query engine %s: %s"
                          % (self._queryengine,
                             set(roi_ids).difference(qe_ids)))
        else:
            roi_ids = self._queryengine.ids

        # pass to subclass
        results = self._sl_call(dataset, roi_ids, nproc)
        # charge state
        self.ca.raw_results = results
        # return raw results, base-class will take care of transformations
        return results


    def _sl_call(self, dataset, roi_ids, nproc):
        """Classical generic searchlight implementation
        """
        raise NotImplementedError("Must be implemented in the derived classes")

    queryengine = property(fget=lambda self: self._queryengine)
    roi_ids = property(fget=lambda self: self.__roi_ids)
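The three-way handling of roi_ids in _call() above (feature-attribute name, explicit id list, or None for all query-engine ids) can be summarized in a standalone helper. This is only an illustration; resolve_roi_ids is not part of PyMVPA:

import numpy as np

def resolve_roi_ids(roi_ids, fa, qe_ids):
    """roi_ids: None, a feature-attribute name, or an explicit list of ids."""
    if isinstance(roi_ids, str):
        # name of a feature attribute: its non-zero entries mark the seeds
        return np.nonzero(np.asarray(fa[roi_ids]))[0]
    elif roi_ids is not None:
        unknown = set(roi_ids) - set(qe_ids)
        if unknown:
            raise IndexError("roi_ids not known to the query engine: %s" % unknown)
        return np.asarray(roi_ids)
    else:
        return np.asarray(qe_ids)

# resolve_roi_ids('mask', {'mask': [0, 1, 0, 1]}, range(4)) -> array([1, 3])
# resolve_roi_ids([0, 2], {}, range(4))                     -> array([0, 2])
# resolve_roi_ids(None, {}, range(4))                       -> array([0, 1, 2, 3])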
Beispiel #26
0
class Hyperalignment(ClassWithCollections):
    """Align the features across multiple datasets into a common feature space.

    This is a three-level algorithm. In the first level, a series of input
    datasets is projected into a common feature space using a configurable
    mapper. The common space is initially defined by a chosen exemplar from the
    list of input datasets, but is subsequently refined by iteratively combining
    the common space with the projected input datasets.

    In the second (optional) level, the original input datasets are again
    aligned with (or projected into) the intermediate first-level common
    space. Through a configurable number of iterations the common space is
    further refined by repeated projections of the input datasets and
    combination/aggregation of these projections into an updated common space.

    In the third level, the input datasets are again aligned with the, now
    final, common feature space. The output of this algorithm are trained
    mappers (one for each input dataset) that transform the individual features
    spaces into the common space.

    Level 1 and 2 are performed by the ``train()`` method, and level 3 is
    performed when the trained Hyperalignment instance is called with a list of
    datasets. This dataset list may or may not be identical to the training
    datasets.

    The default values for the parameters of the algorithm (e.g. projection via
    Procrustean transformation, common space aggregation by averaging) resemble
    the setup reported in :ref:`Haxby et al., Neuron (2011) <HGC+11>` *A common,
    high-dimensional model of the representational space in human ventral
    temporal cortex.*

    Examples
    --------
    >>> # get some example data
    >>> from mvpa2.testing.datasets import datasets
    >>> from mvpa2.misc.data_generators import random_affine_transformation
    >>> ds4l = datasets['uni4large']
    >>> # generate a number of distorted variants of this data
    >>> dss = [random_affine_transformation(ds4l) for i in xrange(4)]
    >>> ha = Hyperalignment()
    >>> ha.train(dss)
    >>> mappers = ha(dss)
    >>> len(mappers)
    4
    """

    training_residual_errors = ConditionalAttribute(
        enabled=False,
        doc="""Residual error (norm of the difference between common space
                and projected data) per each training dataset at each level. The
                residuals are stored in a dataset with one row per level, and
                one column per input dataset. The first row corresponds to the
                error after 1st-level hyperalignment; the remaining rows store
                the residual errors for each 2nd-level iteration.""")

    residual_errors = ConditionalAttribute(
        enabled=False,
        doc="""Residual error (norm of the difference between common space
                and projected data) per each dataset. The residuals are stored
                in a single-row dataset with one column per input dataset.""")

    # XXX Who cares whether it was chosen, or specified? This should be just
    # 'ref_ds'
    choosen_ref_ds = ConditionalAttribute(
        enabled=True,
        doc="""Index of the input dataset used as 1st-level reference
                dataset.""")

    # Lets use built-in facilities to specify parameters which
    # constructor should accept
    # the ``space`` of the mapper determines where the algorithm places the
    # common space definition in the datasets
    alignment = Parameter(
        ProcrusteanMapper(space='commonspace'),  # might provide allowedtype
        allowedtype='basestring',
        doc="""The multidimensional transformation mapper. If
            `None` (default) an instance of
            :class:`~mvpa2.mappers.procrustean.ProcrusteanMapper` is
            used.""")

    alpha = Parameter(
        1,
        allowedtype='float32',
        min=0,
        max=1,
        doc="""Regularization parameter to traverse between (Shrinkage)-CCA
                (canonical correlation analysis) and regular hyperalignment.
                Setting alpha to 1 makes the algorithm identical to
                hyperalignment and alpha of 0 makes it CCA. By default,
                it is 1, therefore hyperalignment. """)

    level2_niter = Parameter(1,
                             allowedtype='int',
                             min=0,
                             doc="Number of 2nd-level iterations.")

    ref_ds = Parameter(
        None,
        allowedtype='int',
        min=0,
        doc="""Index of a dataset to use as 1st-level common space
                reference.  If `None`, then the dataset with the maximum
                number of features is used.""")

    zscore_all = Parameter(
        False,
        allowedtype='bool',
        doc="""Flag to Z-score all datasets prior hyperalignment.
            Turn it off if Z-scoring is not desired or was already performed.
            If True, returned mappers are ChainMappers with the Z-scoring
            prepended to the actual projection.""")

    zscore_common = Parameter(
        True,
        allowedtype='bool',
        doc="""Flag to Z-score the common space after each adjustment.
                This should be left enabled in most cases.""")

    combiner1 = Parameter(
        lambda x, y: 0.5 * (x + y),  #
        doc="""How to update common space in the 1st-level loop. This must
                be a callable that takes two arguments. The first argument is
                one of the input datasets after projection onto the 1st-level
                common space. The second argument is the current 1st-level
                common space. The 1st-level combiner is called iteratively for
                each projected input dataset, except for the reference dataset.
                By default the new common space is the average of the current
                common space and the recently projected dataset.""")

    combiner2 = Parameter(
        lambda l: np.mean(l, axis=0),
        doc="""How to combine all individual spaces to common space. This
            must be a callable that take a sequence of datasets as an argument.
            The callable must return a single array. This combiner is called
            once with all datasets after 1st-level projection to create an
            updated common space, and is subsequently called again after each
            2nd-level iteration.""")

    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)
        self.commonspace = None

    def train(self, datasets):
        """Derive a common feature space from a series of datasets.

        Parameters
        ----------
        datasets : sequence of datasets

        Returns
        -------
        A list of trained Mappers matching the number of input datasets.
        """
        params = self.params  # for quicker access ;)
        ca = self.ca
        ndatasets = len(datasets)
        nfeatures = [ds.nfeatures for ds in datasets]
        alpha = params.alpha

        residuals = None
        if ca['training_residual_errors'].enabled:
            residuals = np.zeros((1 + params.level2_niter, ndatasets))
            ca.training_residual_errors = Dataset(
                samples=residuals,
                sa={
                    'levels':
                    ['1'] + ['2:%i' % i for i in xrange(params.level2_niter)]
                })

        if __debug__:
            debug('HPAL',
                  "Hyperalignment %s for %i datasets" % (self, ndatasets))

        if params.ref_ds is None:
            ref_ds = np.argmax(nfeatures)
        else:
            ref_ds = params.ref_ds
            if ref_ds < 0 or ref_ds >= ndatasets:
                raise ValueError("Requested reference dataset %i is out of "
                                 "bounds. We have only %i datasets provided"
                                 % (ref_ds, ndatasets))
        ca.choosen_ref_ds = ref_ds
        # zscore all data sets
        # ds = [ zscore(ds, chunks_attr=None) for ds in datasets]

        # TODO since we are doing in-place zscoring create deep copies
        # of the datasets with pruned targets and shallow copies of
        # the collections (if they would come needed in the transformation)
        # TODO: handle floats and non-floats differently to prevent
        #       waste of memory if there is no need (e.g. no z-scoring)
        #otargets = [ds.sa.targets for ds in datasets]
        datasets = [ds.copy(deep=False) for ds in datasets]
        #datasets = [Dataset(ds.samples.astype(float), sa={'targets': [None] * len(ds)})
        #datasets = [Dataset(ds.samples, sa={'targets': [None] * len(ds)})
        #            for ds in datasets]

        if params.zscore_all:
            if __debug__:
                debug('HPAL', "Z-scoring all datasets")
            for ids in xrange(len(datasets)):
                zmapper = ZScoreMapper(chunks_attr=None)
                zmapper.train(datasets[ids])
                datasets[ids] = zmapper.forward(datasets[ids])

        if alpha < 1:
            datasets, wmappers = self._regularize(datasets, alpha)

        # initial common space is the reference dataset
        commonspace = datasets[ref_ds].samples
        # the reference dataset might have been zscored already, don't do it
        # twice
        if params.zscore_common and not params.zscore_all:
            if __debug__:
                debug(
                    'HPAL_', "Creating copy of a commonspace and assuring "
                    "it is of a floating type")
            commonspace = commonspace.astype(float)
            zscore(commonspace, chunks_attr=None)

        # create a mapper per dataset
        # might prefer some other way to initialize... later
        mappers = [deepcopy(params.alignment) for ds in datasets]

        #
        # Level 1 -- initial projection
        #
        lvl1_projdata = self._level1(datasets, commonspace, ref_ds, mappers,
                                     residuals)
        #
        # Level 2 -- might iterate multiple times
        #
        # this is the final common space
        self.commonspace = self._level2(datasets, lvl1_projdata, mappers,
                                        residuals)

    def __call__(self, datasets):
        """Derive a common feature space from a series of datasets.

        Parameters
        ----------
        datasets : sequence of datasets

        Returns
        -------
        A list of trained Mappers matching the number of input datasets.
        """
        if self.commonspace is None:
            self.train(datasets)

        # place datasets into a copy of the list since items
        # will be reassigned
        datasets = list(datasets)

        params = self.params  # for quicker access ;)
        alpha = params.alpha  # for letting me be lazy ;)
        if params.zscore_all:
            if __debug__:
                debug('HPAL', "Z-scoring all datasets")
            # zscore them once while storing corresponding ZScoreMapper's
            # so we can assemble a comprehensive mapper at the end
            # (together with procrustes)
            zmappers = []
            for ids in xrange(len(datasets)):
                zmapper = ZScoreMapper(chunks_attr=None)
                zmappers.append(zmapper)
                zmapper.train(datasets[ids])
                datasets[ids] = zmapper.forward(datasets[ids])

        if alpha < 1:
            datasets, wmappers = self._regularize(datasets, alpha)

        #
        # Level 3 -- final, from-scratch, alignment to final common space
        #
        mappers = self._level3(datasets)
        # return trained mappers for projection from all datasets into the
        # common space
        if params.zscore_all:
            # We need to construct new mappers which would chain
            # zscore and then final transformation
            if params.alpha < 1:
                return [
                    ChainMapper([zm, wm, m])
                    for zm, wm, m in zip(zmappers, wmappers, mappers)
                ]
            else:
                return [
                    ChainMapper([zm, m]) for zm, m in zip(zmappers, mappers)
                ]
        else:
            if params.alpha < 1:
                return [
                    ChainMapper([wm, m]) for wm, m in zip(wmappers, mappers)
                ]
            else:
                return mappers

    def _regularize(self, datasets, alpha):
        if __debug__:
            debug('HPAL',
                  "Using regularized hyperalignment with alpha of %d" % alpha)
        wmappers = []
        for ids in xrange(len(datasets)):
            U, S, Vh = np.linalg.svd(datasets[ids])
            S = 1 / np.sqrt((1 - alpha) * np.square(S) + alpha)
            S.resize(len(Vh))
            S = np.matrix(np.diag(S))
            W = np.matrix(Vh.T) * S * np.matrix(Vh)
            wmapper = StaticProjectionMapper(proj=W)
            wmappers.append(wmapper)
            datasets[ids] = wmapper.forward(datasets[ids])
        return datasets, wmappers

    def _level1(self, datasets, commonspace, ref_ds, mappers, residuals):
        params = self.params  # for quicker access ;)
        data_mapped = [ds.samples for ds in datasets]
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 1: ds #%i" % i)
            if i == ref_ds:
                continue
            # assign common space to ``space`` of the mapper, because this is
            # where it will be looking for it
            ds_new.sa[m.get_space()] = commonspace
            # find transformation of this dataset into the current common space
            m.train(ds_new)
            # remove common space attribute again to save on memory when the
            # common space is updated for the next iteration
            del ds_new.sa[m.get_space()]
            # project this dataset into the current common space
            ds_ = m.forward(ds_new.samples)
            if params.zscore_common:
                zscore(ds_, chunks_attr=None)
            # replace original dataset with mapped one -- only the reference
            # dataset will remain unchanged
            data_mapped[i] = ds_

            # compute first-level residuals wrt to the initial common space
            if residuals is not None:
                residuals[0, i] = np.linalg.norm(ds_ - commonspace)

            # Update the common space. This is an incremental update after
            # processing each 1st-level dataset. Maybe there should be a flag
            # to make a batch update after processing all 1st-level datasets
            # to an identical 1st-level common space
            # TODO: make just a function so we don't waste space
            commonspace = params.combiner1(ds_, commonspace)
            if params.zscore_common:
                zscore(commonspace, chunks_attr=None)
        return data_mapped

    def _level2(self, datasets, lvl1_data, mappers, residuals):
        params = self.params  # for quicker access ;)
        data_mapped = lvl1_data
        # aggregate all processed 1st-level datasets into a new 2nd-level
        # common space
        commonspace = params.combiner2(data_mapped)

        # XXX Why is this commented out? Who knows what combiner2 is doing and
        # whether it changes the distribution of the data
        #if params.zscore_common:
        #zscore(commonspace, chunks_attr=None)

        ndatasets = len(datasets)
        for loop in xrange(params.level2_niter):
            # 2nd-level alignment starts from the original/unprojected datasets
            # again
            for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
                if __debug__:
                    debug('HPAL_',
                          "Level 2 (%i-th iteration): ds #%i" % (loop, i))

                # Optimization speed up heuristic
                # Slightly modify the common space towards other feature
                # spaces and reduce influence of this feature space for the
                # to-be-computed projection
                temp_commonspace = (commonspace * ndatasets - data_mapped[i]) \
                                    / (ndatasets - 1)

                if params.zscore_common:
                    zscore(temp_commonspace, chunks_attr=None)
                # assign current common space
                ds_new.sa[m.get_space()] = temp_commonspace
                # retrain the mapper for this dataset
                m.train(ds_new)
                # remove common space attribute again to save on memory when the
                # common space is updated for the next iteration
                del ds_new.sa[m.get_space()]
                # obtain the 2nd-level projection
                ds_ = m.forward(ds_new.samples)
                if params.zscore_common:
                    zscore(ds_, chunks_attr=None)
                # store for 2nd-level combiner
                data_mapped[i] = ds_
                # compute residuals
                if residuals is not None:
                    residuals[1 + loop, i] = np.linalg.norm(ds_ - commonspace)

            commonspace = params.combiner2(data_mapped)

        # and again
        if params.zscore_common:
            zscore(commonspace, chunks_attr=None)

        # return the final common space
        return commonspace

    def _level3(self, datasets):
        params = self.params  # for quicker access ;)
        # create a mapper per dataset
        mappers = [deepcopy(params.alignment) for ds in datasets]

        # key different from level-2; the common space is uniform
        #temp_commonspace = commonspace

        residuals = None
        if self.ca['residual_errors'].enabled:
            residuals = np.zeros((1, len(datasets)))
            self.ca.residual_errors = Dataset(samples=residuals)

        # start from original input datasets again
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 3: ds #%i" % i)

            # retrain mapper on final common space
            ds_new.sa[m.get_space()] = self.commonspace
            m.train(ds_new)
            # remove common space attribute again to save on memory
            del ds_new.sa[m.get_space()]

            if residuals is not None:
                # obtain final projection
                data_mapped = m.forward(ds_new.samples)
                residuals[0,
                          i] = np.linalg.norm(data_mapped - self.commonspace)

        return mappers
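The alpha parameter interpolates between plain hyperalignment (alpha=1, where the projection reduces to the identity) and a CCA-like whitening of each dataset (alpha=0), as implemented in _regularize() above. A numpy-only sketch of the same transform follows; the function and variable names are made up, and unlike the original it assumes more samples than features:

import numpy as np

def regularize(X, alpha):
    """Project X by W = V diag(1/sqrt((1-alpha)*S**2 + alpha)) V^T."""
    U, S, Vh = np.linalg.svd(X, full_matrices=False)
    scale = 1.0 / np.sqrt((1 - alpha) * S ** 2 + alpha)
    W = Vh.T.dot(np.diag(scale)).dot(Vh)
    return X.dot(W), W

X = np.random.randn(20, 5)
_, W1 = regularize(X, alpha=1.0)   # alpha=1: W is (numerically) the identity
assert np.allclose(W1, np.eye(5))
Xw, _ = regularize(X, alpha=0.0)   # alpha=0: columns become decorrelated (whitened)
assert np.allclose(Xw.T.dot(Xw), np.eye(5))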
Beispiel #27
0
class Node(ClassWithCollections):
    """Common processing object.

    A `Node` is an object that processes datasets. It can be called with a
    `Dataset` and returns another dataset with the results. In addition, a node
    can also be used as a generator. Upon calling ``generate()`` with a dataset
    it yields (potentially) multiple result datasets.

    Nodes have a notion of ``space``. The meaning of this space may vary heavily
    across sub-classes. In general, this is a trigger that tells the node to
    compute and store information about the input data that is "interesting" in
    the context of the corresponding processing in the output dataset.
    """

    calling_time = ConditionalAttribute(
        enabled=True, doc="Time (in seconds) it took to call the node")

    raw_results = ConditionalAttribute(
        enabled=False,
        doc="Computed results before invoking postproc. " +
        "Stored only if postproc is not None.")

    def __init__(self, space=None, postproc=None, **kwargs):
        """
        Parameters
        ----------
        space: str, optional
          Name of the 'processing space'. The actual meaning of this argument
          heavily depends on the sub-class implementation. In general, this is
          a trigger that tells the node to compute and store information about
          the input data that is "interesting" in the context of the
          corresponding processing in the output dataset.
        postproc : Node instance, optional
          Node to perform post-processing of results. This node is applied
          in `__call__()` to perform a final processing step on the to be
          result dataset. If None, nothing is done.
        """
        ClassWithCollections.__init__(self, **kwargs)
        if __debug__:
            debug("NO", "Init node '%s' (space: '%s', postproc: '%s')",
                  (self.__class__.__name__, space, str(postproc)))
        self.set_space(space)
        self.set_postproc(postproc)

    def __call__(self, ds):
        """
        The default implementation calls ``_precall()``, ``_call()``, and
        finally returns the output of ``_postcall()``.

        Parameters
        ----------
        ds: Dataset
          Input dataset.

        Returns
        -------
        Dataset
        """
        t0 = time.time()  # record the time when call initiated

        self._precall(ds)
        result = self._call(ds)
        result = self._postcall(ds, result)

        self.ca.calling_time = time.time() - t0  # set the calling_time
        return result

    def _precall(self, ds):
        """Preprocessing of data

        By default, does nothing.

        Parameters
        ----------
        ds: Dataset
          Original input dataset.

        Returns
        -------
        Dataset
        """
        return ds

    def _call(self, ds):
        raise NotImplementedError

    def _postcall(self, ds, result):
        """Postprocessing of results.

        By default, does nothing.

        Parameters
        ----------
        ds: Dataset
          Original input dataset.
        result: Dataset
          Preliminary result dataset (as produced by ``_call()``).

        Returns
        -------
        Dataset
        """
        if not self.__postproc is None:
            if __debug__:
                debug("NO", "Applying post-processing node %s",
                      (self.__postproc, ))
            self.ca.raw_results = result

            result = self.__postproc(result)

        return result

    def generate(self, ds):
        """Yield processing results.

        This methods causes the node to behave like a generator. By default it
        simply yields a single result of its processing -- identical to the
        output of calling the node with a dataset. Subclasses might implement
        generators that yield multiple results.

        Parameters
        ----------
        ds: Dataset
          Input dataset

        Returns
        -------
        generator
          the generator yields the result of the processing.
        """
        yield self(ds)

    def get_space(self):
        """Query the processing space name of this node."""
        return self.__space

    def set_space(self, name):
        """Set the processing space name of this node."""
        self.__space = name

    def get_postproc(self):
        """Returns the post-processing node or None."""
        return self.__postproc

    def set_postproc(self, node):
        """Assigns a post-processing node

        Set to `None` to disable postprocessing.
        """
        self.__postproc = node

    def __str__(self):
        return _str(self)

    def __repr__(self, prefixes=[]):
        return super(Node,
                     self).__repr__(prefixes=prefixes +
                                    _repr_attrs(self, ['space', 'postproc']))

    space = property(get_space,
                     set_space,
                     doc="Processing space name of this node")

    postproc = property(get_postproc,
                        set_postproc,
                        doc="Node to perform post-processing of results")
Beispiel #28
0
 class S1(ClassWithCollections):
     v1 = ConditionalAttribute(enabled=True, doc="values1 is ...")
     v1XXX = ConditionalAttribute(enabled=False, doc="values1 is ...")
Beispiel #29
0
class Classifier(Learner):
    """Abstract classifier class to be inherited by all classifiers
    """

    # Kept separate from the doc to avoid polluting help(clf), especially if
    # we include help for the parent class
    _DEV__doc__ = """
    Required behavior:

    Every classifier has to be instantiable without having to specify the
    training pattern.

    Repeated calls to the train() method with different training data have to
    result in a valid classifier, trained for the particular dataset.

    It must be possible to specify all classifier parameters as keyword
    arguments to the constructor.

    Recommended behavior:

    Derived classifiers should provide access to *estimates* -- i.e. that
    information that is finally used to determine the predicted class label.

    Michael: Maybe it works well if each classifier provides an 'estimates'
             state member. This variable is a list as long as, and in the same
             order as, Dataset.uniquetargets (training data). Each item in the
             list corresponds to the likelihood of a sample belonging to the
             respective class. However the semantics might differ between
             classifiers, e.g. kNN would probably store distances to class-
             neighbors, where PLR would store the raw function value of the
             logistic function. So in the case of kNN low is predictive and for
             PLR high is predictive. Don't know if there is the need to unify
             that.

             As the storage and/or computation of this information might be
             demanding its collection should be switchable and off be default.

    Nomenclature
     * predictions  : result of the last call to .predict()
     * estimates : might be different from predictions if a classifier's predict()
                   makes a decision based on some internal value such as
                   probability or a distance.
    """
    # Dict that contains the parameters of a classifier.
    # This shall provide an interface to plug a generic parameter optimizer
    # into all classifiers (e.g. a grid- or line-search optimizer).
    # A dictionary is used because Michael thinks that access by name is nicer.
    # Additionally Michael thinks ATM that additional information might be
    # necessary in some situations (e.g. a reasonably predefined parameter range,
    # minimal iteration stepsize, ...), therefore the value for each key should
    # also be a dict or we should use mvpa2.base.param.Parameter ...

    training_stats = ConditionalAttribute(
        enabled=False, doc="Confusion matrix of learning performance")

    predictions = ConditionalAttribute(enabled=True,
                                       doc="Most recent set of predictions")

    estimates = ConditionalAttribute(
        enabled=True,
        doc="Internal classifier estimates the most recent " +
        "predictions are based on")

    predicting_time = ConditionalAttribute(
        enabled=True, doc="Time (in seconds) which took classifier to predict")

    __tags__ = []
    """Describes some specifics about the classifier -- is that it is
    doing regression for instance...."""

    # TODO: make it available only for actually retrainable classifiers
    retrainable = Parameter(
        False,
        constraints='bool',
        doc="""Either to enable retraining for 'retrainable' classifier.""",
        index=1002)

    def __init__(self, space=None, **kwargs):
        # by default we want classifiers to use the 'targets' sample attribute
        # for training/testing
        if space is None:
            space = 'targets'
        Learner.__init__(self, space=space, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = 0
        """Stores number of features for which classifier was trained.
        If 0 -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)

        # deprecate
        #self.__trainedidhash = None
        #"""Stores id of the dataset on which it was trained to signal
        #in trained() if it was trained already on the same dataset"""

    @property
    def __summary_class__(self):
        if 'regression' in self.__tags__:
            return RegressionStatistics
        else:
            return ConfusionMatrix

    @property
    def __is_regression__(self):
        return 'regression' in self.__tags__

    def __str__(self, *args, **kwargs):
        if __debug__ and 'CLF_' in debug.active:
            return "%s / %s" % (repr(self), super(Classifier, self).__str__())
        else:
            return _str(self, *args, **kwargs)

    def _pretrain(self, dataset):
        """Functionality prior to training
        """
        # Reset all conditional attributes and possibly free up some memory
        # explicitly
        params = self.params
        if not params.retrainable:
            self.untrain()
        else:
            # just reset the ca, do not untrain
            self.ca.reset()
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                __idhashes = self.__idhashes
                __invalidatedChangedData = self.__invalidatedChangedData

                # if we don't know what was changed we need to figure
                # them out
                if __debug__:
                    debug('CLF_', "IDHashes are %s", (__idhashes, ))

                # Look at the data if any was changed
                for key, data_ in (('traindata', dataset.samples),
                                   ('targets',
                                    dataset.sa[self.get_space()].value)):
                    _changedData[key] = self.__was_data_changed(key, data_)
                    # if those idhashes were invalidated by retraining
                    # we need to adjust _changedData accordingly
                    if __invalidatedChangedData.get(key, False):
                        if __debug__ and not _changedData[key]:
                            debug(
                                'CLF_', 'Found that idhash for %s was '
                                'invalidated by retraining', (key, ))
                        _changedData[key] = True

                # Look at the parameters
                for col in self._paramscols:
                    changedParams = self._collections[col].which_set()
                    if len(changedParams):
                        _changedData[col] = changedParams

                self.__invalidatedChangedData = {}  # reset it on training

                if __debug__:
                    debug('CLF_', "Obtained _changedData is %s",
                          (self._changedData, ))

    def _posttrain(self, dataset):
        """Functionality post training

        For instance -- computing confusion matrix.

        Parameters
        ----------
        dataset : Dataset
          Data which was used for training
        """
        super(Classifier, self)._posttrain(dataset)

        ca = self.ca

        # needs to be assigned first since below we use predict
        self.__trainednfeatures = dataset.nfeatures

        if __debug__ and 'CHECK_TRAINED' in debug.active:
            self.__trainedidhash = dataset.idhash

        if ca.is_enabled('training_stats') and \
               not ca.is_set('training_stats'):
            # we should not store predictions for training data,
            # it is confusing imho (yoh)
            ca.change_temporarily(disable_ca=["predictions"])
            if self.params.retrainable:
                # we would need to recheck if the data is the same,
                # XXX think if there is a way to make this all
                # efficient. For now, retrainable classifiers probably
                # have no choice but to avoid using
                # training_stats... sad
                self.__changedData_isset = False
            predictions = self.predict(dataset)
            ca.reset_changed_temporarily()
            targets = dataset.sa[self.get_space()].value
            if is_datasetlike(predictions) and (self.get_space()
                                                in predictions.fa):
                # e.g. in case of pair-wise uncombined results - provide
                # stats per each of the targets pairs
                prediction_targets = predictions.fa[self.get_space()].value
                ca.training_stats = dict(
                    (t,
                     self.__summary_class__(targets=targets,
                                            predictions=predictions.samples[:,
                                                                            i])
                     ) for i, t in enumerate(prediction_targets))
            else:
                ca.training_stats = self.__summary_class__(
                    targets=targets, predictions=predictions)

    def summary(self):
        """Providing summary over the classifier"""

        s = "Classifier %s" % self
        ca = self.ca
        ca_enabled = ca.enabled

        if self.trained:
            s += "\n trained"
            if ca.is_set('training_time'):
                s += ' in %.3g sec' % ca.training_time
            s += ' on data with'
            if ca.is_set('trained_targets'):
                s += ' targets:%s' % list(ca.trained_targets)

            nsamples, nchunks = None, None
            if ca.is_set('trained_nsamples'):
                nsamples = ca.trained_nsamples
            if ca.is_set('trained_dataset'):
                td = ca.trained_dataset
                nsamples, nchunks = td.nsamples, len(td.sa['chunks'].unique)
            if nsamples is not None:
                s += ' #samples:%d' % nsamples
            if nchunks is not None:
                s += ' #chunks:%d' % nchunks

            s += " #features:%d" % self.__trainednfeatures
            if ca.is_set('training_stats'):
                s += ", training error:%.3g" % ca.training_stats.error
        else:
            s += "\n not yet trained"

        if len(ca_enabled):
            s += "\n enabled ca:%s" % ', '.join(
                [str(ca[x]) for x in ca_enabled])
        return s

    def clone(self):
        """Create full copy of the classifier.

        It might require the classifier to be untrained first due to the
        present SWIG bindings.

        TODO: think about a proper re-implementation that does not rely on deepcopy
        """
        if __debug__:
            debug("CLF", "Cloning %s%s", (self, _strid(self)))
        try:
            return deepcopy(self)
        except Exception:
            self.untrain()
            return deepcopy(self)

    def _train(self, dataset):
        """Function to be actually overridden in derived classes
        """
        raise NotImplementedError

    def _prepredict(self, dataset):
        """Functionality prior prediction
        """
        if not ('notrain2predict' in self.__tags__):
            # check if classifier was trained if that is needed
            if not self.trained:
                raise FailedToPredictError(
                    "Classifier %s wasn't yet trained, therefore can't "
                    "predict" % self)
            nfeatures = dataset.nfeatures  #data.shape[1]
            # check if number of features is the same as in the data
            # it was trained on
            if nfeatures != self.__trainednfeatures:
                raise ValueError(
                    "Classifier %s was trained on data with %d features, "
                    "thus can't predict for %d features"
                    % (self, self.__trainednfeatures, nfeatures))

        if self.params.retrainable:
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                data = np.asanyarray(dataset.samples)
                _changedData['testdata'] = \
                                        self.__was_data_changed('testdata', data)
                if __debug__:
                    debug('CLF_', "prepredict: Obtained _changedData is %s",
                          (_changedData, ))

    def _postpredict(self, dataset, result):
        """Functionality after prediction is computed
        """
        self.ca.predictions = result
        if self.params.retrainable:
            self.__changedData_isset = False

    def _predict(self, dataset):
        """Actual prediction
        """
        raise NotImplementedError

    @accepts_samples_as_dataset
    def predict(self, dataset):
        """Predict classifier on data

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so. Also, subclasses that want to invoke the super class's
        prediction from within their own _predict() should call _predict()
        instead of predict(), since the latter would cause an infinite loop.
        """
        ## ??? yoh: changed to asany from as without exhaustive check
        data = np.asanyarray(dataset.samples)
        if __debug__:
            # Verify that we have no NaN/Inf's which we do not "support" ATM
            if not np.all(np.isfinite(data)):
                raise ValueError(
                    "Some input data for predict is not finite (NaN or Inf)")
            debug("CLF", "Predicting classifier %s on ds %s", (self, dataset))

        # remember the time when started computing predictions
        t0 = time.time()

        ca = self.ca
        # to assure that those are reset (could be set due to testing
        # post-training)
        ca.reset(['estimates', 'predictions'])

        self._prepredict(dataset)

        if self.__trainednfeatures > 0 \
               or 'notrain2predict' in self.__tags__:
            result = self._predict(dataset)
        else:
            warning(
                "Trying to predict using classifier trained on no features")
            if __debug__:
                debug("CLF",
                      "No features were present for training, prediction is " \
                      "bogus")
            result = [None] * data.shape[0]

        ca.predicting_time = time.time() - t0

        # with labels mapping in-place, we also need to go back to the
        # literal labels
        if self._attrmap:
            try:
                result = self._attrmap.to_literal(result)
            except KeyError as e:
                raise FailedToPredictError(
                    "Failed to convert predictions from numeric into "
                    "literals: %s" % e)

        self._postpredict(dataset, result)
        return result
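
A hedged usage sketch of the train/predict cycle and the conditional attributes defined above (predictions, estimates, predicting_time, training_stats). It assumes the kNN classifier and the synthetic testing datasets shipped with mvpa2, and is an illustration rather than part of the example code.

from mvpa2.clfs.knn import kNN
from mvpa2.testing.datasets import datasets

ds = datasets['uni2small']                  # small synthetic example dataset
clf = kNN(k=1, enable_ca=['estimates', 'training_stats'])
clf.train(ds)                               # _pretrain() -> _train() -> _posttrain()
predictions = clf.predict(ds)               # _prepredict() -> _predict() -> _postpredict()
print(clf.ca.predictions)                   # most recent predicted labels
print(clf.ca.estimates)                     # raw estimates behind those predictions
print(clf.ca.predicting_time)               # seconds spent in predict()
print(clf.ca.training_stats)                # confusion matrix on the training data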
Beispiel #30
0
class S2(ClassWithCollections):
    v2 = ConditionalAttribute(enabled=True, doc="values12 is ...")
Beispiel #31
0
class Hyperalignment(ClassWithCollections):
    """Align the features across multiple datasets into a common feature space.

    This is a three-level algorithm. In the first level, a series of input
    datasets is projected into a common feature space using a configurable
    mapper. The common space is initially defined by a chosen exemplar from the
    list of input datasets, but is subsequently refined by iteratively combining
    the common space with the projected input datasets.

    In the second (optional) level, the original input datasets are again
    aligned with (or projected into) the intermediate first-level common
    space. Through a configurable number of iterations the common space is
    further refined by repeated projections of the input datasets and
    combination/aggregation of these projections into an updated common space.

    In the third level, the input datasets are again aligned with the, now
    final, common feature space. The output of this algorithm are trained
    mappers (one for each input dataset) that transform the individual features
    spaces into the common space.

    Level 1 and 2 are performed by the ``train()`` method, and level 3 is
    performed when the trained Hyperalignment instance is called with a list of
    datasets. This dataset list may or may not be identical to the training
    datasets.

    The default values for the parameters of the algorithm (e.g. projection via
    Procrustean transformation, common space aggregation by averaging) resemble
    the setup reported in :ref:`Haxby et al., Neuron (2011) <HGC+11>` *A common,
    high-dimensional model of the representational space in human ventral
    temporal cortex.*

    Examples
    --------
    >>> # get some example data
    >>> from mvpa2.testing.datasets import datasets
    >>> from mvpa2.misc.data_generators import random_affine_transformation
    >>> ds4l = datasets['uni4large']
    >>> # generate a number of distorted variants of this data
    >>> dss = [random_affine_transformation(ds4l) for i in xrange(4)]
    >>> ha = Hyperalignment()
    >>> ha.train(dss)
    >>> mappers = ha(dss)
    >>> len(mappers)
    4
    """

    training_residual_errors = ConditionalAttribute(
        enabled=False,
        doc="""Residual error (norm of the difference between common space
                and projected data) per each training dataset at each level. The
                residuals are stored in a dataset with one row per level, and
                one column per input dataset. The first row corresponds to the
                error 1st-level of hyperalignment the remaining rows store the
                residual errors for each 2nd-level iteration.""")

    residual_errors = ConditionalAttribute(
        enabled=False,
        doc="""Residual error (norm of the difference between common space
                and projected data) per each dataset. The residuals are stored
                in a single-row dataset with one column per input dataset.""")

    # XXX Who cares whether it was chosen, or specified? This should be just
    # 'ref_ds'
    chosen_ref_ds = ConditionalAttribute(
        enabled=True,
        doc="""Index of the input dataset used as 1st-level reference
                dataset.""")

    # Let's use built-in facilities to specify parameters which the
    # constructor should accept
    # the ``space`` of the mapper determines where the algorithm places the
    # common space definition in the datasets
    alignment = Parameter(
        ProcrusteanMapper(space='commonspace'),
        # might provide allowedtype
        # XXX Currently, there's no way to handle this with constraints
        doc="""The multidimensional transformation mapper. If
            `None` (default) an instance of
            :class:`~mvpa2.mappers.procrustean.ProcrusteanMapper` is
            used.""")
    output_dim = Parameter(
        None,
        constraints=(EnsureInt() & EnsureRange(min=1) | EnsureNone()),
        doc="""Output common space dimensionality. If None, datasets are aligned
             to the features of the `ref_ds`. Otherwise, dimensionality reduction is
             performed using SVD and only the top SVs are kept. To get all features in
             SVD-aligned space, give output_dim>=nfeatures.
            """)

    alpha = Parameter(
        1,
        constraints=EnsureFloat() & EnsureRange(min=0, max=1),
        doc="""Regularization parameter to traverse between (Shrinkage)-CCA
                (canonical correlation analysis) and regular hyperalignment.
                Setting alpha to 1 makes the algorithm identical to
                hyperalignment and alpha of 0 makes it CCA. By default,
                it is 1, therefore hyperalignment. """)

    level2_niter = Parameter(1,
                             constraints=EnsureInt() & EnsureRange(min=0),
                             doc="Number of 2nd-level iterations.")

    ref_ds = Parameter(
        None,
        constraints=(EnsureRange(min=0) & EnsureInt()
                     | EnsureNone()),
        doc="""Index of a dataset to use as 1st-level common space
                reference.  If `None`, then the dataset with the maximum
                number of features is used.""")

    nproc = Parameter(
        1,
        constraints=EnsureInt(),
        doc="""Number of processes to use to parallelize the last step of
                alignment. If different from 1, it passes it as n_jobs to
                `joblib.Parallel`. Requires joblib package.""")

    zscore_all = Parameter(
        False,
        constraints='bool',
        doc="""Flag to Z-score all datasets prior hyperalignment.
            Turn it off if Z-scoring is not desired or was already performed.
            If True, returned mappers are ChainMappers with the Z-scoring
            prepended to the actual projection.""")

    zscore_common = Parameter(
        True,
        constraints='bool',
        doc="""Flag to Z-score the common space after each adjustment.
                This should be left enabled in most cases.""")

    combiner1 = Parameter(
        mean_xy,  #
        doc="""How to update common space in the 1st-level loop. This must
                be a callable that takes two arguments. The first argument is
                one of the input datasets after projection onto the 1st-level
                common space. The second argument is the current 1st-level
                common space. The 1st-level combiner is called iteratively for
                each projected input dataset, except for the reference dataset.
                By default the new common space is the average of the current
                common space and the recently projected dataset.""")

    level1_equal_weight = Parameter(
        False,
        constraints='bool',
        doc="""Flag to force all datasets to have the same weight in the
            level 1 iteration. False (default) means each time the new common
            space is the average of the current common space and the newly
            aligned dataset, and therefore earlier datasets have less weight."""
    )

    combiner2 = Parameter(
        mean_axis0,
        doc="""How to combine all individual spaces to common space. This
            must be a callable that take a sequence of datasets as an argument.
            The callable must return a single array. This combiner is called
            once with all datasets after 1st-level projection to create an
            updated common space, and is subsequently called again after each
            2nd-level iteration.""")

    joblib_backend = Parameter(
        None,
        constraints=EnsureChoice('multiprocessing', 'threading')
        | EnsureNone(),
        doc="""Backend to use for joblib when using nproc>1.
            Options are 'multiprocessing' and 'threading'. Default is to use
            'multiprocessing' unless run on OSX which have known issues with
            joblib v0.10.3. If it is set to specific value here, then that will
            be used at the risk of failure.""")

    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)
        self.commonspace = None
        # mapper to a low-dimensional subspace derived using SVD on training data.
        # Initialized here so that __call__ can access it after train() without it
        # being passed around. Moreover, it is similar to commonspace in that it
        # is required for mapping new subjects
        self._svd_mapper = None

    @due.dcite(Doi('10.1016/j.neuron.2011.08.026'),
               description="Hyperalignment of data to a common space",
               tags=["implementation"])
    def train(self, datasets):
        """Derive a common feature space from a series of datasets.

        Parameters
        ----------
        datasets : sequence of datasets

        Returns
        -------
        None
          The trained state is stored on the instance; calling the trained
          instance with a list of datasets returns the mappers.
        """
        params = self.params  # for quicker access ;)
        ca = self.ca
        # Check to make sure we get a list of datasets as input.
        if not isinstance(datasets, (list, tuple, np.ndarray)):
            raise TypeError("Input datasets should be a sequence "
                            "(of type list, tuple, or ndarray) of datasets.")

        ndatasets = len(datasets)
        nfeatures = [ds.nfeatures for ds in datasets]
        alpha = params.alpha

        residuals = None
        if ca['training_residual_errors'].enabled:
            residuals = np.zeros((1 + params.level2_niter, ndatasets))
            ca.training_residual_errors = Dataset(
                samples=residuals,
                sa={
                    'levels':
                    ['1'] + ['2:%i' % i for i in xrange(params.level2_niter)]
                })

        if __debug__:
            debug('HPAL',
                  "Hyperalignment %s for %i datasets" % (self, ndatasets))

        if params.ref_ds is None:
            ref_ds = np.argmax(nfeatures)
        else:
            ref_ds = params.ref_ds
            # Making sure that ref_ds is within range.
            # Parameter() already checks for it being a non-negative integer
            if ref_ds >= ndatasets:
                raise ValueError(
                    "Requested reference dataset %i is out of bounds. "
                    "We have only %i datasets provided" % (ref_ds, ndatasets))
        ca.chosen_ref_ds = ref_ds
        # zscore all data sets
        # ds = [ zscore(ds, chunks_attr=None) for ds in datasets]

        # TODO since we are doing in-place zscoring create deep copies
        # of the datasets with pruned targets and shallow copies of
        # the collections (if they would come needed in the transformation)
        # TODO: handle floats and non-floats differently to prevent
        #       waste of memory if there is no need (e.g. no z-scoring)
        #otargets = [ds.sa.targets for ds in datasets]
        datasets = [ds.copy(deep=False) for ds in datasets]
        #datasets = [Dataset(ds.samples.astype(float), sa={'targets': [None] * len(ds)})
        #datasets = [Dataset(ds.samples, sa={'targets': [None] * len(ds)})
        #            for ds in datasets]

        if params.zscore_all:
            if __debug__:
                debug('HPAL', "Z-scoring all datasets")
            for ids in xrange(len(datasets)):
                zmapper = ZScoreMapper(chunks_attr=None)
                zmapper.train(datasets[ids])
                datasets[ids] = zmapper.forward(datasets[ids])

        if alpha < 1:
            datasets, wmappers = self._regularize(datasets, alpha)

        # initial common space is the reference dataset
        commonspace = datasets[ref_ds].samples
        # the reference dataset might have been zscored already, don't do it
        # twice
        if params.zscore_common and not params.zscore_all:
            if __debug__:
                debug(
                    'HPAL_', "Creating copy of a commonspace and assuring "
                    "it is of a floating type")
            commonspace = commonspace.astype(float)
            zscore(commonspace, chunks_attr=None)
        # If there is only one dataset in the training phase, there is nothing
        # to be done; just use that data as the common space
        if len(datasets) < 2:
            self.commonspace = commonspace
        else:
            # create a mapper per dataset
            # might prefer some other way to initialize... later
            mappers = [deepcopy(params.alignment) for ds in datasets]

            #
            # Level 1 -- initial projection
            #
            lvl1_projdata = self._level1(datasets, commonspace, ref_ds,
                                         mappers, residuals)
            #
            # Level 2 -- might iterate multiple times
            #
            # this is the final common space
            self.commonspace = self._level2(datasets, lvl1_projdata, mappers,
                                            residuals)
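        # Optional dimensionality reduction: align the training datasets to the
        # final common space (level 3), average the aligned data, train an SVD
        # on that mean, and keep only the top ``output_dim`` components as a
        # static projection that is later chained after each per-dataset mapper.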
        if params.output_dim is not None:
            mappers = self._level3(datasets)
            self._svd_mapper = SVDMapper()
            self._svd_mapper.train(self._map_and_mean(datasets, mappers))
            self._svd_mapper = StaticProjectionMapper(
                proj=self._svd_mapper.proj[:, :params.output_dim])

    def __call__(self, datasets):
        """Derive a common feature space from a series of datasets.

        Parameters
        ----------
        datasets : sequence of datasets

        Returns
        -------
        A list of trained Mappers matching the number of input datasets.
        """
        if self.commonspace is None:
            self.train(datasets)
        else:
            # Check to make sure we get a list of datasets as input.
            if not isinstance(datasets, (list, tuple, np.ndarray)):
                raise TypeError(
                    "Input datasets should be a sequence "
                    "(of type list, tuple, or ndarray) of datasets.")

        # place datasets into a copy of the list since items
        # will be reassigned
        datasets = list(datasets)

        params = self.params  # for quicker access ;)
        alpha = params.alpha  # for letting me be lazy ;)
        if params.zscore_all:
            if __debug__:
                debug('HPAL', "Z-scoring all datasets")
            # zscore them once while storing corresponding ZScoreMapper's
            # so we can assemble a comprehensive mapper at the end
            # (together with procrustes)
            zmappers = []
            for ids in xrange(len(datasets)):
                zmapper = ZScoreMapper(chunks_attr=None)
                zmappers.append(zmapper)
                zmapper.train(datasets[ids])
                datasets[ids] = zmapper.forward(datasets[ids])

        if alpha < 1:
            datasets, wmappers = self._regularize(datasets, alpha)

        #
        # Level 3 -- final, from-scratch, alignment to final common space
        #
        mappers = self._level3(datasets)
        # return trained mappers for projection from all datasets into the
        # common space
        if params.zscore_all:
            # We need to construct new mappers which would chain
            # zscore and then final transformation
            if params.alpha < 1:
                mappers = [
                    ChainMapper([zm, wm, m])
                    for zm, wm, m in zip(zmappers, wmappers, mappers)
                ]
            else:
                mappers = [
                    ChainMapper([zm, m]) for zm, m in zip(zmappers, mappers)
                ]
        elif params.alpha < 1:
            mappers = [
                ChainMapper([wm, m]) for wm, m in zip(wmappers, mappers)
            ]
        if params.output_dim is not None:
            mappers = [ChainMapper([m, self._svd_mapper]) for m in mappers]
        return mappers

    def _regularize(self, datasets, alpha):
        if __debug__:
            debug('HPAL',
                  "Using regularized hyperalignment with alpha of %d" % alpha)
        wmappers = []
        for ids in xrange(len(datasets)):
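            # With the SVD  X = U * S * Vh  the projection built below is
            #   W = Vh.T * diag(1 / sqrt((1 - alpha) * S**2 + alpha)) * Vh
            # For alpha=1 the scaling term is 1 (essentially no regularization,
            # i.e. plain hyperalignment); for alpha=0 the singular values are
            # inverted, i.e. the data gets whitened, which makes the subsequent
            # alignment behave like CCA.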
            U, S, Vh = np.linalg.svd(datasets[ids])
            S = 1 / np.sqrt((1 - alpha) * np.square(S) + alpha)
            S.resize(len(Vh))
            S = np.matrix(np.diag(S))
            W = np.matrix(Vh.T) * S * np.matrix(Vh)
            wmapper = StaticProjectionMapper(proj=W, auto_train=False)
            wmapper.train(datasets[ids])
            wmappers.append(wmapper)
            datasets[ids] = wmapper.forward(datasets[ids])
        return datasets, wmappers

    def _level1(self, datasets, commonspace, ref_ds, mappers, residuals):
        params = self.params  # for quicker access ;)
        data_mapped = [ds.samples for ds in datasets]
        counts = 1  # number of datasets used so far for generating commonspace
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 1: ds #%i" % i)
            if i == ref_ds:
                continue
            # assign common space to ``space`` of the mapper, because this is
            # where it will be looking for it
            ds_new.sa[m.get_space()] = commonspace
            # find transformation of this dataset into the current common space
            m.train(ds_new)
            # remove common space attribute again to save on memory when the
            # common space is updated for the next iteration
            del ds_new.sa[m.get_space()]
            # project this dataset into the current common space
            ds_ = m.forward(ds_new.samples)
            if params.zscore_common:
                zscore(ds_, chunks_attr=None)
            # replace original dataset with mapped one -- only the reference
            # dataset will remain unchanged
            data_mapped[i] = ds_

            # compute first-level residuals wrt the initial common space
            if residuals is not None:
                residuals[0, i] = np.linalg.norm(ds_ - commonspace)

            # Update the common space. This is an incremental update after
            # processing each 1st-level dataset. Maybe there should be a flag
            # to make a batch update after processing all 1st-level datasets
            # to an identical 1st-level common space
            # TODO: make just a function so we don't waste space
            if params.level1_equal_weight:
                commonspace = params.combiner1(ds_,
                                               commonspace,
                                               weights=(float(counts), 1.0))
            else:
                commonspace = params.combiner1(ds_, commonspace)
            counts += 1
            if params.zscore_common:
                zscore(commonspace, chunks_attr=None)
        return data_mapped

    def _level2(self, datasets, lvl1_data, mappers, residuals):
        params = self.params  # for quicker access ;)
        data_mapped = lvl1_data
        # aggregate all processed 1st-level datasets into a new 2nd-level
        # common space
        commonspace = params.combiner2(data_mapped)

        # XXX Why is this commented out? Who knows what combiner2 is doing and
        # whether it changes the distribution of the data
        #if params.zscore_common:
        #zscore(commonspace, chunks_attr=None)

        ndatasets = len(datasets)
        for loop in xrange(params.level2_niter):
            # 2nd-level alignment starts from the original/unprojected datasets
            # again
            for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
                if __debug__:
                    debug('HPAL_',
                          "Level 2 (%i-th iteration): ds #%i" % (loop, i))

                # Optimization speed up heuristic
                # Slightly modify the common space towards other feature
                # spaces and reduce influence of this feature space for the
                # to-be-computed projection
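                # (with the default mean combiner this is the leave-one-out
                # mean: the average of all currently mapped datasets except
                # dataset i itself)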
                temp_commonspace = (commonspace * ndatasets - data_mapped[i]) \
                                    / (ndatasets - 1)

                if params.zscore_common:
                    zscore(temp_commonspace, chunks_attr=None)
                # assign current common space
                ds_new.sa[m.get_space()] = temp_commonspace
                # retrain the mapper for this dataset
                m.train(ds_new)
                # remove common space attribute again to save on memory when the
                # common space is updated for the next iteration
                del ds_new.sa[m.get_space()]
                # obtain the 2nd-level projection
                ds_ = m.forward(ds_new.samples)
                if params.zscore_common:
                    zscore(ds_, chunks_attr=None)
                # store for 2nd-level combiner
                data_mapped[i] = ds_
                # compute residuals
                if residuals is not None:
                    residuals[1 + loop, i] = np.linalg.norm(ds_ - commonspace)

            commonspace = params.combiner2(data_mapped)

        # and again
        if params.zscore_common:
            zscore(commonspace, chunks_attr=None)

        # return the final common space
        return commonspace

    def _level3(self, datasets):
        params = self.params  # for quicker access ;)
        # create a mapper per dataset
        mappers = [deepcopy(params.alignment) for ds in datasets]

        # key difference from level 2: the common space stays fixed
        #temp_commonspace = commonspace
        # Fixing nproc=0
        if params.nproc == 0:
            from mvpa2.base import warning
            warning("nproc of 0 doesn't make sense. Setting nproc to 1.")
            params.nproc = 1
        # Check for joblib; if it is not available, set nproc to 1
        if params.nproc != 1:
            from mvpa2.base import externals, warning
            if not externals.exists('joblib'):
                warning(
                    "Setting nproc different from 1 requires joblib package, which "
                    "does not seem to exist. Setting nproc to 1.")
                params.nproc = 1

        # start from original input datasets again
        if params.nproc == 1:
            residuals = []
            for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
                if __debug__:
                    debug('HPAL_', "Level 3: ds #%i" % i)
                m, residual = get_trained_mapper(
                    ds_new, self.commonspace, m,
                    self.ca['residual_errors'].enabled)
                if self.ca['residual_errors'].enabled:
                    residuals.append(residual)
        else:
            if __debug__:
                debug('HPAL_',
                      "Level 3: Using joblib with nproc = %d " % params.nproc)
            verbose_level_parallel = 20 \
                if (__debug__ and 'HPAL' in debug.active) else 0
            from joblib import Parallel, delayed
            import sys
            # joblib's 'multiprocessing' backend has known issues of failure on OSX
            # Tested with MacOS 10.12.13, python 2.7.13, joblib v0.10.3
            if params.joblib_backend is None:
                params.joblib_backend = 'threading' if sys.platform == 'darwin' \
                                        else 'multiprocessing'
            res = Parallel(n_jobs=params.nproc,
                           pre_dispatch=params.nproc,
                           backend=params.joblib_backend,
                           verbose=verbose_level_parallel)(
                               delayed(get_trained_mapper)(
                                   ds, self.commonspace, mapper,
                                   self.ca['residual_errors'].enabled)
                               for ds, mapper in zip(datasets, mappers))
            mappers = [m for m, r in res]
            if self.ca['residual_errors'].enabled:
                residuals = [r for m, r in res]

        if self.ca['residual_errors'].enabled:
            self.ca.residual_errors = Dataset(
                samples=np.array(residuals)[None, :])

        return mappers

    def _map_and_mean(self, datasets, mappers):
        params = self.params
        data_mapped = [[] for ds in datasets]
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Mapping training data for SVD: ds #%i" % i)
            ds_ = m.forward(ds_new.samples)
            # XXX should we zscore data before averaging and running SVD?
            # zscore(ds_, chunks_attr=None)
            data_mapped[i] = ds_
        dss_mean = params.combiner2(data_mapped)
        return dss_mean
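
The trained mappers returned by calling a trained Hyperalignment instance can then be used to project each dataset into the common space. A minimal sketch continuing the doctest from the class docstring above (reusing its ``dss`` and ``mappers``):

# forward-map every input dataset into the common, aligned feature space
aligned = [m.forward(ds) for m, ds in zip(mappers, dss)]
# all aligned datasets share the dimensionality of the common space
assert len(set(a.nfeatures for a in aligned)) == 1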
Beispiel #32
0
class S1__(S1_):
    v1__ = ConditionalAttribute(enabled=False)