def __init__(self, **kwargs):
     """Initialize state and verify run-time dependencies.

     Raises
     ------
     RuntimeError
       If a required external is missing: 'pprocess' (when nproc > 1),
       'scipy', or 'h5py' (when results_backend is 'hdf5').
     NotImplementedError
       If results_backend is set to 'native' (not yet supported).
     """
     _shpaldebug("Initializing.")
     ClassWithCollections.__init__(self, **kwargs)
     self.ndatasets = 0
     self.nfeatures = 0
     self.projections = None
     # This option makes the roi_seed in each SL to be selected during
     # feature selection
     self.force_roi_seed = True
     if self.params.nproc is not None and self.params.nproc > 1 \
             and not externals.exists('pprocess'):
         raise RuntimeError("The 'pprocess' module is required for "
                            "multiprocess searchlights. Please either "
                            "install python-pprocess, or reduce `nproc` "
                            "to 1 (got nproc=%i) or set to default None" %
                            self.params.nproc)
     if not externals.exists('scipy'):
         raise RuntimeError("The 'scipy' module is required for "
                            "searchlight hyperalignment.")
     if self.params.results_backend == 'native':
         raise NotImplementedError(
             "'native' mode to handle results is still a "
             "work in progress.")
     # BUG FIX: the backing module is 'h5py', not 'hdf5', and the old
     # message read "required for when".
     if self.params.results_backend == 'hdf5' and \
             not externals.exists('h5py'):
         raise RuntimeError("The 'h5py' module is required "
                            "when results_backend is set to 'hdf5'")
Beispiel #2
0
 def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
     """Initialize the node.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'. Its precise meaning is up to each
       sub-class; in general it tells the node which information about
       the input data to compute and store into the output dataset.
     pass_attr : str, list of str, optional
       Attribute(s) to carry into the resultant dataset: either from the
       sa, fa, or a collections of the dataset given to __call__ (taken
       before postproc is applied, see :meth:`Dataset.get_attr`), or from
       this instance's 'ca' collection (use a 'ca.' prefix).
     postproc : Node instance, optional
       Node applied in `__call__()` as a final processing step on the
       result dataset. If None, nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     if __debug__:
         debug("NO", "Init node '%s' (space: '%s', postproc: '%s')",
               (self.__class__.__name__, space, str(postproc)))
     self.set_space(space)
     self.set_postproc(postproc)
     # Normalize a lone attribute name into a 1-tuple so later code can
     # treat pass_attr uniformly as a sequence.
     self.__pass_attr = (pass_attr,) \
         if isinstance(pass_attr, basestring) else pass_attr
Beispiel #3
0
 def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
     """Set up the node's space, attribute pass-through, and postproc.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'. The concrete meaning depends on
       the sub-class; generally it triggers computing and storing
       "interesting" information about the input data in the output
       dataset.
     pass_attr : str, list of str, optional
       Attribute(s) (from the sa, fa, a collections, see
       :meth:`Dataset.get_attr`) of the dataset passed to __call__
       (before postproc), or from this instance's 'ca' collection
       (prefix with 'ca.'), to copy into the resultant dataset.
     postproc : Node instance, optional
       Node applied in `__call__()` as a final step on the result
       dataset. If None, nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     if __debug__:
         debug("NO",
               "Init node '%s' (space: '%s', postproc: '%s')",
               (self.__class__.__name__, space, str(postproc)))
     self.set_space(space)
     self.set_postproc(postproc)
     # A bare string becomes a 1-tuple for uniform sequence handling.
     if isinstance(pass_attr, basestring):
         pass_attr = (pass_attr,)
     self.__pass_attr = pass_attr
Beispiel #4
0
    def __init__(self, sd=0, distribution='rdist', fpp=None, nbins=400, **kwargs):
        """L2-Norm the values, convert them to p-values of a given distribution.

        Parameters
        ----------
        sd : int
          Samples dimension (if len(x.shape)>1) on which to operate
        distribution : string
          Which distribution to use. Known are: 'rdist' (later normal should
          be there as well)
        fpp : float
          At what p-value (both tails) if not None, to control for false
          positives. It would iteratively prune the tails (tentative real positives)
          until empirical p-value becomes less or equal to numerical.
        nbins : int
          Number of bins for the iterative pruning of positives

        Raises
        ------
        ValueError
          If `distribution` is anything other than 'rdist'.

        WARNING: Highly experimental/slow/etc: no theoretical grounds have been
        presented in any paper, nor proven
        """
        externals.exists('scipy', raise_=True)
        ClassWithCollections.__init__(self, **kwargs)

        self.sd = sd
        # FIX: use the call form of raise (valid in Python 2 and 3) instead
        # of the Python-2-only "raise E, msg" statement, and the idiomatic
        # "not in" membership test.
        if distribution not in ('rdist',):
            raise ValueError("Actually only rdist supported at the moment"
                             " got %s" % distribution)
        self.distribution = distribution
        self.fpp = fpp
        self.nbins = nbins
Beispiel #5
0
    def __init__(self,
                 sd=0,
                 distribution='rdist',
                 fpp=None,
                 nbins=400,
                 **kwargs):
        """L2-Norm the values, convert them to p-values of a given distribution.

        Parameters
        ----------
        sd : int
          Samples dimension (if len(x.shape)>1) on which to operate
        distribution : string
          Which distribution to use. Known are: 'rdist' (later normal should
          be there as well)
        fpp : float
          At what p-value (both tails) if not None, to control for false
          positives. It would iteratively prune the tails (tentative real positives)
          until empirical p-value becomes less or equal to numerical.
        nbins : int
          Number of bins for the iterative pruning of positives

        Raises
        ------
        ValueError
          If `distribution` is anything other than 'rdist'.

        WARNING: Highly experimental/slow/etc: no theoretical grounds have been
        presented in any paper, nor proven
        """
        externals.exists('scipy', raise_=True)
        ClassWithCollections.__init__(self, **kwargs)

        self.sd = sd
        # FIX: Python-2-only "raise E, msg" replaced with the call form,
        # which is valid in both Python 2 and 3; membership test made
        # idiomatic with "not in".
        if distribution not in ('rdist',):
            raise ValueError("Actually only rdist supported at the moment"
                             " got %s" % distribution)
        self.distribution = distribution
        self.fpp = fpp
        self.nbins = nbins
 def __init__(self, **kwargs):
     """Initialize state and verify run-time dependencies.

     Raises
     ------
     RuntimeError
       If a required external is missing: 'pprocess' (when nproc > 1),
       'scipy', or 'h5py' (when results_backend is 'hdf5').
     NotImplementedError
       If results_backend is set to 'native' (not yet supported).
     """
     _shpaldebug("Initializing.")
     ClassWithCollections.__init__(self, **kwargs)
     self.ndatasets = 0
     self.nfeatures = 0
     self.projections = None
     # This option makes the roi_seed in each SL to be selected during
     # feature selection
     self.force_roi_seed = True
     if self.params.nproc is not None and self.params.nproc > 1 \
             and not externals.exists('pprocess'):
         raise RuntimeError("The 'pprocess' module is required for "
                            "multiprocess searchlights. Please either "
                            "install python-pprocess, or reduce `nproc` "
                            "to 1 (got nproc=%i) or set to default None"
                            % self.params.nproc)
     if not externals.exists('scipy'):
         raise RuntimeError("The 'scipy' module is required for "
                            "searchlight hyperalignment.")
     if self.params.results_backend == 'native':
         raise NotImplementedError("'native' mode to handle results is still a "
                                   "work in progress.")
     # BUG FIX: the backing module is 'h5py', not 'hdf5', and the old
     # message read "required for when".
     if self.params.results_backend == 'hdf5' and \
             not externals.exists('h5py'):
         raise RuntimeError("The 'h5py' module is required "
                            "when results_backend is set to 'hdf5'")
Beispiel #7
0
 def __init__(self, **kwargs):
     """Initialize empty alignment state."""
     ClassWithCollections.__init__(self, **kwargs)
     self.commonspace = None
     # Mapper to a low-dimensional subspace derived using SVD on training
     # data.  Initialized here so that the call can access it without it
     # being passed around after training.  Like commonspace, it is
     # required for mapping new subjects.
     self._svd_mapper = None
Beispiel #8
0
 def __init__(self, **kwargs):
     """Set up empty state for later training."""
     ClassWithCollections.__init__(self, **kwargs)
     # Common space and the SVD-derived low-dimensional mapper are created
     # during training; defining them up front lets the call access them
     # without extra plumbing, and both are needed to map new subjects.
     self.commonspace = None
     self._svd_mapper = None
Beispiel #9
0
    def __init__(self, mode='discard', **kwargs):
        """Initialize the element selector.

        Parameters
        ----------
        mode : {'discard', 'select'}
          Decides whether to `select` or to `discard` features.
        """
        ClassWithCollections.__init__(self, **kwargs)
        # Flag whether to select or to discard elements; validated and
        # stored by the setter.
        self._set_mode(mode)
Beispiel #10
0
    def __init__(self, mode='discard', **kwargs):
        """Construct with a selection policy.

        Parameters
        ----------
        mode : {'discard', 'select'}
          Decides whether to `select` or to `discard` features.
        """
        ClassWithCollections.__init__(self, **kwargs)
        # The mode flags whether elements are selected or discarded; the
        # setter performs validation.
        self._set_mode(mode)
Beispiel #11
0
    def __init__(self, tail='both', **kwargs):
        """Initialize with the distribution tail to report.

        Parameters
        ----------
        tail : {'left', 'right', 'any', 'both'}
          Which tail of the distribution to report. For 'any' and 'both'
          it chooses the tail it belongs to based on the comparison to
          p=0.5. In the case of 'any' significance is taken like in a
          one-tailed test.
        """
        ClassWithCollections.__init__(self, **kwargs)
        # The setter validates and stores the chosen tail.
        self._set_tail(tail)
Beispiel #12
0
 def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
     """Initialize the node.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'. Its actual meaning depends heavily
       on the sub-class implementation; in general it is a trigger that
       tells the node to compute and store information about the input
       data that is "interesting" in the context of the corresponding
       processing in the output dataset.
     pass_attr : str, list of str|tuple, optional
       Additional attributes to pass on to an output dataset. Attributes
       can come from any of the three attribute collections of an input
       dataset (sa, fa, a -- see :meth:`Dataset.get_attr`) or from this
       node's conditional attributes (ca). Identify attributes with a
       collection-name prefix, e.g. 'ca.null_prob' for the conditional
       attribute 'null_prob' or 'fa.stats' for the feature attribute
       stats. Instead of a plain identifier, a tuple triggers more
       complex handling: element one is the attribute identifier as
       above; element two is the target attribute collection name (sa,
       fa, or a); element three is the axis number of a multidimensional
       array to swap with the current first axis; element four is a new
       name for the attribute in the output dataset. For example,
       ('ca.null_prob', 'fa', 1, 'pvalues') takes conditional attribute
       'null_prob', swaps its first and second axes, and stores it as
       feature attribute 'pvalues'. Trailing tuple elements may be
       omitted for simplified instructions.
     postproc : Node instance, optional
       Node applied in `__call__()` as a final processing step on the
       result dataset. If None, nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     if __debug__:
         debug("NO", "Init node '%s' (space: '%s', postproc: '%s')",
               (self.__class__.__name__, space, str(postproc)))
     self.set_space(space)
     self.set_postproc(postproc)
     # Wrap a single attribute name into a 1-tuple so downstream code
     # always sees a sequence.
     self.__pass_attr = (pass_attr,) \
         if isinstance(pass_attr, basestring) else pass_attr
Beispiel #13
0
 def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
     """Configure processing space, attribute pass-through, and postproc.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'. The actual meaning depends on the
       sub-class implementation; generally it triggers the node to
       compute and store information about the input data that is
       "interesting" for the corresponding processing in the output
       dataset.
     pass_attr : str, list of str|tuple, optional
       Additional attributes to pass on to an output dataset, taken from
       any attribute collection of an input dataset (sa, fa, a -- see
       :meth:`Dataset.get_attr`) or from this node's conditional
       attributes (ca). Use collection-name prefixes to identify them,
       e.g. 'ca.null_prob' for conditional attribute 'null_prob' or
       'fa.stats' for feature attribute stats. A tuple instead of a
       plain identifier enables more complex operations: (identifier,
       target collection name (sa, fa, or a), axis number of a
       multidimensional array to swap with the first axis, new attribute
       name in the output dataset). Example: ('ca.null_prob', 'fa', 1,
       'pvalues') stores conditional attribute 'null_prob' as feature
       attribute 'pvalues' with its first two axes swapped. Consecutive
       trailing tuple elements may be left out for simplified
       instructions.
     postproc : Node instance, optional
       Node to perform post-processing of results, applied in
       `__call__()` as a final step on the result dataset. If None,
       nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     if __debug__:
         debug("NO", "Init node '%s' (space: '%s', postproc: '%s')",
               (self.__class__.__name__, space, str(postproc)))
     self.set_space(space)
     self.set_postproc(postproc)
     # A lone string becomes a 1-tuple for uniform sequence handling.
     if isinstance(pass_attr, basestring):
         pass_attr = (pass_attr,)
     self.__pass_attr = pass_attr
Beispiel #14
0
 def __init__(self, space=None, postproc=None, **kwargs):
     """Initialize the node.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'. Its exact meaning depends heavily
       on the sub-class implementation; in general it tells the node to
       compute and store information about the input data that is
       "interesting" for the corresponding processing in the output
       dataset.
     postproc : Node instance, optional
       Node applied in `__call__()` as a final processing step on the
       result dataset. If None, nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     self.set_space(space)
     self.set_postproc(postproc)
Beispiel #15
0
 def __init__(self, space=None, postproc=None, **kwargs):
     """Set up the node's processing space and post-processing node.

     Parameters
     ----------
     space : str, optional
       Name of the 'processing space'; the concrete meaning is defined by
       each sub-class. Generally it triggers computing and storing
       "interesting" information about the input data into the output
       dataset.
     postproc : Node instance, optional
       Node to perform post-processing of results, applied in
       `__call__()` as a final step on the result dataset. If None,
       nothing is done.
     """
     ClassWithCollections.__init__(self, **kwargs)
     if __debug__:
         debug("NO",
               "Init node '%s' (space: '%s', postproc: '%s')",
               (self.__class__.__name__, space, str(postproc)))
     self.set_space(space)
     self.set_postproc(postproc)
Beispiel #16
0
 def __init__(self, **kwargs):
     """Delegate all construction to ClassWithCollections."""
     ClassWithCollections.__init__(self, **kwargs)
Beispiel #17
0
 def __init__(self, **kwargs):
     """Pass-through constructor."""
     # XXX build such an example once invoking the parent constructor
     # (TestClassProper.__init__) is actually needed here.
     ClassWithCollections.__init__(self, **kwargs)
Beispiel #18
0
 def __init__(self, **kwargs):
     """Initialize with an empty common space."""
     ClassWithCollections.__init__(self, **kwargs)
     # None until a common space is available.
     self.commonspace = None
Beispiel #19
0
 def __init__(self, **kwargs):
     """Forward construction to the base class."""
     # XXX craft a real example when invoking the parent constructor
     # (TestClassProper.__init__) becomes necessary.
     ClassWithCollections.__init__(self, **kwargs)
Beispiel #20
0
 def __init__(self, *args, **kwargs):
     """Base Kernel class has no parameters of its own."""
     ClassWithCollections.__init__(self, *args, **kwargs)
     # Implementation-specific version of the kernel; None until set.
     self._k = None
Beispiel #21
0
 def __init__(self, *args, **kwargs):
     """Construct a parameter-less base kernel."""
     ClassWithCollections.__init__(self, *args, **kwargs)
     # Holds the implementation-specific version of the kernel
     # (initially empty).
     self._k = None
Beispiel #22
0
 def __init__(self, **kwargs):
     """Start with no common space defined."""
     ClassWithCollections.__init__(self, **kwargs)
     # Placeholder; remains None until a common space exists.
     self.commonspace = None