Example #1
    def __init__(self,
                 input_dim=None,
                 output_dim=None,
                 dtype=None,
                 include_last_sample=True):
        """
        For the ``include_last_sample`` switch have a look at the
        SFANode class docstring.
         """
        super(SFANode, self).__init__(input_dim, output_dim, dtype)
        self._include_last_sample = include_last_sample

        # init two covariance matrices
        # one for the input data
        self._cov_mtx = CovarianceMatrix(dtype)
        # one for the derivatives
        self._dcov_mtx = CovarianceMatrix(dtype)

        # set routine for eigenproblem
        self._symeig = symeig

        # SFA eigenvalues and eigenvectors, will be set after training
        self.d = None
        self.sf = None  # second index for outputs
        self.avg = None
        self._bias = None  # avg multiplied with sf
        self.tlen = None
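
A minimal usage sketch of this constructor through MDP's standard train/execute protocol (assuming the ``mdp`` package is available; the toy signal is purely illustrative):

import numpy as np
import mdp

# toy signal: time along axis 0, channels along axis 1
t = np.linspace(0, 4 * np.pi, 1000)
x = np.column_stack([np.sin(t) + 0.1 * np.random.randn(1000),
                     np.cos(3 * t) + 0.1 * np.random.randn(1000)])

sfa = mdp.nodes.SFANode(output_dim=1, include_last_sample=True)
sfa.train(x)         # accumulates _cov_mtx and _dcov_mtx
sfa.stop_training()  # solves the eigenproblem, fills d, sf, avg, tlen
y = sfa.execute(x)   # slowest-varying output signal
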
Example #2
    def __init__(self,
                 tol=1e-4,
                 max_cycles=100,
                 verbose=False,
                 input_dim=None,
                 output_dim=None,
                 dtype=None):
        """Initializes an object of type 'FANode'.
        
        :param tol: Tolerance (minimum change in log-likelihood before exiting
            the EM algorithm).
        :type tol: float
        
        :param max_cycles: Maximum number of EM cycles.
        :type max_cycles: int
        
        :param verbose: If True, print the log-likelihood during the EM cycles.
        :type verbose: bool
        
        :param input_dim: The input dimensionality.
        :type input_dim: int
        
        :param output_dim: The output dimensionality.
        :type output_dim: int
        
        :param dtype: The datatype.
        :type dtype: numpy.dtype or str
        """

        # Notation as in Max Welling's notes
        super(FANode, self).__init__(input_dim, output_dim, dtype)
        self.tol = tol
        self.max_cycles = max_cycles
        self.verbose = verbose
        self._cov_mtx = CovarianceMatrix(dtype, bias=True)
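
A hedged usage sketch (assuming the ``mdp`` package; data and dimensions are illustrative). The EM loop runs inside ``stop_training`` until the log-likelihood improves by less than ``tol`` or ``max_cycles`` is reached:

import numpy as np
import mdp

# toy data: 5 observed variables driven by 2 latent factors plus noise
latents = np.random.randn(500, 2)
mixing = np.random.randn(2, 5)
x = latents @ mixing + 0.1 * np.random.randn(500, 5)

fa = mdp.nodes.FANode(tol=1e-4, max_cycles=100, verbose=False, output_dim=2)
fa.train(x)
fa.stop_training()  # runs the EM cycles
y = fa.execute(x)   # estimated latent factors
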
Example #3
    def __init__(self,
                 input_dim=None,
                 output_dim=None,
                 dtype=None,
                 svd=False,
                 reduce=False,
                 var_rel=1E-12,
                 var_abs=1E-15,
                 var_part=None):
        """The number of principal components to be kept can be specified as
        'output_dim' directly (e.g. 'output_dim=10' means 10 components
        are kept) or by the fraction of variance to be explained
        (e.g. 'output_dim=0.95' means that as many components as necessary
        will be kept in order to explain 95% of the input variance).

        Other Keyword Arguments:

        svd -- if True use Singular Value Decomposition instead of the
               standard eigenvalue problem solver. Use it when PCANode
               complains about singular covariance matrices.

        reduce -- Keep only those principal components which have a variance
                  larger than 'var_abs' and a variance relative to the
                  first principal component larger than 'var_rel' and a
                  variance relative to total variance larger than 'var_part'
                  (set var_part to None or 0 for no filtering).
                  Note: when the 'reduce' switch is enabled, the actual number
                  of principal components (self.output_dim) may be different
                  from that set when creating the instance.
        """
        # this must occur *before* calling super!
        self.desired_variance = None
        super(PCANode, self).__init__(input_dim, output_dim, dtype)
        self.svd = svd
        # set routine for eigenproblem
        if svd:
            self._symeig = nongeneral_svd
        else:
            self._symeig = symeig
        self.var_abs = var_abs
        self.var_rel = var_rel
        self.var_part = var_part
        self.reduce = reduce
        # empirical covariance matrix, updated during the training phase
        self._cov_mtx = CovarianceMatrix(dtype)
        # attributes that are defined in stop_training
        self.d = None  # eigenvalues
        self.v = None  # eigenvectors, first index for coordinates
        self.total_variance = None
        self.tlen = None
        self.avg = None
        self.explained_variance = None
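
To make the two ways of setting ``output_dim`` concrete, a short sketch (assuming the ``mdp`` package; the data is random and purely illustrative):

import numpy as np
import mdp

x = np.random.randn(1000, 10)

# keep exactly 3 principal components
pca_fixed = mdp.nodes.PCANode(output_dim=3)
pca_fixed.train(x)
pca_fixed.stop_training()

# keep as many components as needed to explain 95% of the variance
pca_frac = mdp.nodes.PCANode(output_dim=0.95)
pca_frac.train(x)
pca_frac.stop_training()
print(pca_frac.output_dim, pca_frac.explained_variance)
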
Example #4
def _get_iterative_cov(layer, batch, conv_method: str = 'median'):
    """Update (or create) the running covariance estimate for `layer`
    with a new activation batch and return its current internal matrix."""

    if len(batch.shape) == 4:  # conv layer (B x C x H x W)
        if conv_method == 'median':
            batch = np.median(batch, axis=(2, 3))  # channel median
        elif conv_method == 'max':
            batch = np.max(batch, axis=(2, 3))  # channel max
        elif conv_method == 'mean':
            batch = np.mean(batch, axis=(2, 3))

    if layer not in COVARIANCE_MATRICES:
        COVARIANCE_MATRICES[layer] = CovarianceMatrix()
        COVARIANCE_MATRICES[layer]._init_internals(batch)
    else:
        COVARIANCE_MATRICES[layer].update(batch)
    return COVARIANCE_MATRICES[layer]._cov_mtx
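
The function keeps one running estimate per layer in a module-level ``COVARIANCE_MATRICES`` dict and returns its (unnormalized) internal matrix. A hedged feeding sketch with a hypothetical layer key and random conv activations, assuming the same ``CovarianceMatrix`` class as in the examples above:

import numpy as np

COVARIANCE_MATRICES = {}  # module-level store assumed by _get_iterative_cov

# simulated conv activations: batch of 32, 16 channels, 8x8 feature maps
for _ in range(10):
    batch = np.random.randn(32, 16, 8, 8)
    cov = _get_iterative_cov('conv1', batch, conv_method='mean')

print(cov.shape)  # (16, 16): one channel-covariance accumulator per layer
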
Example #5
    def __init__(self, tol=1e-4, max_cycles=100, verbose=False,
                 input_dim=None, output_dim=None, dtype=None):
        """
        :Parameters:
          tol
            tolerance (minimum change in log-likelihood before exiting
            the EM algorithm)
          max_cycles
            maximum number of EM cycles
          verbose
            if True, print the log-likelihood during the EM cycles
        """
        # Notation as in Max Welling's notes
        super(FANode, self).__init__(input_dim, output_dim, dtype)
        self.tol = tol
        self.max_cycles = max_cycles
        self.verbose = verbose
        self._cov_mtx = CovarianceMatrix(dtype, bias=True)
Example #6
        def record_layer_saturation(layer: torch.nn.Module, input, output):
            """Hook to register in `layer` module."""

            # Increment step counter
            layer.forward_iter += 1
            if layer.forward_iter % layer.interval == 0:
                activations_batch = output.data.cpu().numpy()
                training_state = 'train' if layer.training else 'eval'
                setattr(layer,
                        f'{training_state}_layer_history',
                        activations_batch)
                eig_vals = None
                if 'lsat' in stats:

                    if len(activations_batch.shape) == 4:  # conv layer (B x C x H x W)
                        if self.conv_method == 'median':
                            activations_batch = np.median(
                                activations_batch,
                                axis=(2, 3))  # channel median
                        elif self.conv_method == 'max':
                            activations_batch = np.max(
                                activations_batch, axis=(2, 3))  # channel max
                        elif self.conv_method == 'mean':
                            activations_batch = np.mean(activations_batch,
                                                        axis=(2, 3))

                    if layer.name in self.logs[f'{training_state}-saturation']:
                        self.logs[f'{training_state}-saturation'][
                            layer.name].update(activations_batch)
                    else:
                        self.logs[f'{training_state}-saturation'][
                            layer.name] = CovarianceMatrix()
                        self.logs[f'{training_state}-saturation'][
                            layer.name]._init_internals(activations_batch)
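
Because the hook above is a closure over ``self`` and ``stats``, it must be registered from within the enclosing tracker object; the mechanics, though, are PyTorch's standard ``register_forward_hook``. A runnable sketch with a simplified stand-in hook (``record_layer_saturation_demo`` is hypothetical, not part of the source):

import torch

def record_layer_saturation_demo(layer, input, output):
    # simplified stand-in for the closure above: count forward passes
    # and grab activations every `interval` steps
    layer.forward_iter += 1
    if layer.forward_iter % layer.interval == 0:
        activations_batch = output.data.cpu().numpy()
        print(layer.name, activations_batch.shape)

layer = torch.nn.Linear(128, 64)
layer.forward_iter = 0  # step counter incremented by the hook
layer.interval = 10     # record activations every 10th forward pass
layer.name = 'fc1'      # key used in the saturation logs

handle = layer.register_forward_hook(record_layer_saturation_demo)
for _ in range(20):
    _ = layer(torch.randn(32, 128))  # hook fires on every forward pass
handle.remove()                      # detach the hook when done
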
Example #7
    def _init_cov(self):
        # init two covariance matrices
        # one for the input data
        self._cov_mtx = CovarianceMatrix(self.dtype)
        # one for the derivatives
        self._dcov_mtx = CovarianceMatrix(self.dtype)
Example #8
    def __init__(self,
                 input_dim=None,
                 output_dim=None,
                 dtype=None,
                 svd=False,
                 reduce=False,
                 var_rel=1E-12,
                 var_abs=1E-15,
                 var_part=None):
        """Initializes an object of type 'PCANode'.

        The number of principal components to be kept can be specified as
        'output_dim' directly (e.g. 'output_dim=10' means 10 components
        are kept) or by the fraction of variance to be explained
        (e.g. 'output_dim=0.95' means that as many components as necessary
        will be kept in order to explain 95% of the input variance).
        
        :param input_dim: Dimensionality of the input.
            Default is None.
        :type input_dim: int
        
        :param output_dim: Dimensionality of the output.
            Default is None.
        :type output_dim: int
        
        :param dtype: Datatype of the input.
            Default is None.
        :type dtype: numpy.dtype or str
        
        :param svd: If True use Singular Value Decomposition instead of the
            standard eigenvalue problem solver. Use it when PCANode
            complains about singular covariance matrices.
            Default is False.
        :type svd: bool
        
        :param reduce: Keep only those principal components which have a variance
            larger than 'var_abs' and a variance relative to the
            first principal component larger than 'var_rel' and a
            variance relative to total variance larger than 'var_part'
            (set var_part to None or 0 for no filtering).
            Default is False.
        :type reduce: bool
            
        .. note:: 
            When the *reduce* switch is enabled, the actual number
            of principal components (self.output_dim) may be different
            from that set when creating the instance.
            
        :param var_rel: Variance relative to first principal component threshold.
            Default is 1E-12.
        :type var_rel: float
        
        :param var_abs: Absolute variance threshold.
            Default is 1E-15.
        :type var_abs: float
        
        :param var_part: Variance relative to total variance threshold.
            Default is None.
        :type var_part: float
        """

        # this must occur *before* calling super!
        self.desired_variance = None
        super(PCANode, self).__init__(input_dim, output_dim, dtype)
        self.svd = svd
        # set routine for eigenproblem
        if svd:
            self._symeig = nongeneral_svd
        else:
            self._symeig = symeig
        self.var_abs = var_abs
        self.var_rel = var_rel
        self.var_part = var_part
        self.reduce = reduce
        # empirical covariance matrix, updated during the training phase
        self._cov_mtx = CovarianceMatrix(dtype)
        # attributes that are defined in stop_training
        self.d = None  # eigenvalues
        self.v = None  # eigenvectors, first index for coordinates
        self.total_variance = None
        self.tlen = None
        self.avg = None
        self.explained_variance = None
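
To see the ``reduce`` filtering in action, a sketch (assuming the ``mdp`` package) with data whose trailing directions carry negligible variance, so they fall below ``var_abs`` and ``var_rel``:

import numpy as np
import mdp

# 5 informative directions plus 5 near-constant ones
x = np.hstack([np.random.randn(1000, 5),
               1e-9 * np.random.randn(1000, 5)])

pca = mdp.nodes.PCANode(reduce=True, var_abs=1e-15, var_rel=1e-12)
pca.train(x)
pca.stop_training()
# output_dim may now be smaller than the input dimensionality
print(pca.output_dim, pca.d)
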
Example #9
    def __init__(self, input_dim=None, output_dim=None, dtype=None,
                 include_last_sample=True, rank_deficit_method='none'):
        """
        Initialize an object of type 'SFANode'.

        :param input_dim: The input dimensionality.
        :type input_dim: int
        
        :param output_dim: The output dimensionality.
        :type output_dim: int
        
        :param dtype: The datatype.
        :type dtype: numpy.dtype or str
        
        :param include_last_sample: If ``False`` the `train` method discards the 
            last sample in every chunk during training when calculating 
            the covariance matrix.
            The last sample is in this case only used for calculating the
            covariance matrix of the derivatives. The switch should be set
            to ``False`` if you plan to train with several small chunks. For
            example we can split a sequence (index is time)::

                x_1 x_2 x_3 x_4
    
            in smaller parts like this::

                x_1 x_2
                x_2 x_3
                x_3 x_4

            The SFANode will see 3 derivatives for the temporal covariance
            matrix, and the first 3 points for the spatial covariance matrix.
            Of course you will need to use a generator that *connects* the
            small chunks (the last sample needs to be sent again in the next
            chunk). If ``include_last_sample`` is ``True``, then depending on the
            generator you use, you would either get::

                x_1 x_2
                x_2 x_3
                x_3 x_4

            in which case the last sample of every chunk would be used twice
            when calculating the covariance matrix, or::

                x_1 x_2
                x_3 x_4

            in which case you lose the derivative between ``x_2`` and ``x_3``.

            If you plan to train with a single big chunk leave
            ``include_last_sample`` to the default value, i.e. ``True``.

            You can even change this behaviour during training. Just set the
            corresponding switch in the `train` method.
        :type include_last_sample: bool
        
        :param rank_deficit_method: Possible values: 'none' (default), 'reg',
            'pca', 'svd', 'ldl', 'auto'.
            If not 'none', the ``stop_training`` method solves the SFA eigenvalue
            problem in a way that is robust against linear redundancies in
            the input data. Such redundancies would otherwise lead to a rank
            deficit in the covariance matrix, which usually yields a
            SymeigException ('Covariance matrices may be singular').
            There are several solving methods implemented:

            reg  - works by regularization
            pca  - works by PCA
            svd  - works by SVD
            ldl  - works by LDL decomposition (requires SciPy >= 1.0)

            auto - will eventually select the best-benchmarked method of the
                   above; currently it simply selects pca.

            Note: If you already received an exception
            SymeigException ('Covariance matrices may be singular')
            you can manually set the solving method for an existing node::

               sfa.set_rank_deficit_method('pca')

            That means::

               sfa = SFANode(rank_deficit_method='pca')

            is equivalent to::

               sfa = SFANode()
               sfa.set_rank_deficit_method('pca')

            After such an adjustment you can run ``stop_training()`` again,
            which would save a potentially time-consuming rerun of all
            ``train()`` calls.
        :type rank_deficit_method: str
        """
        super(SFANode, self).__init__(input_dim, output_dim, dtype)
        self._include_last_sample = include_last_sample

        # init two covariance matrices
        # one for the input data
        self._cov_mtx = CovarianceMatrix(dtype)
        # one for the derivatives
        self._dcov_mtx = CovarianceMatrix(dtype)

        # set routine for eigenproblem
        self.set_rank_deficit_method(rank_deficit_method)
        self.rank_threshold = 1e-12
        self.rank_deficit = 0

        # SFA eigenvalues and eigenvectors, will be set after training
        self.d = None
        self.sf = None  # second index for outputs
        self.avg = None
        self._bias = None  # avg multiplied with sf
        self.tlen = None
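
The chunked-training scheme described for ``include_last_sample`` can be made concrete. A sketch (assuming the ``mdp`` package) of a generator that resends each chunk's last sample as the first sample of the next chunk, paired with ``include_last_sample=False``:

import numpy as np
import mdp

t = np.linspace(0, 8 * np.pi, 4001)
x = np.column_stack([np.sin(t), np.cos(7 * t)])

def overlapping_chunks(data, size):
    # each chunk starts with the previous chunk's last sample,
    # so no derivative is lost at the chunk boundaries
    for start in range(0, len(data) - 1, size - 1):
        yield data[start:start + size]

sfa = mdp.nodes.SFANode(output_dim=1, include_last_sample=False)
for chunk in overlapping_chunks(x, 500):
    sfa.train(chunk)
sfa.stop_training()
y = sfa.execute(x)
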