Example #1
    def _stop_training(self):
        self.data = numx.array(self.data, dtype=self.dtype)
        self.data.shape = (self.tlen, self.input_dim)

        # choose initial centroids unless they are already given
        if not self._centroids:
            import random

            centr_idx = random.sample(range(self.tlen), self._num_clusters)
            # numx_rand.permutation(self.tlen)[:self._num_clusters]
            centroids = self.data[centr_idx]
        else:
            centroids = self._centroids

        for step in range(self.max_iter):
            # list of (position_sum, point_count) accumulators per cluster
            new_centroids = [(0.0, 0.0)] * len(centroids)
            # cluster
            for x in self.data:
                idx = self._nearest_centroid_idx(x, centroids)
                # update position and count
                pos_count = (new_centroids[idx][0] + x, new_centroids[idx][1] + 1.0)
                new_centroids[idx] = pos_count

            # get new centroid position
            new_centroids = numx.array(
                [c[0] / c[1] if c[1] > 0.0 else centroids[idx] for idx, c in enumerate(new_centroids)]
            )
            # check if we are stable
            if numx.all(new_centroids == centroids):
                self._centroids = centroids
                return
            centroids = new_centroids
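Note: the method above is MDP's in-node k-means loop. As a point of reference, here is a minimal standalone sketch of the same algorithm in plain NumPy; the function name and parameters are illustrative, not part of MDP.

import numpy as np

def kmeans(data, num_clusters, max_iter=100, seed=0):
    rng = np.random.default_rng(seed)
    # pick distinct rows of the data as initial centroids
    centroids = data[rng.choice(len(data), num_clusters, replace=False)]
    for _ in range(max_iter):
        # assign each point to its nearest centroid
        dists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # move each centroid to the mean of its points; keep the old
        # position if a cluster received no points (as the code above does)
        new_centroids = np.array([
            data[labels == k].mean(axis=0) if np.any(labels == k) else centroids[k]
            for k in range(num_clusters)])
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return centroids

centroids = kmeans(np.random.default_rng(1).random((100, 2)), num_clusters=3)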
Example #2
    def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
        """
        Input arguments:
        n -- Number of maxima and minima to remember
        d -- Minimum gap between two hits

        real_dtype -- dtype of sequence items
        integer_dtype -- dtype of sequence indices
        Note: be careful with dtypes!
        """
        self.n = int(n)
        self.d = int(d)
        self.iM = numx.zeros((n, ), dtype=integer_dtype)
        self.im = numx.zeros((n, ), dtype=integer_dtype)
        
        real_dtype = numx.dtype(real_dtype)
        if real_dtype in mdp.utils.get_dtypes('AllInteger'):
            max_num = numx.iinfo(real_dtype).max
            min_num = numx.iinfo(real_dtype).min
        else:
            max_num = numx.finfo(real_dtype).max
            min_num = numx.finfo(real_dtype).min
        self.M = numx.array([min_num]*n, dtype=real_dtype)
        self.m = numx.array([max_num]*n, dtype=real_dtype)
        
        self.lM = 0
        self.lm = 0
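Note: the iinfo/finfo branch above initializes the maxima buffer with the smallest representable value and the minima buffer with the largest, so any observed sample replaces them. A minimal sketch of that dtype dispatch, assuming plain NumPy:

import numpy as np

def extreme_sentinels(dtype):
    """Return (min, max) representable values for a numeric dtype."""
    dtype = np.dtype(dtype)
    info = np.iinfo(dtype) if np.issubdtype(dtype, np.integer) else np.finfo(dtype)
    return info.min, info.max

print(extreme_sentinels("d"))  # float64 limits
print(extreme_sentinels("l"))  # platform integer limits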
Example #3
    def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
        """Initializes an object of type 'OneDimensionalHitParade'.
        
        :param n: Number of maxima and minima to remember.
        :type n: int
        
        :param d: Minimum gap between two hits.
        :type d: int
        
        :param real_dtype: Datatype of sequence items
        :type real_dtype: numpy.dtype or str
        
        :param integer_dtype: Datatype of sequence indices
        :type integer_dtype: numpy.dtype or str
        """
        self.n = int(n)
        self.d = int(d)
        self.iM = numx.zeros((n, ), dtype=integer_dtype)
        self.im = numx.zeros((n, ), dtype=integer_dtype)

        real_dtype = numx.dtype(real_dtype)
        if real_dtype in mdp.utils.get_dtypes('AllInteger'):
            max_num = numx.iinfo(real_dtype).max
            min_num = numx.iinfo(real_dtype).min
        else:
            max_num = numx.finfo(real_dtype).max
            min_num = numx.finfo(real_dtype).min
        self.M = numx.array([min_num] * n, dtype=real_dtype)
        self.m = numx.array([max_num] * n, dtype=real_dtype)

        self.lM = 0
        self.lm = 0
Example #4
 def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
     """Initializes an object of type 'OneDimensionalHitParade'.
     
     :param n: Number of maxima and minima to remember.
     :type n: int
     
     :param d: Minimum gap between two hits.
     :type d: int
     
     :param real_dtype: Datatype of sequence items
     :type real_dtype: numpy.dtype or str
     
     :param integer_dtype: Datatype of sequence indices
     :type integer_dtype: numpy.dtype or str
     """
     self.n = int(n)
     self.d = int(d)
     self.iM = numx.zeros((n, ), dtype=integer_dtype)
     self.im = numx.zeros((n, ), dtype=integer_dtype)
     
     real_dtype = numx.dtype(real_dtype)
     if real_dtype in mdp.utils.get_dtypes('AllInteger'):
         max_num = numx.iinfo(real_dtype).max
         min_num = numx.iinfo(real_dtype).min
     else:
         max_num = numx.finfo(real_dtype).max
         min_num = numx.finfo(real_dtype).min
     self.M = numx.array([min_num]*n, dtype=real_dtype)
     self.m = numx.array([max_num]*n, dtype=real_dtype)
     
     self.lM = 0
     self.lm = 0
Example #5
    def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
        """
        Input arguments:
        n -- Number of maxima and minima to remember
        d -- Minimum gap between two hits

        real_dtype -- dtype of sequence items
        integer_dtype -- dtype of sequence indices
        Note: be careful with dtypes!
        """
        self.n = int(n)
        self.d = int(d)
        self.iM = numx.zeros((n, ), dtype=integer_dtype)
        self.im = numx.zeros((n, ), dtype=integer_dtype)
        
        real_dtype = numx.dtype(real_dtype)
        if real_dtype in mdp.utils.get_dtypes('AllInteger'):
            max_num = numx.iinfo(real_dtype).max
            min_num = numx.iinfo(real_dtype).min
        else:
            max_num = numx.finfo(real_dtype).max
            min_num = numx.finfo(real_dtype).min
        self.M = numx.array([min_num]*n, dtype=real_dtype)
        self.m = numx.array([max_num]*n, dtype=real_dtype)
        
        self.lM = 0
        self.lm = 0
Example #6
    def _stop_training(self):
        self.data = numx.array(self.data, dtype=self.dtype)
        self.data.shape = (self.tlen, self.input_dim)

        # choose initial centroids unless they are already given
        if not self._centroids:
            import random
            centr_idx = random.sample(range(self.tlen), self._num_clusters)
            #numx_rand.permutation(self.tlen)[:self._num_clusters]
            centroids = self.data[centr_idx]
        else:
            centroids = self._centroids

        for step in range(self.max_iter):
            # list of (position_sum, point_count) accumulators per cluster
            new_centroids = [(0., 0.)] * len(centroids)
            # cluster
            for x in self.data:
                idx = self._nearest_centroid_idx(x, centroids)
                # update position and count
                pos_count = (new_centroids[idx][0] + x,
                             new_centroids[idx][1] + 1.)
                new_centroids[idx] = pos_count

            # get new centroid position
            new_centroids = numx.array([
                c[0] / c[1] if c[1] > 0. else centroids[idx]
                for idx, c in enumerate(new_centroids)
            ])
            # check if we are stable
            if numx.all(new_centroids == centroids):
                self._centroids = centroids
                return
            centroids = new_centroids
Example #7
 def _set_dtype(self, dtype):
     # when typecode is set, we set the whitening node if needed and
     # the SFA and ICA weights
     self._dtype = dtype
     if not self.whitened and self.white.dtype is None:
         self.white.dtype = dtype
     self.icaweights = numx.array(self.icaweights, dtype)
     self.sfaweights = numx.array(self.sfaweights, dtype)
Example #8
 def _set_dtype(self, dtype):
     # when typecode is set, we set the whitening node if needed and
     # the SFA and ICA weights
     self._dtype = dtype
     if not self.whitened and self.white.dtype is None:
         self.white.dtype = dtype
     self.icaweights = numx.array(self.icaweights, dtype)
     self.sfaweights = numx.array(self.sfaweights, dtype)
Example #9
    def _get_nearest_nodes(self, x):
        """Return the two nodes in the graph that are nearest to x and their
        squared distances.
        :param x: Coordinates of the point to compute distances to, in
            order to specify the nearest nodes.
        :type x: numpy.ndarray
        
        :return: The two nearest nodes and their squared distances to x,
            as ((node1, node2), [dist1, dist2]).
        :rtype: tuple
        """

        # distance function
        def _distance_from_node(node):
            #return norm(node.data.pos-x)**2
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp)

        g = self.graph
        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()[:2]
        #nearest = [g.nodes[idx] for idx in ids]
        #return nearest, distances[ids]
        return (g.nodes[ids[0]], g.nodes[ids[1]]), distances.take(ids)
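Note: argsort()[:2] sorts all distances just to find the two smallest. As an alternative, a sketch using np.argpartition (illustrative, not MDP's implementation), which avoids fully sorting the distance array:

import numpy as np

points = np.random.default_rng(0).random((50, 3))
x = np.array([0.5, 0.5, 0.5])

sq_dists = ((points - x) ** 2).sum(axis=1)
ids = np.argpartition(sq_dists, 2)[:2]   # the two smallest, in arbitrary order
ids = ids[np.argsort(sq_dists[ids])]     # order the two winners
nearest_two, their_dists = points[ids], sq_dists[ids]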
Example #10
    def _rank_nodes_by_distance(self, x):
        """Return the nodes in the graph in a list ranked by their squared
        distance to x.
        
        :param x: Point to compute distance to.
        :type x: numpy.ndarray
        
        :return: List of nodes ordered by the distance of the node to x.
        :rtype: list
        """

        #TODO: Refactor together with GNGNode._get_nearest_nodes

        # distance function
        def _distance_from_node(node):
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp)  # maps to mdp.numx.dot

        g = self.graph

        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()
        ranked_nodes = [g.nodes[id] for id in ids]

        return ranked_nodes
Example #11
    def __init__(self, input_dim, connections):
        """Create a generic switchboard.

        The input and output dimension as well as dtype have to be fixed
        at initialization time.

        Keyword arguments:
        input_dim -- Dimension of the input data (number of connections).
        connections -- 1d Array or sequence with an entry for each output
            connection, containing the corresponding index of the
            input connection.
        """
        # check connections for inconsistencies
        if len(connections) == 0:
            err = "Received empty connection list."
            raise SwitchboardException(err)
        if numx.nanmax(connections) >= input_dim:
            err = ("One or more switchboard connection "
                   "indices exceed the input dimension.")
            raise SwitchboardException(err)
        # checks passed
        self.connections = numx.array(connections)
        output_dim = len(connections)
        super(Switchboard, self).__init__(input_dim=input_dim,
                                          output_dim=output_dim)
        # try to invert connections
        if self.input_dim == self.output_dim and len(
                numx.unique(self.connections)) == self.input_dim:
            self.inverse_connections = numx.argsort(self.connections)
        else:
            self.inverse_connections = None
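Note: a short usage sketch for the constructor above (assumes MDP is installed; the values mirror the routing test in Example #18 below):

import mdp
import numpy as np

sboard = mdp.hinet.Switchboard(input_dim=3, connections=[2, 0, 1])
x = np.array([[1., 2., 3.]])
print(sboard.execute(x))  # [[3. 1. 2.]] -- output j reads input connections[j]
# the connections form a permutation, so inverse_connections is defined
print(sboard.inverse(np.array([[3., 1., 2.]])))  # [[1. 2. 3.]]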
Example #12
File: flows.py Project: fagan2888/Oger-1
    def train(self, data_iterables):
        data_iterables = self._train_check_iterables(data_iterables)

        if self.external_input_range is None:
            external_input_range = []
        else:
            external_input_range = self.external_input_range

        # train each Node successively
        for i in range(len(self.flow)):
            if self.verbose:
                print "Training node #%d (%s)" % (i, str(self.flow[i]))
            if not (data_iterables[i] == [] or data_iterables[i] is None):
                # Delay the input timeseries with len(flow)-1, because every node connection introduces a one timestep delay
                datax = [x[0:-i, :] for x in data_iterables[i][0]]
                datay = []
                for x in data_iterables[i][0]:
                    c = numx.array([True] * x.shape[1])
                    c[external_input_range] = False
                    datay.append(x[i:, c])
            else:
                datax, datay = [], []
            self._train_node(zip(datax, datay), i)
            if self.verbose:
                print "Training finished"

        self._close_last_node()
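Note: a toy sketch of the delay alignment performed above: inputs are truncated at the end and targets shifted forward so that x[t] is paired with x[t+i]. Observe that for the first node (i == 0) the slice x[0:-i, :] evaluates to x[0:0, :], an empty array, so that case needs separate handling.

import numpy as np

x = np.arange(20.).reshape(10, 2)  # toy timeseries: 10 steps, 2 channels
i = 2                              # depth of the node being trained
datax = x[:-i, :]                  # inputs, truncated at the end
datay = x[i:, :]                   # targets, shifted forward by i steps
assert len(datax) == len(datay)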
Example #13
    def _rank_nodes_by_distance(self, x):
        """Return the nodes in the graph in a list ranked by their squared
        distance to x.
        
        :param x: Point to compute distance to.
        :type x: numpy.ndarray
        
        :return: List of nodes ordered by the distance of the node to x.
        :rtype: list
        """

        #TODO: Refactor together with GNGNode._get_nearest_nodes

        # distance function
        def _distance_from_node(node):
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp) # maps to mdp.numx.dot

        g = self.graph

        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()
        ranked_nodes = [g.nodes[id] for id in ids]

        return ranked_nodes
Example #14
File: flows.py Project: JianboTang/Oger
    def train(self, data_iterables):
        data_iterables = self._train_check_iterables(data_iterables)

        if self.external_input_range is None:
            external_input_range = []
        else:
            external_input_range = self.external_input_range

        # train each Node successively
        for i in range(len(self.flow)):
            if self.verbose:
                print "Training node #%d (%s)" % (i, str(self.flow[i]))
            if not data_iterables[i] == []:
                # Delay the input timeseries with len(flow)-1, because every node connection introduces a one timestep delay
                datax = [x[0:-i, :] for x in data_iterables[i][0]]
                datay = []
                for x in data_iterables[i][0]:
                    c = numx.array([True] * x.shape[1])
                    c[external_input_range] = False
                    datay.append(x[i:, c])
            else:
                datax, datay = [], []
            self._train_node(zip(datax, datay), i)
            if self.verbose:
                print "Training finished"

        self._close_last_node()
Example #15
    def __init__(self, input_dim, connections):
        """Create a generic switchboard.

        The input and output dimension as well as dtype have to be fixed
        at initialization time.

        Keyword arguments:
        input_dim -- Dimension of the input data (number of connections).
        connections -- 1d Array or sequence with an entry for each output
            connection, containing the corresponding index of the
            input connection.
        """
        # check connections for inconsistencies
        if len(connections) == 0:
            err = "Received empty connection list."
            raise SwitchboardException(err)
        if numx.nanmax(connections) >= input_dim:
            err = ("One or more switchboard connection "
                   "indices exceed the input dimension.")
            raise SwitchboardException(err)
        # checks passed
        self.connections = numx.array(connections)
        output_dim = len(connections)
        super(Switchboard, self).__init__(input_dim=input_dim,
                                          output_dim=output_dim)
        # try to invert connections
        if (self.input_dim == self.output_dim and
            len(numx.unique(self.connections)) == self.input_dim):
            self.inverse_connections = numx.argsort(self.connections)
        else:
            self.inverse_connections = None
Example #16

def test_RecursiveExpansionNode2():
    """Testing the tensor-base."""
    data = 1e-6 + np.random.rand(10, 4)
    for functup in funcs:
        func = functup[0]
        degree = 4
        name = functup[2]
        recexpn = RecursiveExpansionNode(degree, recf=name,
                                         check=False, with0=True)
        resrec = recexpn.execute(data)
        restensor = np.array([get_handcomputed_function_tensor(data[i, :], func, degree)
                              for i in range(data.shape[0])])

        ressimplex = np.array(
            [makeSimplex(restensor[i, :, :, :, :]) for i in range(data.shape[0])])
        assert_array_almost_equal(resrec, ressimplex, decimal=3)
        print('Multi dim ' + name + ' equal')
Example #17
 def _label(self, x):
     if isinstance(x, (list, tuple, numx.ndarray)):
         y = [0] * len(x)
         p_labs, p_acc, p_vals = libsvmutil.svm_predict(y, numx.asarray(x).tolist(), self.model)
         
         return numx.array(p_labs)
     else:
         msg = "Data must be a sequence of vectors"
         raise mdp.NodeException(msg)
Example #18
 def test_execute_routing(self):
     """Test the standard routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "list": [1,2],
         "data": x.copy(),  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
     }
     y, out_msg = sboard.execute(x, msg)
     reference_y = n.array([[3,1,2],[6,4,5]])
     assert (y == reference_y).all()
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert n.all(out_msg["data"] == reference_y)
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #19
    def _params(self):
        """Return the current parameters of the nodes."""

        params = numx.array([])

        for n in traverseHinet(self.gflow):
            if hasattr(n, '_param_size') and n._param_size() > 0:
                params = numx.concatenate((params, n.params()))

        return params
Example #20
    def _label(self, x):
        if isinstance(x, (list, tuple, numx.ndarray)):
            y = [0] * len(x)
            p_labs, p_acc, p_vals = libsvmutil.svm_predict(
                y, numx.asarray(x).tolist(), self.model)

            return numx.array(p_labs)
        else:
            msg = "Data must be a sequence of vectors"
            raise mdp.NodeException(msg)
Example #21
 def test_execute_routing(self):
     """Test the standard routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "list": [1,2],
         "data": x.copy(),  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
     }
     y, out_msg = sboard.execute(x, msg)
     reference_y = n.array([[3,1,2],[6,4,5]])
     assert (y == reference_y).all()
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert n.all(out_msg["data"] == reference_y)
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #22
    def _params(self):
        """Return the current parameters of the nodes."""

        params = numx.array([])

        for n in traverseHinet(self.gflow):
            if hasattr(n, '_param_size') and n._param_size() > 0:
                params = numx.concatenate((params, n.params()))

        return params
Example #23
 def _label(self, x, threshold=0):
     """Retrieves patterns from the associative memory.
     
     :param x: A matrix having different variables on different columns
         and observations on rows.
     :param threshold: Threshold applied to each input dimension.
     :type threshold: float or numpy.ndarray
     :return: The patterns.
     """
     # todo: consider iterables
     threshold = numx.zeros(self.input_dim) + threshold
     return numx.array(
         [self._label_one(pattern, threshold) for pattern in x])
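Note: the zeros-plus-threshold line is a broadcasting idiom that turns a scalar or array-like threshold into one threshold per input dimension. A quick illustration in plain NumPy:

import numpy as np

input_dim = 4
print(np.zeros(input_dim) + 0.5)               # scalar -> [0.5 0.5 0.5 0.5]
print(np.zeros(input_dim) + [.1, .2, .3, .4])  # per-dimension thresholds kept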
Example #24
 def test_switchboard_gradient1(self):
     """Test that gradient is correct for a tiny switchboard."""
     sboard = mdp.hinet.Switchboard(input_dim=4, connections=[2,0])
     x = numx_rand.random((2,4))
     mdp.activate_extension("gradient")
     try:
         result = sboard._gradient(x)
         grad = result[1]["grad"]
         ref_grad = numx.array([[[0,0,1,0], [1,0,0,0]],
                              [[0,0,1,0], [1,0,0,0]]], dtype=grad.dtype)
         assert numx.all(grad == ref_grad)
     finally:
         mdp.deactivate_extension("gradient")
Example #25
def test_RecursiveExpansionNode2():
    """Testing the tensor-base."""
    data = 1e-6 + np.random.rand(10, 4)
    for functup in funcs:
        func = functup[0]
        degree = 4
        name = functup[2]
        recexpn = RecursiveExpansionNode(degree,
                                         recf=name,
                                         check=False,
                                         with0=True)
        resrec = recexpn.execute(data)
        restensor = np.array([
            get_handcomputed_function_tensor(data[i, :], func, degree)
            for i in range(data.shape[0])
        ])

        ressimplex = np.array([
            makeSimplex(restensor[i, :, :, :, :]) for i in range(data.shape[0])
        ])
        assert_array_almost_equal(resrec, ressimplex, decimal=3)
        print('Multi dim ' + name + ' equal')
Example #26
    def _gradient(self):
        """Get the gradient with respect to the parameters.

        This gradient has been calculated during the last backprop sweep.
        """

        gradient = numx.array([])

        for n in traverseHinet(self.gflow):
            if hasattr(n, '_param_size') and n._param_size() > 0:
                gradient = numx.concatenate((gradient, n.gradient()))

        return gradient
Example #27
    def _gradient(self):
        """Get the gradient with respect to the parameters.

        This gradient has been calculated during the last backprop sweep.
        """

        gradient = numx.array([])

        for n in traverseHinet(self.gflow):
            if hasattr(n, '_param_size') and n._param_size() > 0:
                gradient = numx.concatenate((gradient, n.gradient()))

        return gradient
Example #28
 def test_inverse_message_routing(self):
     """Test the inverse routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "method": "inverse",
         "list": [1,2],
         "data": x,  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
         "target": "test"
     }
     y, out_msg, target = sboard.execute(None, msg)
     assert y is None
     assert target == "test"
     reference_y = n.array([[2,3,1],[5,6,4]])
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert (out_msg["data"] == reference_y).all()
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #29
 def test_quadexpan_gradient1(self):
     """Test validity of gradient for QuadraticExpansionBiNode."""
     node = mdp.nodes.QuadraticExpansionNode()
     x = numx.array([[1, 3, 4]])
     node.execute(x)
     mdp.activate_extension("gradient")
     try:
         result = node._gradient(x)
         grad = result[1]["grad"]
         reference = numx.array(
             [[[ 1, 0, 0],   # x1
               [ 0, 1, 0],   # x2
               [ 0, 0, 1],   # x3
               [ 2, 0, 0],   # x1x1
               [ 3, 1, 0],   # x1x2
               [ 4, 0, 1],   # x1x3
               [ 0, 6, 0],   # x2x2
               [ 0, 4, 3],   # x2x3
               [ 0, 0, 8]]]) # x3x3
         assert numx.all(grad == reference)
     finally:
         mdp.deactivate_extension("gradient")
Example #30
    def _init_RBF(self, centers, sizes):
        # initialize the centers of the RBFs
        centers = numx.array(centers, self.dtype)

        # define input/output dim
        self.set_input_dim(centers.shape[1])
        self.set_output_dim(centers.shape[0])

        # multiply sizes if necessary
        sizes = numx.array(sizes, self.dtype)
        if sizes.ndim==0 or sizes.ndim==2:
            sizes = numx.array([sizes]*self._output_dim)
        else:
            # check number of sizes correct
            if sizes.shape[0] != self._output_dim:
                msg = "There must be as many RBF sizes as centers"
                raise mdp.NodeException(msg)

        if numx.isscalar(sizes[0]):
            # isotropic RBFs
            self._isotropic = True
        else:
            # anisotropic RBFs
            self._isotropic = False

            # check size
            if (sizes.shape[1] != self._input_dim or
                sizes.shape[2] != self._input_dim):
                msg = ("Dimensionality of size matrices should be the same " +
                       "as input dimensionality (%d != %d)"
                       % (sizes.shape[1], self._input_dim))
                raise mdp.NodeException(msg)

            # compute inverse covariance matrix
            for i in range(sizes.shape[0]):
                sizes[i,:,:] = mdp.utils.inv(sizes[i,:,:])

        self._centers = centers
        self._sizes = sizes
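Note: for context, a hedged sketch of how isotropic Gaussian RBFs set up this way are typically evaluated; names are illustrative and the exact normalization inside MDP's RBFExpansionNode may differ:

import numpy as np

def rbf_expand(x, centers, sizes):
    # one output per center: exp(-||x - c||^2 / (2 * s)), s acting as a variance
    diffs = x[:, None, :] - centers[None, :, :]
    sq_dists = (diffs ** 2).sum(axis=2)
    return np.exp(-sq_dists / (2.0 * np.asarray(sizes)[None, :]))

x = np.random.default_rng(0).random((5, 2))
centers = np.array([[0., 0.], [1., 1.]])
print(rbf_expand(x, centers, sizes=[0.5, 0.5]).shape)  # (5, 2)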
Example #31
 def test_inverse_message_routing(self):
     """Test the inverse routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "method": "inverse",
         "list": [1,2],
         "data": x,  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
         "target": "test"
     }
     y, out_msg, target = sboard.execute(None, msg)
     assert y is None
     assert target == "test"
     reference_y = n.array([[2,3,1],[5,6,4]])
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert (out_msg["data"] == reference_y).all()
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #32
 def test_quadexpan_gradient1(self):
     """Test validity of gradient for QuadraticExpansionBiNode."""
     node = mdp.nodes.QuadraticExpansionNode()
     x = numx.array([[1, 3, 4]])
     node.execute(x)
     mdp.activate_extension("gradient")
     try:
         result = node._gradient(x)
         grad = result[1]["grad"]
         reference = numx.array([[
             [1, 0, 0],  # x1
             [0, 1, 0],  # x2
             [0, 0, 1],  # x3
             [2, 0, 0],  # x1x1
             [3, 1, 0],  # x1x2
             [4, 0, 1],  # x1x3
             [0, 6, 0],  # x2x2
             [0, 4, 3],  # x2x3
             [0, 0, 8]
         ]])  # x3x3
         assert numx.all(grad == reference)
     finally:
         mdp.deactivate_extension("gradient")
Example #33
 def test_switchboard_gradient1(self):
     """Test that gradient is correct for a tiny switchboard."""
     sboard = mdp.hinet.Switchboard(input_dim=4, connections=[2, 0])
     x = numx_rand.random((2, 4))
     mdp.activate_extension("gradient")
     try:
         result = sboard._gradient(x)
         grad = result[1]["grad"]
         ref_grad = numx.array(
             [[[0, 0, 1, 0], [1, 0, 0, 0]], [[0, 0, 1, 0], [1, 0, 0, 0]]],
             dtype=grad.dtype)
         assert numx.all(grad == ref_grad)
     finally:
         mdp.deactivate_extension("gradient")
Example #34
 def _get_nearest_nodes(self, x):
     """Return the two nodes in the graph that are nearest to x and their
      squared distances, as ([node1, node2], [dist1, dist2])."""
     # distance function
     def _distance_from_node(node):
         #return norm(node.data.pos-x)**2
         tmp = node.data.pos - x
         return utils.mult(tmp, tmp)
     g = self.graph
     # distances of all graph nodes from x
      distances = numx.array(list(map(_distance_from_node, g.nodes)))
     ids = distances.argsort()[:2]
     #nearest = [g.nodes[idx] for idx in ids]
     #return nearest, distances[ids]
     return (g.nodes[ids[0]], g.nodes[ids[1]]), distances.take(ids)
Example #35
    def _get_nearest_nodes(self, x):
        """Return the two nodes in the graph that are nearest to x and their
        squared distances, as ([node1, node2], [dist1, dist2])."""

        # distance function
        def _distance_from_node(node):
            #return norm(node.data.pos-x)**2
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp)

        g = self.graph
        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()[:2]
        #nearest = [g.nodes[idx] for idx in ids]
        #return nearest, distances[ids]
        return (g.nodes[ids[0]], g.nodes[ids[1]]), distances.take(ids)
Example #36
    def _rank_nodes_by_distance(self, x):
        """Return the nodes in the graph in a list ranked by their squared
        distance to x. """

        #TODO: Refactor together with GNGNode._get_nearest_nodes

        # distance function
        def _distance_from_node(node):
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp)  # maps to mdp.numx.dot

        g = self.graph

        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()
        ranked_nodes = [g.nodes[id] for id in ids]

        return ranked_nodes
Example #37
    def _rank_nodes_by_distance(self, x):
        """Return the nodes in the graph in a list ranked by their squared
        distance to x. """

        #TODO: Refactor together with GNGNode._get_nearest_nodes

        # distance function
        def _distance_from_node(node):
            tmp = node.data.pos - x
            return utils.mult(tmp, tmp) # maps to mdp.numx.dot

        g = self.graph

        # distances of all graph nodes from x
        distances = numx.array(list(map(_distance_from_node, g.nodes)))
        ids = distances.argsort()
        ranked_nodes = [g.nodes[id] for id in ids]

        return ranked_nodes
Example #38
    def _stop_training(self):
        super(GrowingNeuralGasExpansionNode, self)._stop_training()

        # set the output dimension to the number of nodes of the neural gas
        self._output_dim = self.get_nodes_position().shape[0]

        # use the nodes of the learned neural gas as centers for a radial
        # basis function expansion.
        centers = self.get_nodes_position()

        # use the mean squared distance to the neighbours as the size
        # of each RBF
        sizes = []

        for node in self.graph.nodes:
            # calculate the size of the current RBF
            pos = node.data.pos
            sizes.append(numx.array([((pos - neighbor.data.pos)**2).sum()
                                     for neighbor in node.neighbors()]).mean())

        # initialize the radial basis function expansion with centers and sizes
        self.rbf_expansion = mdp.nodes.RBFExpansionNode(centers=centers,
                                                        sizes=sizes)
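Note: a toy illustration of the size heuristic above: each RBF width is the mean squared distance from its center to its graph neighbours (values chosen by hand):

import numpy as np

pos = np.array([0., 0.])
neighbour_positions = [np.array([1., 0.]), np.array([0., 2.])]
size = np.array([((pos - p) ** 2).sum() for p in neighbour_positions]).mean()
print(size)  # (1 + 4) / 2 = 2.5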
Example #39
 def _get_nearest_nodes(self, x):
     """Return the two nodes in the graph that are nearest to x and their
     squared distances.
      :param x: Coordinates of the point to compute distances to, in
          order to specify the nearest nodes.
     :type x: numpy.ndarray
     
      :return: The two nearest nodes and their squared distances to x,
          as ((node1, node2), [dist1, dist2]).
     :rtype: tuple
     """
     # distance function
     def _distance_from_node(node):
         #return norm(node.data.pos-x)**2
         tmp = node.data.pos - x
         return utils.mult(tmp, tmp)
     g = self.graph
     # distances of all graph nodes from x
     distances = numx.array(list(map(_distance_from_node, g.nodes)))
     ids = distances.argsort()[:2]
     #nearest = [g.nodes[idx] for idx in ids]
     #return nearest, distances[ids]
     return (g.nodes[ids[0]], g.nodes[ids[1]]), distances.take(ids)
Example #40
 def get_nodes_position(self):
      return numx.array([n.data.pos for n in self.graph.nodes],
                        dtype=self.dtype)
Example #41
 def _label(self, x, threshold=0):
     """Retrieves patterns from the associative memory.
     """
     threshold = numx.zeros(self.input_dim) + threshold
     return numx.array([self._label_one(pattern, threshold) for pattern in x])
Example #42
 def _stop_training(self, *args, **kwargs):
     """Transform the data and labels lists to array objects and reshape them."""
     self.data = numx.array(self.data, dtype=self.dtype)
     self.data.shape = (self.tlen, self.input_dim)
     self.labels = numx.array(self.labels)
      self.labels.shape = (self.tlen,)
Example #43
def makeSimplex(tensor):
    simplex = np.concatenate(
        (tensor[:, 0, 0, 0], tensor[0, 1:, 0, 0],
         tensor[0, 0, 1:, 0], tensor[0, 0, 0, 1:]))
    x1 = tensor[1, 0, 0, 0]
    x2 = tensor[2, 0, 0, 0]
    x3 = tensor[3, 0, 0, 0]

    y1 = tensor[0, 1, 0, 0]
    y2 = tensor[0, 2, 0, 0]
    y3 = tensor[0, 3, 0, 0]

    z1 = tensor[0, 0, 1, 0]
    z2 = tensor[0, 0, 2, 0]
    z3 = tensor[0, 0, 3, 0]

    w1 = tensor[0, 0, 0, 1]
    w2 = tensor[0, 0, 0, 2]
    w3 = tensor[0, 0, 0, 3]

    simplex = np.concatenate((simplex, x1 * np.array([y1, y2, y3])))
    simplex = np.concatenate((simplex, x2 * np.array([y1, y2])))
    simplex = np.concatenate((simplex, np.array([x3 * y1])))

    simplex = np.concatenate((simplex, x1 * np.array([z1, z2, z3])))
    simplex = np.concatenate((simplex, x2 * np.array([z1, z2])))
    simplex = np.concatenate((simplex, x3 * np.array([z1])))

    simplex = np.concatenate((simplex, y1 * np.array([z1, z2, z3])))
    simplex = np.concatenate((simplex, y2 * np.array([z1, z2])))
    simplex = np.concatenate((simplex, y3 * np.array([z1])))

    simplex = np.concatenate((simplex, x1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, x2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, x3 * np.array([w1])))

    simplex = np.concatenate((simplex, y1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, y2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, y3 * np.array([w1])))

    simplex = np.concatenate((simplex, z1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, z2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, z3 * np.array([w1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1 * z1, y1 * z2])))
    simplex = np.concatenate((simplex, x2 * np.array([y1 * z1])))
    simplex = np.concatenate((simplex, x1 * np.array([y2 * z1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1 * w1, y1 * w2])))
    simplex = np.concatenate((simplex, x2 * np.array([y1 * w1])))
    simplex = np.concatenate((simplex, x1 * np.array([y2 * w1])))

    simplex = np.concatenate((simplex, x1 * np.array([z1 * w1, z1 * w2])))
    simplex = np.concatenate((simplex, x2 * np.array([z1 * w1])))
    simplex = np.concatenate((simplex, y1 * np.array([z1 * w1, z1 * w2])))

    simplex = np.concatenate((simplex, y2 * np.array([z1 * w1])))
    simplex = np.concatenate((simplex, x1 * np.array([z2 * w1])))
    simplex = np.concatenate((simplex, y1 * np.array([z2 * w1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1 * z1 * w1])))

    return simplex
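Note: assuming the tensor axes index exponents 0..degree for each of the four variables, makeSimplex collects exactly the monomials whose total degree stays within the expansion degree. A sketch of the index set it enumerates:

import itertools

def simplex_indices(nvars, degree):
    """All exponent tuples (i1, ..., in) with 0 <= i1 + ... + in <= degree."""
    return [ix for ix in itertools.product(range(degree + 1), repeat=nvars)
            if sum(ix) <= degree]

print(len(simplex_indices(4, 4)))  # 70 basis terms for 4 variables, degree 4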
Example #44
 def get_nodes_position(self):
     return numx.array([n.data.pos for n in self.graph.nodes],
                       dtype=self.dtype)
Example #45
 def _stop_training(self, *args, **kwargs):
     """Transform the data and labels lists to array objects and reshape them."""
     self.data = numx.array(self.data, dtype=self.dtype)
     self.data.shape = (self.tlen, self.input_dim)
     self.labels = numx.array(self.labels)
      self.labels.shape = (self.tlen,)
Example #46
def test_symeig_fake_integer():
    a = numx.array([[1,2],[2,7]])
    b = numx.array([[3,1],[1,5]])
    w, z = utils._symeig._symeig_fake(a)
    w, z = utils._symeig._symeig_fake(a, b)
Example #47
 def _nearest_centroid_idx(self, data, centroids):
     dists = numx.array([numx.linalg.norm(data - c) for c in centroids])
     return dists.argmin()
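Note: an equivalent vectorized form of the lookup above (a sketch, not MDP's implementation): one broadcast subtraction and a row-wise norm replace the Python-level loop.

import numpy as np

def nearest_centroid_idx(point, centroids):
    return np.linalg.norm(centroids - point, axis=1).argmin()

print(nearest_centroid_idx(np.array([0., 0.]),
                           np.array([[1., 1.], [0.1, 0.], [2., 2.]])))  # 1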
Example #48
 def get_nodes_position(self):
     return numx.array([n.data.pos for n in self.graph.nodes],
                        dtype=self.dtype)
Example #49
 def _label(self, x, threshold=0):
     """Retrieves patterns from the associative memory.
     """
     threshold = numx.zeros(self.input_dim) + threshold
     return numx.array(
         [self._label_one(pattern, threshold) for pattern in x])
Example #50
 def _nearest_centroid_idx(self, data, centroids):
     dists = numx.array([numx.linalg.norm(data - c) for c in centroids])
     return dists.argmin()
Example #51

def makeSimplex(tensor):
    simplex = np.concatenate(
        (tensor[:, 0, 0, 0], tensor[0, 1:, 0, 0], tensor[0, 0, 1:, 0],
         tensor[0, 0, 0, 1:]))
    x1 = tensor[1, 0, 0, 0]
    x2 = tensor[2, 0, 0, 0]
    x3 = tensor[3, 0, 0, 0]

    y1 = tensor[0, 1, 0, 0]
    y2 = tensor[0, 2, 0, 0]
    y3 = tensor[0, 3, 0, 0]

    z1 = tensor[0, 0, 1, 0]
    z2 = tensor[0, 0, 2, 0]
    z3 = tensor[0, 0, 3, 0]

    w1 = tensor[0, 0, 0, 1]
    w2 = tensor[0, 0, 0, 2]
    w3 = tensor[0, 0, 0, 3]

    simplex = np.concatenate((simplex, x1 * np.array([y1, y2, y3])))
    simplex = np.concatenate((simplex, x2 * np.array([y1, y2])))
    simplex = np.concatenate((simplex, np.array([x3 * y1])))

    simplex = np.concatenate((simplex, x1 * np.array([z1, z2, z3])))
    simplex = np.concatenate((simplex, x2 * np.array([z1, z2])))
    simplex = np.concatenate((simplex, x3 * np.array([z1])))

    simplex = np.concatenate((simplex, y1 * np.array([z1, z2, z3])))
    simplex = np.concatenate((simplex, y2 * np.array([z1, z2])))
    simplex = np.concatenate((simplex, y3 * np.array([z1])))

    simplex = np.concatenate((simplex, x1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, x2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, x3 * np.array([w1])))

    simplex = np.concatenate((simplex, y1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, y2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, y3 * np.array([w1])))

    simplex = np.concatenate((simplex, z1 * np.array([w1, w2, w3])))
    simplex = np.concatenate((simplex, z2 * np.array([w1, w2])))
    simplex = np.concatenate((simplex, z3 * np.array([w1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1*z1, y1*z2])))
    simplex = np.concatenate((simplex, x2 * np.array([y1*z1])))
    simplex = np.concatenate((simplex, x1 * np.array([y2*z1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1*w1, y1*w2])))
    simplex = np.concatenate((simplex, x2 * np.array([y1*w1])))
    simplex = np.concatenate((simplex, x1 * np.array([y2*w1])))

    simplex = np.concatenate((simplex, x1 * np.array([z1*w1, z1*w2])))
    simplex = np.concatenate((simplex, x2 * np.array([z1*w1])))
    simplex = np.concatenate((simplex, y1 * np.array([z1*w1, z1*w2])))

    simplex = np.concatenate((simplex, y2 * np.array([z1*w1])))
    simplex = np.concatenate((simplex, x1 * np.array([z2*w1])))
    simplex = np.concatenate((simplex, y1 * np.array([z2*w1])))

    simplex = np.concatenate((simplex, x1 * np.array([y1*z1*w1])))

    return simplex
Example #52
    def __init__(self, lags=1, sfa_ica_coeff=(1., 1.), icaweights=None,
                 sfaweights=None, whitened=False, white_comp = None,
                 white_parm = None, eps_contrast=1e-6, max_iter=10000,
                 RP=None, verbose=False, input_dim=None, output_dim=None,
                 dtype=None):
        """Initializes an object of type 'ISFANode' to perform 
        Independent Slow Feature Analysis.

        The notation is the same used in the paper by Blaschke et al. Please
        refer to the paper for more information.
        
        
        :param lags:  A list of time-lags to generate the time-delayed covariance
            matrices (in the paper this is the set of \tau). If lags is an
            integer, time-lags 1,2,...,'lags' are used.
            Note that time-lag == 0 (instantaneous correlation) is always
            implicitly used.
        :type lags: list or int
            
        :param sfa_ica_coeff: A list of floats with two entries, which defines
            the weights of the SFA and ICA part of the objective function.
            They are called b_{SFA} and b_{ICA} in the paper.
        :type sfa_ica_coeff: list
        
        :param icaweights: Weighting factors for the cov matrices relative
            to the ICA part of the objective function (called
            \kappa_{ICA}^{\tau} in the paper). Default is 1.           
            Possible values are:

            - An integer ``n``: All matrices are weighted the same
              (note that it does not make sense to have ``n != 1``).
            - A list or array of floats of ``len == len(lags)``:
              Each element of the list is used for weighting the
              corresponding matrix.
            - ``None``: Use the default values.
        
        :type icaweights: int, list or array

        :param sfaweights: Weighting factors for the covariance matrices relative
            to the SFA part of the objective function (called
            \kappa_{SFA}^{\tau} in the paper). Default is [1., 0., ..., 0.]
            For possible values see the description of icaweights.
        :type sfaweights: int, list or array
        
        :param whitened: ``True`` if input data is already white, ``False``
            otherwise (the data will be whitened internally).
        :type whitened: bool
        
        :param white_comp: If whitened is false, you can set ``white_comp`` to the
            number of whitened components to keep during the
            calculation (i.e., the input dimensions are reduced to
            ``white_comp`` by keeping the components of largest variance).
        :type white_comp: int
        
        :param white_parm: A dictionary with additional parameters for whitening.
            It is passed directly to the WhiteningNode constructor.
            Ex: white_parm = { 'svd' : True }
        :type white_parm: dict
        
        :param eps_contrast: Convergence is achieved when the relative
            improvement in the contrast is below this threshold.
            Values in the range [1E-4, 1E-10] are usually reasonable.
        :type eps_contrast: float
        
        :param max_iter: If the algorithm does not achieve convergence
            within max_iter iterations, an exception is raised.
            Should be larger than 100.
        :type max_iter: int
        
        :param RP: Starting rotation-permutation matrix. It is an
            input_dim x input_dim matrix used to initially rotate the
            input components. If not set, the identity matrix is used.
            In the paper this is used to start the algorithm at the
            SFA solution (which is often quite near to the optimum).
        
        :param verbose: Print progress information during convergence. This can
            slow down the algorithm, but it's the only way to see
            the rate of improvement and immediately spot if something
            is going wrong.
        :type verbose: bool
        
        :param input_dim:  The input dimensionality.
        :type input_dim: int
        
        :param output_dim: Sets the number of independent components that have to
            be extracted. Note that if this is not smaller than input_dim,
            the problem is solved linearly and SFA would give the same
            solution, only much faster.
        :type output_dim: int
        
        :param dtype: Datatype to be used.
        :type dtype: numpy.dtype or str
        """

        # check that the "lags" argument has some meaningful value
        if isinstance(lags, int):
            lags = list(range(1, lags+1))
        elif isinstance(lags, (list, tuple)):
            lags = numx.array(lags, "i")
        elif isinstance(lags, numx.ndarray):
            if lags.dtype.char not in ['i', 'l']:
                err_str = "lags must be integer!"
                raise NodeException(err_str)
        else:
            err_str = ("Lags must be int, list or array. Found "
                       "%s!" % (type(lags).__name__))
            raise NodeException(err_str)
        self.lags = lags

        # sanity checks for weights
        if icaweights is None:
            self.icaweights = 1.
        else:
            if len(icaweights) != len(lags):
                err = ("icaweights vector length is %d, "
                       "should be %d" % (len(icaweights), len(lags)))
                raise NodeException(err)
            self.icaweights = icaweights

        if sfaweights is None:
            self.sfaweights = [0]*len(lags)
            self.sfaweights[0] = 1.
        else:
            if len(sfaweights) != len(lags):
                err = ("sfaweights vector length is %d, "
                       "should be %d" % (len(sfaweights), len(lags)))
                raise NodeException(err)
            self.sfaweights = sfaweights

        # store attributes
        self.sfa_ica_coeff = sfa_ica_coeff
        self.max_iter = max_iter
        self.verbose = verbose
        self.eps_contrast = eps_contrast

        # if input is not white, insert a WhiteningNode
        self.whitened = whitened
        if not whitened:
            if white_parm is None:
                white_parm = {}
            if output_dim is not None:
                white_comp = output_dim
            elif white_comp is not None:
                output_dim = white_comp
            self.white = WhiteningNode(input_dim=input_dim,
                                       output_dim=white_comp,
                                       dtype=dtype, **white_parm)

        # initialize covariance matrices
        self.covs = [DelayCovarianceMatrix(dt, dtype=dtype) for dt in lags]

        # initialize the global rotation-permutation matrix;
        # if not set, it will eventually become an identity matrix
        self.RP = RP

        # initialize verbose structure to print nice and useful progress info
        if verbose:
            info = { 'sweep' : max(len(str(self.max_iter)), 5),
                     'perturbe': max(len(str(self.max_iter)), 5),
                     'float' : 5+8,
                     'fmt' : "%.5e",
                     'sep' : " | "}
            f1 = "Sweep".center(info['sweep'])
            f1_2 = "Pertb". center(info['perturbe'])
            f2 = "SFA part".center(info['float'])
            f3 = "ICA part".center(info['float'])
            f4 = "Contrast".center(info['float'])
            header = info['sep'].join([f1, f1_2, f2, f3, f4])
            info['header'] = header+'\n'
            info['line'] = len(header)*"-"
            self._info = info

        # finally call base class constructor
        super(ISFANode, self).__init__(input_dim, output_dim, dtype)
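Note: a hedged usage sketch for the constructor above (assumes MDP is installed; the data and parameter values are illustrative only):

import mdp
import numpy as np

x = np.random.default_rng(0).standard_normal((1000, 5))
node = mdp.nodes.ISFANode(lags=3, sfa_ica_coeff=(1., 1.), output_dim=2)
node.train(x)
node.stop_training()
y = node.execute(x)  # the two extracted independent slow features
print(y.shape)       # (1000, 2)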
Example #53
 def get_nodes_position(self):
      return numx.array([n.data.pos for n in self.graph.nodes],
                        dtype=self.dtype)
Example #54
    def __init__(self, lags=1, sfa_ica_coeff=(1., 1.), icaweights=None,
                 sfaweights=None, whitened=False, white_comp = None,
                 white_parm = None, eps_contrast=1e-6, max_iter=10000,
                 RP=None, verbose=False, input_dim=None, output_dim=None,
                 dtype=None):
        """
        Perform Independent Slow Feature Analysis.

        The notation is the same used in the paper by Blaschke et al. Please
        refer to the paper for more information.

        :Parameters:
          lags
            list of time-lags to generate the time-delayed covariance
            matrices (in the paper this is the set of \tau). If
            lags is an integer, time-lags 1,2,...,'lags' are used.
            Note that time-lag == 0 (instantaneous correlation) is
            always implicitly used.

          sfa_ica_coeff
            a list of floats with two entries, which defines the
            weights of the SFA and ICA part of the objective
            function. They are called b_{SFA} and b_{ICA} in the
            paper.

          sfaweights
            weighting factors for the covariance matrices relative
            to the SFA part of the objective function (called
            \kappa_{SFA}^{\tau} in the paper). Default is
            [1., 0., ..., 0.]
            For possible values see the description of icaweights.

          icaweights
            weighting factors for the cov matrices relative
            to the ICA part of the objective function (called
            \kappa_{ICA}^{\tau} in the paper). Default is 1.
            Possible values are:

            - an integer ``n``: all matrices are weighted the same
              (note that it does not make sense to have ``n != 1``)

            - a list or array of floats of ``len == len(lags)``:
              each element of the list is used for weighting the
              corresponding matrix

            - ``None``: use the default values.

          whitened
            ``True`` if input data is already white, ``False``
            otherwise (the data will be whitened internally).

          white_comp
            If whitened is false, you can set ``white_comp`` to the
            number of whitened components to keep during the
            calculation (i.e., the input dimensions are reduced to
            ``white_comp`` by keeping the components of largest variance).
          white_parm
            a dictionary with additional parameters for whitening.
            It is passed directly to the WhiteningNode constructor.
            Ex: white_parm = { 'svd' : True }

          eps_contrast
            Convergence is achieved when the relative
            improvement in the contrast is below this threshold.
            Values in the range [1E-4, 1E-10] are usually
            reasonable.

          max_iter
            If the algorithm does not achieve convergence within
            max_iter iterations, an exception is raised. Should be
            larger than 100.

          RP
            Starting rotation-permutation matrix. It is an
            input_dim x input_dim matrix used to initially rotate the
            input components. If not set, the identity matrix is used.
            In the paper this is used to start the algorithm at the
            SFA solution (which is often quite near to the optimum).

          verbose
            print progress information during convergence. This can
            slow down the algorithm, but it's the only way to see
            the rate of improvement and immediately spot if something
            is going wrong.

          output_dim
            sets the number of independent components that have to
            be extracted. Note that if this is not smaller than
            input_dim, the problem is solved linearly and SFA
            would give the same solution, only much faster.
        """
        # check that the "lags" argument has some meaningful value
        if isinstance(lags, int):
            lags = list(range(1, lags+1))
        elif isinstance(lags, (list, tuple)):
            lags = numx.array(lags, "i")
        elif isinstance(lags, numx.ndarray):
            if lags.dtype.char not in ['i', 'l']:
                err_str = "lags must be integer!"
                raise NodeException(err_str)
        else:
            err_str = ("Lags must be int, list or array. Found "
                       "%s!" % (type(lags).__name__))
            raise NodeException(err_str)
        self.lags = lags

        # sanity checks for weights
        if icaweights is None:
            self.icaweights = 1.
        else:
            if len(icaweights) != len(lags):
                err = ("icaweights vector length is %d, "
                       "should be %d" % (len(icaweights), len(lags)))
                raise NodeException(err)
            self.icaweights = icaweights

        if sfaweights is None:
            self.sfaweights = [0]*len(lags)
            self.sfaweights[0] = 1.
        else:
            if len(sfaweights) != len(lags):
                err = ("sfaweights vector length is %d, "
                       "should be %d" % (len(sfaweights), len(lags)))
                raise NodeException(err)
            self.sfaweights = sfaweights

        # store attributes
        self.sfa_ica_coeff = sfa_ica_coeff
        self.max_iter = max_iter
        self.verbose = verbose
        self.eps_contrast = eps_contrast

        # if input is not white, insert a WhiteningNode
        self.whitened = whitened
        if not whitened:
            if white_parm is None:
                white_parm = {}
            if output_dim is not None:
                white_comp = output_dim
            elif white_comp is not None:
                output_dim = white_comp
            self.white = WhiteningNode(input_dim=input_dim,
                                       output_dim=white_comp,
                                       dtype=dtype, **white_parm)

        # initialize covariance matrices
        self.covs = [DelayCovarianceMatrix(dt, dtype=dtype) for dt in lags]

        # initialize the global rotation-permutation matrix;
        # if not set, it will eventually become an identity matrix
        self.RP = RP

        # initialize verbose structure to print nice and useful progress info
        if verbose:
            info = { 'sweep' : max(len(str(self.max_iter)), 5),
                     'perturbe': max(len(str(self.max_iter)), 5),
                     'float' : 5+8,
                     'fmt' : "%.5e",
                     'sep' : " | "}
            f1 = "Sweep".center(info['sweep'])
            f1_2 = "Pertb". center(info['perturbe'])
            f2 = "SFA part".center(info['float'])
            f3 = "ICA part".center(info['float'])
            f4 = "Contrast".center(info['float'])
            header = info['sep'].join([f1, f1_2, f2, f3, f4])
            info['header'] = header+'\n'
            info['line'] = len(header)*"-"
            self._info = info

        # finally call base class constructor
        super(ISFANode, self).__init__(input_dim, output_dim, dtype)