Example #1
    def test_sfa2_gradient2(self):
        def _alt_sfa2_grad(self, x):
            """Reference grad method based on quadratic forms."""
            # note that the H and f arrays are cached in the node and remain even
            # after the extension has been deactivated
            if not hasattr(self, "_gradient_Hs"):
                # use a plain underscore name: a double-underscore attribute
                # would be name-mangled by the enclosing class, so the
                # hasattr check above would never find it
                quad_forms = [
                    self.get_quadratic_form(i) for i in range(self.output_dim)
                ]
                # vstack needs a sequence, not a generator, in modern numpy
                self._gradient_Hs = numx.vstack(
                    [quad_form.H[numx.newaxis] for quad_form in quad_forms])
                self._gradient_fs = numx.vstack(
                    [quad_form.f[numx.newaxis] for quad_form in quad_forms])
            grad = (numx.dot(x, self._gradient_Hs) + numx.repeat(
                self._gradient_fs[numx.newaxis, :, :], len(x), axis=0))
            return grad

        sfa2_node = bimdp.nodes.SFA2BiNode(output_dim=3)
        x = numx_rand.random((300, 6))
        sfa2_node.train(x)
        sfa2_node.stop_training()
        x = numx_rand.random((2, 6))
        mdp.activate_extension("gradient")
        try:
            result1 = sfa2_node.execute(x, {"method": "gradient"})
            grad1 = result1[1]["grad"]
            grad2 = _alt_sfa2_grad(sfa2_node, x)
            assert numx.amax(abs(grad1 - grad2)) < 1E-9
        finally:
            mdp.deactivate_extension("gradient")
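
The numx.dot(x, self._gradient_Hs) call above relies on NumPy's generalized
dot product: for a 2-D x and a 3-D stack of matrices, dot sums over the last
axis of x and the second-to-last axis of the stack. A minimal standalone
sketch of that shape behavior, in plain NumPy with illustrative names:

import numpy as np

m, k, n = 2, 3, 6                   # samples, output components, input dim
x = np.random.random((m, n))
Hs = np.random.random((k, n, n))    # one n x n matrix per output component

out = np.dot(x, Hs)                 # shape (m, k, n)
assert out.shape == (m, k, n)

# per-sample check against explicit matrix-vector products
for i in range(m):
    for j in range(k):
        assert np.allclose(out[i, j], x[i] @ Hs[j])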
Example #2
def verify_ICANode(icanode, rand_func=uniform, vars=3, N=8000, prec=3):
    dim = (N, vars)
    mat, mix, inp = get_random_mix(rand_func=rand_func, mat_dim=dim)
    icanode.train(inp)
    act_mat = icanode.execute(inp)
    cov = mdp.utils.cov2((mat - mean(mat, axis=0)) / std(mat, axis=0), act_mat)
    maxima = numx.amax(abs(cov), axis=0)
    assert_array_almost_equal(maxima, numx.ones(vars), prec)
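
A hypothetical invocation of this helper, assuming MDP's FastICANode and the
get_random_mix test utility used above are importable (any trainable ICA node
with a compatible interface would do):

import mdp

verify_ICANode(mdp.nodes.FastICANode(), vars=3, N=8000, prec=3)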
Example #3
    def _train(self, x):
        """Determine coordinatewise and absolute maxima and minima.

        :param x: Chuck of data to be used for training. Observations/samples
            must be along the first axis, variables along the second.
        :type x: numpy.ndarray

        The values are used to generate a transformation to the valid domain
        for the data.
        """
        if self.amin is None:
            self.amaxcolumn = np.amax(x, axis=0)
            self.amincolumn = np.amin(x, axis=0)
            self.amax = np.amax(self.amaxcolumn)
            self.amin = np.amin(self.amincolumn)
        else:
            self.amaxcolumn = np.maximum(self.amaxcolumn, np.amax(x, axis=0))
            self.amincolumn = np.minimum(self.amincolumn, np.amin(x, axis=0))
            self.amax = np.amax(self.amaxcolumn)
            self.amin = np.amin(self.amincolumn)
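
The update branch keeps the running extrema consistent across chunks:
combining per-chunk maxima with np.maximum is equivalent to taking np.amax
over all the data seen so far. A quick standalone check of that invariant,
in plain NumPy:

import numpy as np

data = np.random.random((100, 4))
chunks = np.array_split(data, 5)

running_max = np.amax(chunks[0], axis=0)
running_min = np.amin(chunks[0], axis=0)
for chunk in chunks[1:]:
    running_max = np.maximum(running_max, np.amax(chunk, axis=0))
    running_min = np.minimum(running_min, np.amin(chunk, axis=0))

assert np.array_equal(running_max, np.amax(data, axis=0))
assert np.array_equal(running_min, np.amin(data, axis=0))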
Example #4
    def check_domain(self, x, prec=1e-6):
        """Checks for compliance of the data x with the domain on which
            the function sequence selected is defined or orthogonal.

        :param x: The data to be expanded. Observations/samples must
            be along the first axis, variables along the second.
        :type x: numpy.ndarray

        :param prec: (Numerical) tolerance when checking validity.
        :type prec: float

        :raise mdp.NodeException: If one or more values lie outside of the
            function-specific domain.
        """
        xmax = np.amax(x) - prec
        xmin = np.amin(x) + prec

        if (self.upper < xmax) or (self.lower > xmin):
            raise mdp.NodeException(
                "One or more values lie outside of the function specific domain.")
Example #5
    def test_gradient_product(self):
        """Test that the product of gradients is calculated correctly."""
        sfa_node1 = bimdp.nodes.SFABiNode(output_dim=5)
        sfa_node2 = bimdp.nodes.SFABiNode(output_dim=3)
        flow = sfa_node1 + sfa_node2
        x = numx_rand.random((300, 10))
        flow.train(x)
        mdp.activate_extension("gradient")
        try:
            x1 = numx_rand.random((2, 10))
            x2, msg = sfa_node1.execute(x1, {"method": "gradient"})
            grad1 = msg["grad"]
            _, msg = sfa_node2.execute(x2, {"method": "gradient"})
            grad2 = msg["grad"]
            grad12 = flow.execute(x1, {"method": "gradient"})[1]["grad"]
            # the flow's gradient must equal the chain-rule product of the
            # per-node gradients; compute that product via broadcasting
            # (too memory intensive for large data, but fine as a reference)
            ref_grad = numx.sum(grad2[:, :, numx.newaxis, :] *
                                numx.transpose(grad1[:, numx.newaxis, :, :],
                                               (0, 1, 3, 2)),
                                axis=3)
            assert numx.amax(abs(ref_grad - grad12)) < 1E-9
        finally:
            mdp.deactivate_extension("gradient")
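
The broadcast-and-sum reference above is just a batched matrix product: for
each sample, the chain rule gives the flow gradient as grad2 @ grad1. A
compact equivalent that avoids the intermediate 4-D array, sketched in plain
NumPy with the shapes from this test:

import numpy as np

grad1 = np.random.random((2, 5, 10))   # samples x out1 x in
grad2 = np.random.random((2, 3, 5))    # samples x out2 x out1

# chain rule as a batched matmul: (2, 3, 5) @ (2, 5, 10) -> (2, 3, 10)
prod = np.matmul(grad2, grad1)

# matches the broadcasting formulation used in the test
ref = np.sum(grad2[:, :, np.newaxis, :] *
             np.transpose(grad1[:, np.newaxis, :, :], (0, 1, 3, 2)),
             axis=3)
assert np.allclose(prod, ref)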