Example #1
    def _execute(self, x):
        #----------------------------------------------------
        # similar algorithm to that within self.stop_training()
        #  refer there for notes & comments on code
        #----------------------------------------------------
        N = self.data.shape[0]
        Nx = x.shape[0]
        W = numx.zeros((Nx, N), dtype=self.dtype)

        k, r = self.k, self.r
        d_out = self.output_dim
        Q_diag_idx = numx.arange(k)

        for row in range(Nx):
            #find nearest neighbors of x in M
            M_xi = self.data - x[row]
            nbrs = numx.argsort((M_xi**2).sum(1))[:k]
            M_xi = M_xi[nbrs]

            # find the regularized covariance matrix Q
            # (numx is MDP's numpy alias; mult and svd come from mdp.utils)
            Q = mult(M_xi, M_xi.T)
            if r is None and k > d_out:
                # no regularizer given: use the residual energy beyond the
                # first d_out singular values of this neighborhood
                sig2 = (svd(M_xi, compute_uv=0))**2
                r_row = numx.sum(sig2[d_out:])
                Q[Q_diag_idx, Q_diag_idx] += r_row
            elif r is not None:
                # fixed regularizer supplied by the user; the elif avoids
                # adding the automatic regularizer a second time
                Q[Q_diag_idx, Q_diag_idx] += r

            #solve for weights
            w = self._refcast(numx_linalg.solve(Q, numx.ones(k)))
            w /= w.sum()
            W[row, nbrs] = w

        #multiply weights by result of SVD from training
        return numx.dot(W, self.training_projection)
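For reference, the per-row weight computation can be reproduced with plain numpy. Below is a minimal sketch, assuming standard numpy in place of MDP's numx aliases; the function name and the fixed reg parameter are ours, and a simple Tikhonov term stands in for the SVD-based regularizer used above:

    import numpy as np

    def lle_weights(data, point, k, reg=1e-3):
        """Reconstruction weights of `point` from its k nearest rows of `data`."""
        diffs = data - point                        # vectors from point to every sample
        nbrs = np.argsort((diffs ** 2).sum(1))[:k]  # indices of the k nearest neighbors
        G = diffs[nbrs] @ diffs[nbrs].T             # local Gram matrix, shape (k, k)
        G[np.arange(k), np.arange(k)] += reg * np.trace(G)  # regularize for stability
        w = np.linalg.solve(G, np.ones(k))          # solve G w = 1
        return nbrs, w / w.sum()                    # enforce the sum-to-one constraint

    rng = np.random.default_rng(0)
    data = rng.standard_normal((100, 3))
    nbrs, w = lle_weights(data, data[0], k=5)
    print(nbrs, w.sum())                            # neighbor indices and 1.0

Solving G w = 1 and renormalizing is the standard closed form for the sum-to-one constrained least-squares reconstruction weights in LLE.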
Example #2
    def _label(self, x):
        """Label the data by comparison with the reference points."""
        # all pairwise squared distances at once, via the expansion
        # ||x - s||^2 = ||x||^2 + ||s||^2 - 2 x.s
        square_distances = ((x * x).sum(1)[:, numx.newaxis]
                            + (self.samples * self.samples).sum(1))
        square_distances -= 2 * numx.dot(x, self.samples.T)
        min_inds = square_distances.argsort()
        # majority vote among the k nearest reference points of each row
        win_inds = [numx.bincount(self.sample_label_indices[indices[0:self.k]]).argmax(0)
                    for indices in min_inds]
        labels = [self.ordered_labels[i] for i in win_inds]
        return labels
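The distance computation above relies on the expansion ||x - s||^2 = ||x||^2 + ||s||^2 - 2 x.s, which yields all pairwise squared distances from a single matrix product. A self-contained sketch with plain numpy (all names are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((4, 2))         # points to label
    samples = rng.standard_normal((20, 2))  # labeled reference points
    label_idx = rng.integers(0, 3, 20)      # integer label index per reference point
    k = 5

    # all pairwise squared distances from one matrix product
    sq = (x * x).sum(1)[:, np.newaxis] + (samples * samples).sum(1)
    sq -= 2 * x @ samples.T

    # sanity check against the direct computation
    direct = ((x[:, np.newaxis, :] - samples[np.newaxis, :, :]) ** 2).sum(-1)
    assert np.allclose(sq, direct)

    # majority vote among the k nearest reference points of each row
    votes = [np.bincount(label_idx[row.argsort()[:k]]).argmax() for row in sq]
    print(votes)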
Example #3
    def _label(self, x):
        """Return an array with class labels from the perceptron.

        :param x: A matrix having different variables on different columns
            and observations on the rows.
        :type x: numpy.ndarray
        :return: An array with class labels from the perceptron.
        :rtype: numpy.ndarray
        """
        # todo: consider iterables
        return numx.sign(numx.dot(x, self.weights) + self.offset_weight)
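The labeling step is just a signed affine decision function. A standalone sketch with plain numpy (the weights and data here are made up):

    import numpy as np

    rng = np.random.default_rng(0)
    weights = rng.standard_normal(3)  # one weight per input column
    offset = 0.5                      # bias term
    x = rng.standard_normal((5, 3))   # five observations, three variables

    labels = np.sign(x @ weights + offset)  # +1/-1 per observation (0 on the boundary)
    print(labels)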
Example #4
    def _alt_sfa2_grad(self, x):
        """Reference grad method based on quadratic forms."""
        # note that the H and f arrays are cached in the node and remain even
        # after the extension has been deactivated
        # (single-underscore names: double-underscore attributes would be
        # name-mangled by the class, so the hasattr check would never match)
        if not hasattr(self, "_gradient_Hs"):
            quad_forms = [self.get_quadratic_form(i)
                          for i in range(self.output_dim)]
            # numx.vstack expects a sequence, so use lists, not generators
            self._gradient_Hs = numx.vstack([quad_form.H[numx.newaxis]
                                             for quad_form in quad_forms])
            self._gradient_fs = numx.vstack([quad_form.f[numx.newaxis]
                                             for quad_form in quad_forms])
        grad = (numx.dot(x, self._gradient_Hs)
                + numx.repeat(self._gradient_fs[numx.newaxis, :, :],
                              len(x), axis=0))
        return grad
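The batched expression works because numpy's dot contracts the last axis of x with the second-to-last axis of the stacked H array, and broadcasting can stand in for the explicit repeat. A sketch under the assumption (as for MDP quadratic forms) that each H_i is symmetric and y_i(x) = 1/2 x'H_i x + f_i'x:

    import numpy as np

    rng = np.random.default_rng(0)
    d, m, n = 4, 3, 5                    # input dim, number of outputs, samples
    Hs = rng.standard_normal((m, d, d))
    Hs = Hs + Hs.transpose(0, 2, 1)      # symmetrize each H_i
    fs = rng.standard_normal((m, d))
    x = rng.standard_normal((n, d))

    # batched gradient of y_i(x) = 1/2 x'H_i x + f_i'x, i.e. H_i x + f_i
    grad = np.dot(x, Hs) + fs[np.newaxis, :, :]  # shape (n, m, d)

    # check one entry against the explicit formula
    assert np.allclose(grad[2, 1], Hs[1] @ x[2] + fs[1])
    print(grad.shape)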
Example #5
    def test_switchboard_gradient2(self):
        """Test gradient for a larger switchboard."""
        dim = 100
        connections = [int(i) for i in numx.random.random((dim,)) * (dim - 1)]
        sboard = mdp.hinet.Switchboard(input_dim=dim, connections=connections)
        x = numx.random.random((10, dim))
        # assume a 5-dimensional gradient at this stage
        grad = numx.random.random((10, dim, 5))

        # original reference implementation: the Jacobian of a switchboard
        # is a constant 0/1 routing matrix, tiled once per sample
        def _switchboard_grad(self, x):
            grad = numx.zeros((self.output_dim, self.input_dim))
            grad[range(self.output_dim), self.connections] = 1
            return numx.tile(grad, (len(x), 1, 1))

        with mdp.extension("gradient"):
            result = sboard._gradient(x, grad)
            ext_grad = result[1]["grad"]
            tmp_grad = _switchboard_grad(sboard, x)
            ref_grad = numx.asarray([numx.dot(tmp_grad[i], grad[i])
                                     for i in range(len(tmp_grad))])
        assert numx.all(ext_grad == ref_grad)
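The reference Jacobian is easy to verify independently: routing through a connections array is a linear map whose matrix has a single 1 per output row, so applying the switchboard is the same as multiplying by that matrix. A quick numpy check (names are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    dim, out = 6, 4
    connections = rng.integers(0, dim, out)  # output i reads input connections[i]
    x = rng.standard_normal((3, dim))

    y = x[:, connections]                    # what a switchboard computes

    # the Jacobian dy_i/dx_j is 1 exactly where j == connections[i]
    J = np.zeros((out, dim))
    J[np.arange(out), connections] = 1
    assert np.allclose(y, x @ J.T)           # routing == multiplication by J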
Example #6
    def _label_one(self, pattern, threshold):
        pattern = mdp.utils.bool_to_sign(pattern)

        has_converged = False
        while not has_converged:
            has_converged = True
            # shuffle needs a mutable sequence, so materialize the range
            iter_order = list(range(len(self._weight_matrix)))
            if self._shuffled_update:
                numx_rand.shuffle(iter_order)
            for row in iter_order:
                w_row = self._weight_matrix[row]

                thresh_row = threshold[row]
                new_pattern_row = numx.sign(numx.dot(w_row, pattern) - thresh_row)

                if new_pattern_row == 0:
                    # Following MacKay, Neural Networks, we do nothing
                    # when the new pattern is zero
                    pass
                elif pattern[row] != new_pattern_row:
                    has_converged = False
                    pattern[row] = new_pattern_row
        return mdp.utils.sign_to_bool(pattern)
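For context, this is the classic asynchronous Hopfield update with a per-unit threshold. A toy end-to-end sketch with Hebbian weights and zero thresholds (all names here are hypothetical):

    import numpy as np

    rng = np.random.default_rng(0)
    stored = np.sign(rng.standard_normal((2, 8)))  # two +/-1 patterns
    W = stored.T @ stored / 8.0                    # Hebbian weight matrix
    np.fill_diagonal(W, 0)                         # no self-connections

    pattern = stored[0].copy()
    pattern[:2] *= -1                              # corrupt two bits

    changed = True
    while changed:                                 # asynchronous updates
        changed = False
        for row in range(len(W)):
            new = np.sign(W[row] @ pattern)
            if new != 0 and new != pattern[row]:
                pattern[row] = new
                changed = True
    print(np.array_equal(pattern, stored[0]))      # usually recovers the pattern

Convergence of the loop is guaranteed for symmetric weights with zero diagonal, since each accepted flip lowers the network energy.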
Example #7
    def _label(self, x):
        """Returns an array with class labels from the perceptron."""
        return numx.sign(numx.dot(x, self.weights) + self.offset_weight)
Example #8
    def _execute(self, x):
        # project onto an all-ones matrix: every output dimension receives
        # the sum of the sample's input components
        return numx.dot(x, numx.ones((self.input_dim, self.output_dim)))
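Multiplying by an all-ones (input_dim x output_dim) matrix makes every output dimension the sum of the sample's inputs; a quick check with plain numpy:

    import numpy as np

    x = np.array([[1.0, 2.0, 3.0]])    # one sample, input_dim = 3
    print(np.dot(x, np.ones((3, 2))))  # [[6. 6.]] -- each output is the row sum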