Example no. 1
0
def assert_array_almost_equal_diff(x, y, digits, err_msg=''):
    """Assert that two arrays agree to ``digits`` decimal digits of
    relative precision.

    The maximum absolute elementwise difference is normalized by the
    largest absolute value found in either array; the assertion fails
    when this relative difference is >= 10**(-digits).

    x, y -- array-likes (anything accepted by numx.asarray).
    digits -- number of decimal digits of relative agreement required.
    err_msg -- extra text appended to the shape-mismatch message.

    Raises AssertionError on shape mismatch (unless one input is a
    scalar/0-d array) or when the arrays differ too much.
    """
    x, y = numx.asarray(x), numx.asarray(y)
    msg = '\nArrays are not almost equal'
    # Shapes must match unless one of the inputs is 0-dimensional.
    # NOTE: numx.alltrue was deprecated in NumPy 1.25 and removed in
    # NumPy 2.0; numx.all is the supported equivalent.
    assert 0 in [len(numx.shape(x)), len(numx.shape(y))] \
           or (len(numx.shape(x)) == len(numx.shape(y)) and
               numx.all(numx.equal(numx.shape(x), numx.shape(y)))), \
           msg + ' (shapes %s, %s mismatch):\n\t' \
           % (numx.shape(x), numx.shape(y)) + err_msg
    # Maximum elementwise difference relative to the largest magnitude.
    maxdiff = max(numx.ravel(abs(x - y))) / \
              max(max(abs(numx.ravel(x))), max(abs(numx.ravel(y))))
    # Complex values contribute through both components; halve so the
    # tolerance stays comparable to the real case.
    if numx.iscomplexobj(x) or numx.iscomplexobj(y):
        maxdiff = maxdiff / 2
    cond = maxdiff < 10 ** (-digits)
    msg = (msg + '\n\t Relative maximum difference: %e' % (maxdiff) +
           '\n\t' + 'Array1: ' + str(x) +
           '\n\t' + 'Array2: ' + str(y) +
           '\n\t' + 'Absolute Difference: ' + str(abs(y - x)))
    assert cond, msg
Example no. 2
0
def assert_array_almost_equal_diff(x, y, digits, err_msg=''):
    """Check that ``x`` and ``y`` are equal up to ``digits`` decimal
    digits of relative precision, raising AssertionError otherwise.

    The relative error is the largest elementwise absolute difference
    divided by the largest absolute entry of either array, and the check
    passes when that error is below 10**(-digits).

    x, y -- array-likes converted via numx.asarray.
    digits -- required number of decimal digits of agreement.
    err_msg -- extra context appended to the shape-mismatch message.
    """
    x, y = numx.asarray(x), numx.asarray(y)
    msg = '\nArrays are not almost equal'
    # A 0-d (scalar) operand is exempt from the shape comparison.
    # numx.alltrue was removed in NumPy 2.0; use numx.all instead.
    assert 0 in [len(numx.shape(x)), len(numx.shape(y))] \
           or (len(numx.shape(x)) == len(numx.shape(y)) and
               numx.all(numx.equal(numx.shape(x), numx.shape(y)))), \
           msg + ' (shapes %s, %s mismatch):\n\t' \
           % (numx.shape(x), numx.shape(y)) + err_msg
    # Normalized maximum difference between the two arrays.
    maxdiff = max(numx.ravel(abs(x - y))) / \
              max(max(abs(numx.ravel(x))), max(abs(numx.ravel(y))))
    # Halve for complex data so real and complex tolerances match.
    if numx.iscomplexobj(x) or numx.iscomplexobj(y):
        maxdiff = maxdiff / 2
    cond = maxdiff < 10 ** (-digits)
    msg = (msg + '\n\t Relative maximum difference: %e' % (maxdiff) +
           '\n\t' + 'Array1: ' + str(x) +
           '\n\t' + 'Array2: ' + str(y) +
           '\n\t' + 'Absolute Difference: ' + str(abs(y - x)))
    assert cond, msg
Example no. 3
0
 def _train(self, x, labels):
     """Add the sample points to the classes.

     x -- The data points (one point per row; presumably a 2-d array,
         since rows are selected with axis=0 -- confirm against callers).
     labels -- Can be a list, tuple or array of labels (one for each data
         point) or a single label, in which case all input data is assigned
         to the same class (computationally this is more efficient).
     """
     # A sequence of labels: group the data points by class first.
     if isinstance(labels, (list, tuple, numx.ndarray)):
         labels = numx.asarray(labels)
         # One _add_samples call per distinct label.
         for label in set(labels):
             # Select the rows of x whose label matches.
             x_label = numx.compress(labels == label, x, axis=0)
             self._add_samples(x_label, label)
     else:
         # A single label: the whole batch belongs to one class.
         self._add_samples(x, labels)
Example no. 4
0
 def _train(self, x, labels):
     """Add the sampel points to the classes.
     
     labels -- Can be a list, tuple or array of labels (one for each data
         point) or a single label, in which case all input data is assigned
         to the same class (computationally this is more efficient).
     """
     if isinstance(labels, (list, tuple, numx.ndarray)):
         labels = numx.asarray(labels)
         for label in set(labels):
             x_label = numx.compress(labels == label, x, axis=0)
             self._add_samples(x_label, label)
     else:
         self._add_samples(x, labels)
Example no. 5
0
 def _train(self, x, labels):
     """Update the mean information for the different classes.
     
     :param x: The data.
     :type x: numpy.ndarray
     :param labels: Can be a list, tuple or array of labels (one for each data
         point) or a single label, in which case all input data is assigned
         to the same class (computationally this is more efficient).
     """
     if isinstance(labels, (list, tuple, numx.ndarray)):
         labels = numx.asarray(labels)
         for label in set(labels):
             x_label = numx.compress(labels == label, x, axis=0)
             self._update_mean(x_label, label)
     else:
         self._update_mean(x, labels)
Example no. 6
0
 def _train(self, x, labels):
     """
     :param x: Data
     :type x: numpy.ndarray
     :param labels: Can be a list, tuple or array of labels (one for each data point)
         or a single label, in which case all input data is assigned to
         the same class.
     """
     # if labels is a number, all x's belong to the same class
     if isinstance(labels, (list, tuple, numx.ndarray)):
         labels_ = numx.asarray(labels)
         # get all classes from cl
         for lbl in set(labels_):
             x_lbl = numx.compress(labels_ == lbl, x, axis=0)
             self._update_covs(x_lbl, lbl)
     else:
         self._update_covs(x, labels)
Example no. 7
0
 def _train(self, x, labels):
     """Split the training data by class label and pass each group to
     ``_update_covs``.

     :Arguments:
       x
           data (rows are selected with axis=0, so presumably one
           sample per row -- confirm against callers)
       labels
           Can be a list, tuple or array of labels (one for each data point)
           or a single label, in which case all input data is assigned to
           the same class.
     """
     # if labels is a number, all x's belong to the same class
     if isinstance(labels, (list, tuple, numx.ndarray)):
         labels_ = numx.asarray(labels)
         # get all classes from cl
         for lbl in set(labels_):
             # rows of x whose label equals lbl
             x_lbl = numx.compress(labels_ == lbl, x, axis=0)
             self._update_covs(x_lbl, lbl)
     else:
         self._update_covs(x, labels)
Example no. 8
0
 def test_switchboard_gradient2(self):
     """Test gradient for a larger switchboard.

     Compares the gradient produced by the "gradient" extension against
     a straightforward reference implementation built from the
     switchboard's connection list.
     """
     dim = 100
     # random routing: each output channel reads one random input channel
     connections = [int(i) for i in numx.random.random((dim,)) * (dim-1)]
     sboard = mdp.hinet.Switchboard(input_dim=dim, connections=connections)
     x = numx.random.random((10, dim))
     # assume a 5-dimensional gradient at this stage
     grad = numx.random.random((10, dim, 5))
     # original reference implementation
     def _switchboard_grad(self, x):
         # one-hot rows: output i depends only on input connections[i]
         grad = numx.zeros((self.output_dim, self.input_dim))
         grad[range(self.output_dim), self.connections] = 1
         return numx.tile(grad, (len(x), 1, 1))
     with mdp.extension("gradient"):
         result = sboard._gradient(x, grad)
         ext_grad = result[1]["grad"]
         tmp_grad = _switchboard_grad(sboard, x)
         # chain rule: compose the switchboard Jacobian with the
         # incoming gradient, sample by sample
         ref_grad = numx.asarray([numx.dot(tmp_grad[i], grad[i])
                                  for i in range(len(tmp_grad))])
     assert numx.all(ext_grad == ref_grad)
Example no. 9
0
    def test_switchboard_gradient2(self):
        """Test gradient for a larger switchboard.

        The extension's gradient must equal a reference Jacobian built
        directly from the switchboard's connection table.
        """
        dim = 100
        rand_conns = [int(c) for c in numx.random.random((dim, )) * (dim - 1)]
        sboard = mdp.hinet.Switchboard(input_dim=dim, connections=rand_conns)
        x = numx.random.random((10, dim))
        # assume a 5-dimensional gradient at this stage
        grad = numx.random.random((10, dim, 5))

        # original reference implementation
        def _reference_grad(node, data):
            jac = numx.zeros((node.output_dim, node.input_dim))
            jac[range(node.output_dim), node.connections] = 1
            return numx.tile(jac, (len(data), 1, 1))

        with mdp.extension("gradient"):
            ext_grad = sboard._gradient(x, grad)[1]["grad"]
            base = _reference_grad(sboard, x)
            ref_grad = numx.asarray([numx.dot(base[i], grad[i])
                                     for i in range(len(base))])
        assert numx.all(ext_grad == ref_grad)
Example no. 10
0
    def __init__(self,
                 in_channels_xy,
                 field_channels_xy,
                 in_channel_dim=1,
                 out_channels=1,
                 field_dstr='uniform'):
        """Calculate the connections.

        Keyword arguments:
        in_channels_xy -- 2-Tuple with number of input channels in the x- and
            y-direction (or a single number for both). This has to be
            specified, since the actual input is only one 1d array.
        field_channels_xy -- 2-Tuple with number of channels in each field in
            the x- and y-direction (or a single number for both).
        field_dstr -- Distribution for random sampling of fields. (default: uniform).
                      Currently supports only uniform random distribution.
        in_channel_dim -- Number of connections per input channel.
        out_channels -- Number of output channels (default: 1)
        """
        # Normalize scalar arguments into (x, y) tuples.
        in_channels_xy = to_2tuple(in_channels_xy)
        field_channels_xy = to_2tuple(field_channels_xy)
        self.field_dstr = field_dstr
        self.in_channels_xy = in_channels_xy
        self.field_channels_xy = field_channels_xy
        self.out_channels = out_channels

        # Flat dimensionality of one output field: every channel of every
        # grid point inside the field contributes.
        out_channel_dim = (in_channel_dim * field_channels_xy[0] *
                           field_channels_xy[1])
        # check parameters for inconsistencies
        for i, name in enumerate(["x", "y"]):
            if field_channels_xy[i] > in_channels_xy[i]:
                err = ("Number of field channels exceeds the number of "
                       "input channels in %s-direction. "
                       "This would lead to an empty connection list." % name)
                raise RandomChannelSwitchboardException(err)

        # Maps 2-d (x, y) channel coordinates to flat indices.
        in_trans = CoordinateTranslator(*in_channels_xy)

        # setup a look up table for accessing connected grid points.
        # This incurs a one-time expensive computation when the node is created but
        # keeps the execute call efficient computationally.
        # Indexed as [y_origin, x_origin] -> flat connection list for the
        # field whose upper-left corner is at that origin.
        self._lut_conn = numx.zeros([
            self.in_channels_xy[1] - self.field_channels_xy[1] + 1,
            self.in_channels_xy[0] - self.field_channels_xy[0] + 1,
            out_channel_dim
        ],
                                    dtype=numx.int32)

        # All valid field origins along each axis (a field must fit fully
        # inside the input grid).
        x_fields_origin = range(self.in_channels_xy[0] -
                                self.field_channels_xy[0] + 1)
        y_fields_origin = range(self.in_channels_xy[1] -
                                self.field_channels_xy[1] + 1)

        # Cartesian product of the origins, as an (N, 2) array of
        # (x_origin, y_origin) rows.
        tot_conns = self._lut_conn.shape[0] * self._lut_conn.shape[1]
        origins = numx.asarray(numx.meshgrid(x_fields_origin,
                                             y_fields_origin)).reshape(
                                                 2, tot_conns).T

        # NOTE(review): xrange is Python-2 only; the rest of this snippet
        # follows the same vintage.
        for i in xrange(tot_conns):
            # inner loop over field
            x_start_chan, y_start_chan = origins[i]
            connections = numx.zeros([out_channel_dim], dtype=numx.int32)
            first_out_con = 0
            for y_in_chan in range(y_start_chan,
                                   y_start_chan + field_channels_xy[1]):
                for x_in_chan in range(x_start_chan,
                                       x_start_chan + field_channels_xy[0]):
                    # First flat input index of this grid point's channels.
                    first_in_con = (
                        in_trans.image_to_index(x_in_chan, y_in_chan) *
                        in_channel_dim)
                    connections[first_out_con:first_out_con +
                                in_channel_dim] = range(
                                    first_in_con,
                                    first_in_con + in_channel_dim)
                    first_out_con += in_channel_dim
            self._lut_conn[y_start_chan, x_start_chan] = connections

        # Draw the initial random field selection and delegate the rest of
        # the setup to the parent switchboard.
        connections = self._new_connections()
        super(RandomChannelSwitchboard, self).__init__(
            input_dim=(in_channel_dim * in_channels_xy[0] * in_channels_xy[1]),
            connections=connections,
            out_channel_dim=out_channel_dim,
            in_channel_dim=in_channel_dim)
Example no. 11
0
    def _adjust_output_dim(self):
        """Estimate ``self.output_dim`` from ``self.desired_variance``.

        For every data point the k nearest neighbors are found; the
        squared singular values of the centered neighborhood give a
        local estimate of the intrinsic dimensionality (the number of
        directions needed to reach the desired variance, linearly
        interpolated).  The median of the local estimates, rounded up,
        becomes the output dimensionality.

        Returns the tuple (Qs, sig2s, nbrss): per-point neighborhood
        covariance matrices, squared singular values and neighbor
        indices, pre-computed here to spare precious time later.
        """
        if self.verbose:
            # print(single_arg) is valid on both Python 2 and 3; the
            # original bare print statement is a SyntaxError on Python 3.
            print(' - adjusting output dim:')

        #otherwise, we need to compute output_dim
        #                  from desired_variance
        M = self.data
        k = self.k
        N, d_in = M.shape

        m_est_array = []
        Qs = numx.zeros((N, k, k))
        sig2s = numx.zeros((N, d_in))
        nbrss = numx.zeros((N, k), dtype='i')

        for row in range(N):
            # find the k nearest neighbors of M[row], excluding the
            # point itself (hence the [1:k+1] slice)
            M_Mi = M - M[row]
            nbrs = numx.argsort((M_Mi**2).sum(1))[1:k + 1]
            M_Mi = M_Mi[nbrs]
            # compute covariance matrix of distances
            Qs[row, :, :] = mult(M_Mi, M_Mi.T)
            nbrss[row, :] = nbrs

            # squared singular values of M_Mi give the variance along
            # each principal direction of the neighborhood
            sig2 = (svd(M_Mi, compute_uv=0))**2
            sig2s[row, :sig2.shape[0]] = sig2

            # intrinsic dimensionality at this point: the number of
            # eigenvalues needed to sum to the desired total variance,
            # with linear interpolation between integer counts
            sig2 /= sig2.sum()
            S = sig2.cumsum()
            m_est = S.searchsorted(self.desired_variance)
            # NOTE(review): if desired_variance exceeds S[-1], m_est can
            # index past the end of sig2 -- presumably desired_variance
            # is < 1 by construction; confirm against callers.
            if m_est > 0:
                m_est += (self.desired_variance - S[m_est - 1]) / sig2[m_est]
            else:
                m_est = self.desired_variance / sig2[m_est]
            m_est_array.append(m_est)

        # median of the local estimates, rounded up, is the global value
        m_est_array = numx.asarray(m_est_array)
        self.output_dim = int(numx.ceil(numx.median(m_est_array)))
        if self.verbose:
            msg = ('      output_dim = %i'
                   ' for variance of %.2f' %
                   (self.output_dim, self.desired_variance))
            print(msg)

        return Qs, sig2s, nbrss
Example no. 12
0
    def _adjust_output_dim(self):
        """Estimate ``self.output_dim`` from ``self.desired_variance``
        and pre-compute neighborhood quantities reused by the caller.

        Returns the tuple (Qs, sig2s, nbrss): per-point neighborhood
        covariance matrices, squared singular values and neighbor
        indices.
        """
        # this function is called if we need to compute the number of
        # output dimensions automatically; some quantities that are
        # useful later are pre-calculated to spare precious time

        # NOTE(review): bare print statements make this Python-2 only.
        if self.verbose:
            print ' - adjusting output dim:'

        #otherwise, we need to compute output_dim
        #                  from desired_variance
        M = self.data
        k = self.k
        N, d_in = M.shape

        m_est_array = []
        Qs = numx.zeros((N, k, k))
        sig2s = numx.zeros((N, d_in))
        nbrss = numx.zeros((N, k), dtype='i')

        for row in range(N):
            #-----------------------------------------------
            #  find k nearest neighbors
            #  (the [1:k+1] slice drops the point itself)
            #-----------------------------------------------
            M_Mi = M-M[row]
            nbrs = numx.argsort((M_Mi**2).sum(1))[1:k+1]
            M_Mi = M_Mi[nbrs]
            # compute covariance matrix of distances
            Qs[row, :, :] = mult(M_Mi, M_Mi.T)
            nbrss[row, :] = nbrs

            #-----------------------------------------------
            # singular values of M_Mi give the variance:
            #   use this to compute intrinsic dimensionality
            #   at this point
            #-----------------------------------------------
            sig2 = (svd(M_Mi, compute_uv=0))**2
            sig2s[row, :sig2.shape[0]] = sig2

            #-----------------------------------------------
            # use sig2 to compute intrinsic dimensionality of the
            #   data at this neighborhood.  The dimensionality is the
            #   number of eigenvalues needed to sum to the total
            #   desired variance (linearly interpolated between
            #   integer counts)
            #-----------------------------------------------
            sig2 /= sig2.sum()
            S = sig2.cumsum()
            m_est = S.searchsorted(self.desired_variance)
            if m_est > 0:
                m_est += (self.desired_variance-S[m_est-1])/sig2[m_est]
            else:
                m_est = self.desired_variance/sig2[m_est]
            m_est_array.append(m_est)

        # median of the per-point estimates, rounded up
        m_est_array = numx.asarray(m_est_array)
        self.output_dim = int( numx.ceil( numx.median(m_est_array) ) )
        if self.verbose:
            msg = ('      output_dim = %i'
                   ' for variance of %.2f' % (self.output_dim,
                                              self.desired_variance))
            print msg

        return Qs, sig2s, nbrss