Example #1
    def _inverse(self, y, n=None):
        """Project 'y' to the input space using the first 'n' components.
        
        :param y: Vectors from the output space.
        :type y: numpy.ndarray
        
        :param n: The number of components to use for projection to the
            input space. If 'n' is not set, use all available components.
        :type n: int
        
        :return: The projected vectors.
        :rtype: numpy.ndarray
        
        :raises mdp.NodeException: If 'n' is larger than the node's output dimension.
        """
        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = ("y has dimension %d,"
                         " should be at most %d" % (n, self.output_dim))
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :])
        return mult(y, v)
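Usage note (not part of the example above): in MDP, mult is an alias for numpy.dot, so the projection and back-projection in _inverse reduce to two matrix products. A minimal standalone sketch, assuming a plain NumPy setup where the columns of v stand in for the node's components:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((100, 5))                    # data in the input space
v = np.linalg.svd(x - x.mean(axis=0), full_matrices=False)[2].T
y = np.dot(x, v[:, :3])                              # project on the first 3 components
x_back = np.dot(y, v[:, :3].T)                       # back-project, as _inverse does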
Example #2
def matmult_n_MDP_benchmark(dim):
    """    This benchmark multiplies two non-contiguous matrices using the
    MDP internal matrix multiplication routine.
    First argument matrix dimensionality"""
    a = numx_rand.random((dim,dim)).T
    b = numx_rand.random((dim,dim)).T
    mult(a,b)
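A quick aside on what "non-contiguous" means here: transposing a freshly allocated array returns a strided view rather than a copy, which is exactly the case this benchmark exercises. A small check, assuming plain NumPy:

import numpy as np

a = np.random.random((512, 512)).T
print(a.flags['C_CONTIGUOUS'])   # False: the transpose is only a view
np.dot(a, a)                     # what mult(a, b) dispatches to in MDP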
Example #3
    def _train(self, x, y):
        """
        :param x: Array of different input observations.
        :type x: numpy.ndarray

        :param y: Array of size (x.shape[0], output_dim) that contains the 
            observed output to the input x's.
        :type y: numpy.ndarray
        """
        # initialize internal vars if necessary
        if self._xTx is None:
            if self.with_bias:
                x_size = self._input_dim + 1
            else:
                x_size = self._input_dim
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        if self.with_bias:
            x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
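For context, a sketch of what typically follows this kind of accumulation once training stops (assumed here, based on ordinary least squares; the actual node may add a bias column or regularization): the summed x^T x and x^T y are solved for the weight matrix.

import numpy as np

xTx = np.zeros((3, 3))
xTy = np.zeros((3, 2))
for _ in range(10):                       # data arriving chunk by chunk
    x = np.random.random((50, 3))
    y = np.random.random((50, 2))
    xTx += np.dot(x.T, x)
    xTy += np.dot(x.T, y)
beta = np.linalg.solve(xTx, xTy)          # least-squares weight matrix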
Example #4
    def _inverse(self, y, n=None):
        """Project 'y' to the input space using the first 'n' components.
        
        :param y: Vectors from the output space.
        :type y: numpy.ndarray
        
        :param n: The number of components to use for projection to the
            input space. If 'n' is not set, use all available components.
        :type n: int
        
        :return: The projected vectors.
        :rtype: numpy.ndarray
        
        :raises mdp.NodeException: If 'n' is larger than the node's output dimension.
        """
        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = ("y has dimension %d,"
                         " should be at most %d" % (n, self.output_dim))
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :])
        return mult(y, v)
Example #5
 def _sample_h(self, v, x):
     # returns P(h=1|v,W,b) and a sample from it
     dynamic_b = mult(x, self.b)
     probs = Oger.utils.LogisticFunction.f(self.bh + mult(v, self.w) +
                                           dynamic_b)
     h = (probs > random(probs.shape)).astype(self.dtype)
     return probs, h
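The same sampling step written as a standalone function, assuming generic NumPy arrays for the weights and biases and omitting the dynamic bias term (shapes: v is (n, visible), w is (visible, hidden), bh is (hidden,)):

import numpy as np

def sample_hidden(v, w, bh, rng=np.random.default_rng()):
    probs = 1.0 / (1.0 + np.exp(-(bh + np.dot(v, w))))      # P(h=1 | v, W, bh)
    h = (probs > rng.random(probs.shape)).astype(float)     # Bernoulli sample
    return probs, h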
Example #6
def matmult_n_MDP_benchmark(dim):
    """    This benchmark multiplies two non-contiguous matrices using the
    MDP internal matrix multiplication routine.
    First argument matrix dimensionality"""
    a = numx_rand.random((dim, dim)).T
    b = numx_rand.random((dim, dim)).T
    mult(a, b)
Example #7
    def _train(self, x):
        """Update the principal components.
        
        :param x: Data vectors.
        :type x: numpy.ndarray
        """
        [w1, w2] = self._amnesic(self.get_current_train_iteration() + 1)
        red_j = self.output_dim
        red_j_flag = False
        explained_var = 0.0

        r = x
        for j in range(self.output_dim):
            v = self._v[:, j:j + 1]
            d = self.d[j]

            v = w1 * v + w2 * mult(r, v) / d * r.T
            d = mdp.numx_linalg.norm(v)
            vn = old_div(v, d)
            r = r - mult(r, vn) * vn.T
            explained_var += d

            if not red_j_flag:
                ratio = explained_var / self._var_tot
                if ratio > self.var_rel:
                    red_j = j
                    red_j_flag = True

            self._v[:, j:j + 1] = v
            self.v[:, j:j + 1] = vn
            self.d[j] = d

        self._var_tot = explained_var
        self._reduced_dims = red_j
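The deflation step in this loop is worth isolating: after a component is updated and normalized, its contribution is removed from the residual so that the next component is estimated on what remains. A minimal sketch with a single input row, assuming plain NumPy:

import numpy as np

r = np.random.random((1, 4))                    # residual of one observation
vn = np.random.random((4, 1))
vn /= np.linalg.norm(vn)                        # unit-norm component
r = r - np.dot(r, vn) * vn.T                    # remove the projection onto vn
print(np.dot(r, vn))                            # ~0: residual orthogonal to vn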
Example #8
 def _energy(self, v, h):
     if self._gaussian:
         return ((((v - self.bv) ** 2).sum() / 2) - mult(h, self.bh) -
                 (mult(v, self.w) * h).sum(axis=1))
     else:
         return (-mult(v, self.bv) - mult(h, self.bh) -
                 (mult(v, self.w) * h).sum(axis=1))
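Written out, the Bernoulli branch is the standard RBM energy E(v, h) = -v.bv - h.bh - sum_ij v_i W_ij h_j, evaluated row-wise over a batch; the Gaussian branch only replaces the visible bias term by a quadratic one. A plain NumPy version of the Bernoulli case, with assumed shapes:

import numpy as np

v = np.random.randint(0, 2, (5, 4)).astype(float)   # batch of visible states
h = np.random.randint(0, 2, (5, 3)).astype(float)   # batch of hidden states
w = np.random.random((4, 3))
bv, bh = np.random.random(4), np.random.random(3)
energy = -v.dot(bv) - h.dot(bh) - (v.dot(w) * h).sum(axis=1)   # shape (5,)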
Example #9
    def _train(self, x):
        """Update the principal components.
        
        :param x: Data vectors.
        :type x: numpy.ndarray
        """
        [w1, w2] = self._amnesic(self.get_current_train_iteration() + 1)
        red_j = self.output_dim
        red_j_flag = False
        explained_var = 0.0

        r = x
        for j in range(self.output_dim):
            v = self._v[:, j:j + 1]
            d = self.d[j]

            v = w1 * v + w2 * mult(r, v) / d * r.T
            d = mdp.numx_linalg.norm(v)
            vn = old_div(v, d)
            r = r - mult(r, vn) * vn.T
            explained_var += d

            if not red_j_flag:
                ratio = explained_var / self._var_tot
                if ratio > self.var_rel:
                    red_j = j
                    red_j_flag = True

            self._v[:, j:j + 1] = v
            self.v[:, j:j + 1] = vn
            self.d[j] = d

        self._var_tot = explained_var
        self._reduced_dims = red_j
Example #10
 def _energy(self, v, h):
     if self._gaussian:
         return ((((v - self.bv)**2).sum() / 2) - mult(h, self.bh) -
                 (mult(v, self.w) * h).sum(axis=1))
     else:
         return (-mult(v, self.bv) - mult(h, self.bh) -
                 (mult(v, self.w) * h).sum(axis=1))
Example #11
 def _calculate_gradient(self, y):
     x = self._last_x
     dy = Oger.utils.LogisticFunction.df(x, self._last_y) * y
     dw = mult(x.T, dy)
     self._gradient_vector = numx.concatenate((dw.ravel(), dy.sum(axis=0)))
     dx = mult(self.w, dy.T).T
     return dx
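A shape check for this backward pass, with assumed layer sizes: the weight gradient is x^T dy (same shape as w), and the gradient handed to the layer below is dy W^T, which is what mult(self.w, dy.T).T computes.

import numpy as np

x = np.random.random((8, 4))       # forward-pass input
dy = np.random.random((8, 3))      # gradient coming from the layer above
w = np.random.random((4, 3))
dw = np.dot(x.T, dy)               # (4, 3), matches w
dx = np.dot(w, dy.T).T             # (8, 4), matches x; equals np.dot(dy, w.T)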
Example #12
    def _train(self, x, y):
        """
        :param x: Array of different input observations.
        :type x: numpy.ndarray

        :param y: Array of size (x.shape[0], output_dim) that contains the 
            observed output to the input x's.
        :type y: numpy.ndarray
        """
        # initialize internal vars if necessary
        if self._xTx is None:
            if self.with_bias:
                x_size = self._input_dim + 1
            else:
                x_size = self._input_dim
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        if self.with_bias:
            x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
Example #13
    def _inverse(self, y, n=None):
        """Project data from the output to the input space using the
        first 'n' components.
        
        If 'n' is not set, use all available components.
        
        :param y: Data to be projected to the input space.
        :type y: numpy.ndarray
        
        :param n: Number of first principal components.
        :type n: int
        
        :return: The projected data
        :rtype: numpy.ndarray
        """

        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = ("y has dimension %d,"
                         " should be at most %d" % (n, self.output_dim))
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :]) + self.avg
        return mult(y, v) + self.avg
Example #14
    def _inverse(self, y, n=None):
        """Project data from the output to the input space using the
        first 'n' components.
        
        If 'n' is not set, use all available components.
        
        :param y: Data to be projected to the input space.
        :type y: numpy.ndarray
        
        :param n: Number of first principal components.
        :type n: int
        
        :return: The projected data
        :rtype: numpy.ndarray
        """

        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = ("y has dimension %d,"
                         " should be at most %d" % (n, self.output_dim))
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :]) + self.avg
        return mult(y, v) + self.avg
Example #15
 def _calculate_gradient(self, y):
     x = self._last_x
     dy = Oger.utils.LogisticFunction.df(x, self._last_y) * y
     dw = mult(x.T, dy)
     self._gradient_vector = numx.concatenate((dw.ravel(), dy.sum(axis=0)))
     dx = mult(self.w, dy.T).T
     return dx
Example #16
 def _gsfa(self, x):
     for layernum in xrange(self._nlayers):
         z = self.fa[layernum](x)
         if layernum == self._nlayers - 1:
             x = mult(z, self.v[layernum][:, 1:self.output_dim + 1])
         else:
             x = mult(z, self.v[layernum][:, :self._npoly])
     return x
Example #17
 def _calculate_gradient(self, y):
     ''' y is the gradient that is propagated from the previous layer'''
     x = self._last_x
     dy = self.transfer_func.df(x, self._last_y) * y
     dw = mult(x.T, dy)
     self._gradient_vector = numx.concatenate((dw.ravel(), dy.sum(axis=0)))
     dx = mult(self.w, dy.T).T
     return dx
Example #18
 def _calculate_gradient(self, y):
     ''' y is the gradient that is propagated from the previous layer'''
     x = self._last_x
     dy = self.transfer_func.df(x, self._last_y) * y
     dw = mult(x.T, dy)
     self._gradient_vector = numx.concatenate((dw.ravel(), dy.sum(axis=0)))
     dx = mult(self.w, dy.T).T
     return dx
Example #19
 def _train(self, x):
     phi, phi_, a, r, done = self._split_x(x)
     td_err = r + self._gamma * self.get_value(phi_) - self.get_value(phi)
     grad_theta = self._alpha * mult(phi.T, td_err)
     grad_psi = self._beta * mult(phi.T,
                                  (td_err > 0) * (a - self.get_action(phi)))
     self._theta += grad_theta
     self._psi += grad_psi
     self.td_err = td_err
Example #20
 def _sample_v(self, h, x):
     # returns  P(v=1|h,W,b) and a sample from it
     dynamic_b = mult(x, self.a)
     v_in = self.bv + mult(h, self.w.T) + dynamic_b
     if self._gaussian:
         return v_in, v_in
     else:
         probs = Oger.utils.LogisticFunction.f(v_in)
         v = (probs > random(probs.shape)).astype(self.dtype)
         return probs, v
Example #21
 def _sample_v(self, h, x):
     # returns  P(v=1|h,W,b) and a sample from it
     dynamic_b = mult(x, self.a)
     v_in = self.bv + mult(h, self.w.T) + dynamic_b
     if self._gaussian:
         return v_in, v_in
     else:
         probs = Oger.utils.LogisticFunction.f(v_in)
         v = (probs > random(probs.shape)).astype(self.dtype)
         return probs, v
Example #22
 def _energy(self, v, h, x):
     ba = mult(x, self.a)
     bb = mult(x, self.b)
     ba += self.bv
     bb += self.bh
     if self._gaussian:
         return (((v - ba) ** 2).sum() / 2 - (h * bb).sum(axis=1) -
                 (mult(v, self.w) * h).sum(axis=1))
     else:
         return (-(v * ba).sum(axis=1) - (h * bb).sum(axis=1) -
                 (mult(v, self.w) * h).sum(axis=1))
Example #23
 def _energy(self, v, h, x):
     ba = mult(x, self.a)
     bb = mult(x, self.b)
     ba += self.bv
     bb += self.bh
     if self._gaussian:
         return (((v - ba)**2).sum() / 2 - (h * bb).sum(axis=1) -
                 (mult(v, self.w) * h).sum(axis=1))
     else:
         return (-(v * ba).sum(axis=1) - (h * bb).sum(axis=1) -
                 (mult(v, self.w) * h).sum(axis=1))
Example #24
def test_mult_diag():
    dim = 20
    d = numx_rand.random(size=(dim,))
    dd = numx.diag(d)
    mtx = numx_rand.random(size=(dim, dim))

    res1 = utils.mult(dd, mtx)
    res2 = utils.mult_diag(d, mtx, left=True)
    assert_array_almost_equal(res1, res2, 10)
    res1 = utils.mult(mtx, dd)
    res2 = utils.mult_diag(d, mtx, left=False)
    assert_array_almost_equal(res1, res2, 10)
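The identity this test relies on can also be checked directly: multiplying by diag(d) from the left scales rows, from the right scales columns, so the dense product can be replaced by broadcasting (presumably what mult_diag exploits). Assuming plain NumPy:

import numpy as np

d = np.random.random(4)
m = np.random.random((4, 4))
assert np.allclose(np.dot(np.diag(d), m), d[:, None] * m)   # left: scale rows
assert np.allclose(np.dot(m, np.diag(d)), m * d[None, :])   # right: scale columns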
Example #25
    def _train(self, x, y):
        # initialize internal vars if necessary
        if self._xTx is None:
            x_size = self._input_dim + 1
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
Example #26
File: pca_nodes.py, Project: pmolfese/afni
    def _inverse(self, y, n=None):
        """Project 'y' to the input space using the first 'n' components.
        If 'n' is not set, use all available components."""
        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = "y has dimension %d," " should be at most %d" % (n, self.output_dim)
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :]) + self.avg
        return mult(y, v) + self.avg
Example #27
    def get_CD_gradient(self, x, n_updates=1):
        """Use Gibbs sampling to estimate the contrastive divergence gradient.

            - x: a binary matrix having different variables on different columns and observations on the rows (concatenation of visibles and context)
            - n_updates: number of CD iterations. Default value: 1

        Returns a tuple (dw, dbv, dbh, da, db) that contains the gradients of the
        weights and the biases of the visibles and the hidden respectively and
        the autoregressive gradients da and db.
        """

        # useful quantities
        n = x.shape[0]
        v, x = self._split_data(x)
        w, a, b, bv, bh = self.w, self.a, self.b, self.bv, self.bh

        # first update of the hidden units for the data term
        ph_data, h_data = self._sample_h(v, x)
        # n updates of both v and h for the model term
        h_model = h_data.copy()
        for i in range(n_updates):
            pv_model, v_model = self._sample_v(h_model, x)
            ph_model, h_model = self._sample_h(v_model, x)

        # find dw
        data_term = mult(v.T, ph_data)
        model_term = mult(v_model.T, ph_model)
        dw = (data_term - model_term) / n

        # find da
        data_term = v
        model_term = v_model
        # Should I include the weight decay here as well?
        da = mult(x.T, data_term - model_term) / n

        # find db
        data_term = ph_data
        model_term = ph_model
        db = mult(x.T, data_term - model_term) / n

        # find dbv
        data_term = v.sum(axis=0)
        model_term = v_model.sum(axis=0)
        dbv = (data_term - model_term) / n

        # find dbh
        data_term = ph_data.sum(axis=0)
        model_term = ph_model.sum(axis=0)
        dbh = (data_term - model_term) / n

        return (dw, dbv, dbh, da, db)
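How a caller might consume the returned tuple is not shown here; a minimal sketch, assuming a plain gradient-ascent update with a fixed learning rate (the real training loop may add momentum or weight decay):

def apply_cd_update(node, x, lr=0.1, n_updates=1):
    # 'node' stands for a trained instance of the class above
    dw, dbv, dbh, da, db = node.get_CD_gradient(x, n_updates)
    node.w += lr * dw
    node.bv += lr * dbv
    node.bh += lr * dbh
    node.a += lr * da
    node.b += lr * db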
Example #28
    def get_CD_gradient(self, x, n_updates=1):
        """Use Gibbs sampling to estimate the contrastive divergence gradient.

            - x: a binary matrix having different variables on different columns and observations on the rows (concatenation of visibles and context)
            - n_updates: number of CD iterations. Default value: 1

        Returns a tuple (dw, dbv, dbh, da, db) that contains the gradients of the
        weights and the biases of the visibles and the hidden respectively and
        the autoregressive gradients da and db.
        """

        # useful quantities
        n = x.shape[0]
        v, x = self._split_data(x)
        w, a, b, bv, bh = self.w, self.a, self.b, self.bv, self.bh

        # first update of the hidden units for the data term
        ph_data, h_data = self._sample_h(v, x)
        # n updates of both v and h for the model term
        h_model = h_data.copy()
        for i in range(n_updates):
            pv_model, v_model = self._sample_v(h_model, x)
            ph_model, h_model = self._sample_h(v_model, x)

        # find dw
        data_term = mult(v.T, ph_data)
        model_term = mult(v_model.T, ph_model)
        dw = (data_term - model_term) / n

        # find da
        data_term = v
        model_term = v_model
        # Should I include the weight decay here as well?
        da = mult(x.T, data_term - model_term) / n

        # find db
        data_term = ph_data
        model_term = ph_model
        db = mult(x.T, data_term - model_term) / n

        # find dbv
        data_term = v.sum(axis=0)
        model_term = v_model.sum(axis=0)
        dbv = (data_term - model_term) / n

        # find dbh
        data_term = ph_data.sum(axis=0)
        model_term = ph_model.sum(axis=0)
        dbh = (data_term - model_term) / n

        return (dw, dbv, dbh, da, db)
Example #29
    def _inverse(self, y, n=None):
        """Project 'y' to the input space using the first 'n' components.
        If 'n' is not set, use all available components."""
        if n is None:
            n = y.shape[1]
        if n > self.output_dim:
            error_str = ("y has dimension %d,"
                         " should be at most %d" % (n, self.output_dim))
            raise mdp.NodeException(error_str)

        v = self.get_recmatrix()
        if n is not None:
            return mult(y, v[:n, :]) + self.avg
        return mult(y, v) + self.avg
Example #30
 def _get_laplacian(adj, normalize=True):
     if normalize:
         d = adj.sum(axis=1)
         identity = mdp.numx.identity(len(d))
         mat_lapl = identity * d - adj
         osd = mdp.numx.zeros(len(d))
         for i in range(len(d)):
             if d[i] > 0:
                 osd[i] = mdp.numx.sqrt(1.0 / d[i])
         t = identity * osd
         return mult(t, mult(mat_lapl, t))
     else:
         mat_degree = mdp.numx.diag(adj.sum(axis=0))
         return mat_degree - adj
Example #31
File: learn.py, Project: noverkill/isolated
def guess(input, reservoir, dirname):
	
    #print input.shape
    
    """
    pylab.plot(input)
    pylab.show()			
    pylab.figure()
    """
	
    try:
        beta = np.loadtxt(dirname + os.sep + 'beta.mat')
    except:
        return 0   #19
        
    x = reservoir.execute(input)

    #m = readout._execute(x)
    #m = mult(x, readout.beta)
    m = mult(x, beta)
        
    # find maximum place of m
    mcs = np.zeros(m.shape[1])

    for i in range(m.shape[1]):
        mc = sum(m[:,i]) / m.shape[1]
        mcs[i] = mc 

    return mcs.argmax()
Example #32
 def _execute(self, x, n=None):
     """Project the input on the first 'n' principal components.
     
     :param x: The input that is to project.
     :type x: numpy.ndarray
     
     :param n: The number of first principal components to project on.
         If 'n' is not set, use all available components.
     :type n: int
     
     :return: The projected input.
     :rtype: numpy.ndarray
     """
     if n is not None:
         return mult(x, self.v[:, :n])
     return mult(x, self.v)
Example #33
    def _down_pass(self, h, top_updates=0, epsilon=0.1, decay=0., momentum=0.):
        """
        top_updates -- set >0 for top node, so that it ends up sampling
                       from the prior
        """
        # TODO: check input

        pv, v = self._sample_v(h)
        for _ in range(top_updates):
            ph, h = self._sample_h(v)
            pv, v = self._sample_v(h)
            
        # reconstruct hidden state
        ph1, h1 = self._sample_h(v)
        
        # adapt generative weights
        delta = mult(v.T, (h - ph1))/v.shape[0]
        self.dw_sleep = (momentum*self.dw_sleep
                         + epsilon*(delta - decay*self.w_rec))
        self.w_rec += self.dw_sleep

        # adapt biases
        delta = (h - ph1).mean(axis=0)
        self.dbh = momentum*self.dbh + epsilon*delta
        self.bh += self.dbh
        
        return v, pv, mdp.utils.norm2(self.dbh)
Example #34
    def _down_pass(self, h, top_updates=0, epsilon=0.1, decay=0.0, momentum=0.0):
        """
        top_updates -- set >0 for top node, so that it ends up sampling
                       from the prior
        """
        # TODO: check input

        pv, v = self._sample_v(h)
        for _ in range(top_updates):
            ph, h = self._sample_h(v)
            pv, v = self._sample_v(h)

        # reconstruct hidden state
        ph1, h1 = self._sample_h(v)

        # adapt generative weights
        delta = mult(v.T, (h - ph1)) / v.shape[0]
        self.dw_sleep = momentum * self.dw_sleep + epsilon * (delta - decay * self.w_rec)
        self.w_rec += self.dw_sleep

        # adapt biases
        delta = (h - ph1).mean(axis=0)
        self.dbh = momentum * self.dbh + epsilon * delta
        self.bh += self.dbh

        return v, pv, mdp.utils.norm2(self.dbh)
Example #35
    def _execute(self, data, n=None):
        """ Execute learned transformation on *data*.
        
        Projects the given data to the axis of the most significant
        eigenvectors and returns the data in this lower-dimensional subspace.
        """
        # 'INITIALIZATION'
        if self.retained_channels == None:
            self.retained_channels = data.shape[1]
        if n is None:
            n = self.retained_channels
        if self.channel_names is None:
            self.channel_names = data.channel_names
        if len(self.channel_names) < self.retained_channels:
            self.retained_channels = len(self.channel_names)
            self._log(
                "To many channels chosen for the retained channels! Replaced by maximum number.",
                level=logging.CRITICAL)
        if not (self.output_dim == self.retained_channels):
            # overwrite internal output_dim variable, since it is set wrong
            self._output_dim = self.retained_channels

        # 'Real' Processing
        #projected_data = super(PCANodeWrapper, self)._execute(data, n)
        x = data.view(numpy.ndarray)
        projected_data = mult(x - self.avg, self.v[:, :self.retained_channels])

        if self.new_channels is None:
            self.new_channel_names = [
                "pca%03d" % i for i in range(projected_data.shape[1])
            ]
        return TimeSeries(projected_data, self.new_channel_names,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
Example #36
 def _inverse(self, y):
     # counter-rotate input
     x = mult(y, self.RP.T)
     # invert whitening node if needed
     if not self.whitened:
         x = self.white.inverse(x)
     return x
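The counter-rotation works because the inverse of a rotation matrix is its transpose; this sketch assumes RP is orthogonal, as the "counter-rotate" comment suggests. A quick check with an orthogonal matrix built via QR, using plain NumPy:

import numpy as np

q, _ = np.linalg.qr(np.random.random((4, 4)))   # orthogonal matrix
y = np.dot(np.random.random((10, 4)), q)        # rotate some data
x = np.dot(y, q.T)                              # counter-rotate, as above
assert np.allclose(np.dot(x, q), y)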
Example #37
    def _execute(self, x):
        #----------------------------------------------------
        # similar algorithm to that within self.stop_training()
        #  refer there for notes & comments on code
        #----------------------------------------------------
        N = self.data.shape[0]
        Nx = x.shape[0]
        W = numx.zeros((Nx, N), dtype=self.dtype)

        k, r = self.k, self.r
        d_out = self.output_dim
        Q_diag_idx = numx.arange(k)

        for row in range(Nx):
            #find nearest neighbors of x in M
            M_xi = self.data-x[row]
            nbrs = numx.argsort( (M_xi**2).sum(1) )[:k]
            M_xi = M_xi[nbrs]

            #find corrected covariance matrix Q
            Q = mult(M_xi, M_xi.T)
            if r is None and k > d_out:
                sig2 = (svd(M_xi, compute_uv=0))**2
                r = numx.sum(sig2[d_out:])
                Q[Q_diag_idx, Q_diag_idx] += r
            if r is not None:
                Q[Q_diag_idx, Q_diag_idx] += r

            #solve for weights
            w = self._refcast(numx_linalg.solve(Q , numx.ones(k)))
            w /= w.sum()
            W[row, nbrs] = w

        #multiply weights by result of SVD from training
        return numx.dot(W, self.training_projection)
Example #38
 def get_value(self, phi, a=None):
     """Returns q value(s)."""
     if a is not None:
         return (phi * self._theta[:, a.ravel()].T).sum(axis=1,
                                                        keepdims=True)
     else:
         return mult(phi, self._theta)
Example #39
 def _inverse(self, y):
     # counter-rotate input
     x = mult(y, self.RP.T)
     # invert whitening node if needed
     if not self.whitened:
         x = self.white.inverse(x)
     return x
Example #40
    def get_quadratic_form(self, nr):
        """Return the matrix H, the vector f and the constant c of the
        quadratic form 1/2 x'Hx + f'x + c that defines the output
        of the component 'nr' of the SFA node.

        :param nr: The component 'nr' of the SFA node.

        :returns: The matrix H, the vector f and the constant c of the
            quadratic form.
        :rtype: numpy.ndarray, numpy.ndarray, float
        """
        if self.sf is None:
            self._if_training_stop_training()

        sf = self.sf[:, nr]
        c = -mult(self.avg, sf)
        n = self.input_dim
        f = sf[:n]
        h = numx.zeros((n, n), dtype=self.dtype)
        k = n
        for i in range(n):
            for j in range(n):
                if j > i:
                    h[i, j] = sf[k]
                    k = k + 1
                elif j == i:
                    h[i, j] = 2 * sf[k]
                    k = k + 1
                else:
                    h[i, j] = h[j, i]

        return QuadraticForm(h, f, c, dtype=self.dtype)
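Given the convention stated in the docstring, evaluating the returned quadratic form for a single input vector is a one-liner; a small helper, assuming NumPy arrays for H and f and a scalar c:

import numpy as np

def eval_quadratic_form(h, f, c, x):
    # returns 1/2 x'Hx + f'x + c for one input vector x
    return 0.5 * np.dot(x, np.dot(h, x)) + np.dot(f, x) + c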
Example #41
    def _execute(self, x):
        #----------------------------------------------------
        # similar algorithm to that within self.stop_training()
        #  refer there for notes & comments on code
        #----------------------------------------------------
        N = self.data.shape[0]
        Nx = x.shape[0]
        W = numx.zeros((Nx, N), dtype=self.dtype)

        k, r = self.k, self.r
        d_out = self.output_dim
        Q_diag_idx = numx.arange(k)

        for row in range(Nx):
            #find nearest neighbors of x in M
            M_xi = self.data - x[row]
            nbrs = numx.argsort((M_xi**2).sum(1))[:k]
            M_xi = M_xi[nbrs]

            #find corrected covariance matrix Q
            Q = mult(M_xi, M_xi.T)
            if r is None and k > d_out:
                sig2 = (svd(M_xi, compute_uv=0))**2
                r = numx.sum(sig2[d_out:])
                Q[Q_diag_idx, Q_diag_idx] += r
            if r is not None:
                Q[Q_diag_idx, Q_diag_idx] += r

            #solve for weights
            w = self._refcast(numx_linalg.solve(Q, numx.ones(k)))
            w /= w.sum()
            W[row, nbrs] = w

        #multiply weights by result of SVD from training
        return numx.dot(W, self.training_projection)
Example #42
 def _execute(self, x, n=None):
     """Project the input on the first 'n' principal components.
     
     :param x: The input that is to project.
     :type x: numpy.ndarray
     
     :param n: The number of first principal components to project on.
         If 'n' is not set, use all available components.
     :type n: int
     
     :return: The projected input.
     :rtype: numpy.ndarray
     """
     if n is not None:
         return mult(x, self.v[:, :n])
     return mult(x, self.v)
Example #43
    def _execute(self, data, n = None):
        """ Execute learned transformation on *data*.
        
        Projects the given data to the axis of the most significant
        eigenvectors and returns the data in this lower-dimensional subspace.
        """
        # 'INITIALIZATION'
        if self.retained_channels==None:
            self.retained_channels = data.shape[1]
        if n is None:
            n = self.retained_channels
        if self.channel_names is None:
            self.channel_names = data.channel_names
        if len(self.channel_names)<self.retained_channels:
            self.retained_channels = len(self.channel_names)
            self._log("To many channels chosen for the retained channels! Replaced by maximum number.",level=logging.CRITICAL)
        if not(self.output_dim==self.retained_channels):
            # overwrite internal output_dim variable, since it is set wrong
            self._output_dim = self.retained_channels

        # 'Real' Processing
        #projected_data = super(PCANodeWrapper, self)._execute(data, n)
        x = data.view(numpy.ndarray)
        projected_data = mult(x-self.avg, self.v[:, :self.retained_channels])
        
        if self.new_channels is None:
            self.new_channel_names = ["pca%03d" % i 
                                for i in range(projected_data.shape[1])]
        return TimeSeries(projected_data, self.new_channel_names,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
Example #44
    def _sample_v(self, h, sample_l=False, concatenate=True):
        # returns  P(v=1|h,W,b), a sample from it, P(l=1|h,W,b),
        # and a sample from it

        ldim, vdim = self._labels_dim, self._visible_dim

        # activation
        a = self.bv + mult(h, self.w.T)
        av, al = a[:, :vdim], a[:, vdim:]

        # ## visible units: logistic activation
        probs_v = old_div(1.,(1. + exp(-av)))
        v = (probs_v > random(probs_v.shape)).astype('d')

        # ## label units: softmax activation
        # subtract maximum to regularize exponent
        exponent = al - rrep(al.max(axis=1), ldim)
        probs_l = exp(exponent)
        probs_l /= rrep(probs_l.sum(axis=1), ldim)

        if sample_l:
            # ?? todo: I'm sure this can be optimized
            l = numx.zeros((h.shape[0], ldim))
            for t in range(h.shape[0]):
                l[t, :] = mdp.numx_rand.multinomial(1, probs_l[t, :])
        else:
            l = probs_l.copy()

        if concatenate:
            probs = numx.concatenate((probs_v, probs_l), axis=1)
            x = numx.concatenate((v, l), axis=1)
            return probs, x
        else:
            return probs_v, probs_l, v, l
Example #45
    def _sample_v(self, h, sample_l=False, concatenate=True):
        # returns  P(v=1|h,W,b), a sample from it, P(l=1|h,W,b),
        # and a sample from it

        ldim, vdim = self._labels_dim, self._visible_dim

        # activation
        a = self.bv + mult(h, self.w.T)
        av, al = a[:, :vdim], a[:, vdim:]

        # ## visible units: logistic activation
        probs_v = old_div(1., (1. + exp(-av)))
        v = (probs_v > random(probs_v.shape)).astype('d')

        # ## label units: softmax activation
        # subtract maximum to regularize exponent
        exponent = al - rrep(al.max(axis=1), ldim)
        probs_l = exp(exponent)
        probs_l /= rrep(probs_l.sum(axis=1), ldim)

        if sample_l:
            # ?? todo: I'm sure this can be optimized
            l = numx.zeros((h.shape[0], ldim))
            for t in range(h.shape[0]):
                l[t, :] = mdp.numx_rand.multinomial(1, probs_l[t, :])
        else:
            l = probs_l.copy()

        if concatenate:
            probs = numx.concatenate((probs_v, probs_l), axis=1)
            x = numx.concatenate((v, l), axis=1)
            return probs, x
        else:
            return probs_v, probs_l, v, l
Example #46
    def get_quadratic_form(self, nr):
        """
        Return the matrix H, the vector f and the constant c of the
        quadratic form 1/2 x'Hx + f'x + c that defines the output
        of the component 'nr' of the SFA node.
        """
        if self.sf is None:
            self._if_training_stop_training()

        sf = self.sf[:, nr]
        c = -mult(self.avg, sf)
        n = self.input_dim
        f = sf[:n]
        h = numx.zeros((n, n), dtype=self.dtype)
        k = n
        for i in range(n):
            for j in range(n):
                if j > i:
                    h[i, j] = sf[k]
                    k = k+1
                elif j == i:
                    h[i, j] = 2*sf[k]
                    k = k+1
                else:
                    h[i, j] = h[j, i]

        return QuadraticForm(h, f, c, dtype=self.dtype)
Example #47
 def _execute(self, x):
     """Return slow feature response.
     
     :return: Slow feature response.
     """
     if self.remove_mean:
         x = self.avgnode._execute(x)
     return mult(x, self.sf)
Example #48
    def _execute(self, x):
        """Return slow feature response.

        :return: Slow feature response.
        """
        if self.remove_mean:
            x = self.avgnode._execute(x)
        return mult(x, self.sf)
Example #49
 def _sample_v(self, h):
     # returns  P(v=1|h,W,b) and a sample from it
     v_in = self.bv + mult(h, self.w.T)
     if self._gaussian:
         return v_in, v_in
     else:
         probs = 1. / (1. + exp(-v_in))
         v = (probs > random(probs.shape)).astype(self.dtype)
         return probs, v
Example #50
def test_random_rot():
    dim = 20
    tlen = 10
    for i in xrange(tlen):
        x = utils.random_rot(dim, dtype='f')
        assert x.dtype.char=='f', 'Wrong dtype'
        y = utils.mult(x.T, x)
        assert_almost_equal(numx_linalg.det(x), 1., 4)
        assert_array_almost_equal(y, numx.eye(dim), 4)
Example #51
 def _sample_v(self, h):
     # returns  P(v=1|h,W,b) and a sample from it
     v_in = self.bv + mult(h, self.w.T)
     if self._gaussian:
         return v_in, v_in
     else:
         probs = 1. / (1. + exp(-v_in))
         v = (probs > random(probs.shape)).astype(self.dtype)
         return probs, v
Example #52
 def _execute(self, x, n=None):
     """Compute the output of the slowest functions.
     If 'n' is an integer, then use the first 'n' slowest components."""
     if n:
         sf = self.sf[:, :n]
         bias = self._bias[:n]
     else:
         sf = self.sf
         bias = self._bias
     return mult(x, sf) - bias
Example #53
 def _execute(self, x, n=None):
     """Compute the output of the slowest functions.
     If 'n' is an integer, then use the first 'n' slowest components."""
     if n:
         sf = self.sf[:, :n]
         bias = self._bias[:n]
     else:
         sf = self.sf
         bias = self._bias
     return mult(x, sf) - bias
Example #54
    def _execute(self, x):
        if not self._is_initialized:
            self.initialize()

        n = x.shape[0]
        if n > 1:
            bias = numx.tile(self.b, (n, 1))
        else:
            bias = self.b
        y = self.transfer_func.f(mult(x, self.w) + bias)
        return y
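A side note on the bias handling above: NumPy broadcasting already adds a bias given as a single row (or a 1-D array of length output_dim) to every row of an (n, output_dim) activation, so the explicit tiling is equivalent but not required. Assuming plain NumPy:

import numpy as np

x = np.random.random((6, 4))
w = np.random.random((4, 3))
b = np.random.random(3)
assert np.allclose(np.dot(x, w) + b, np.dot(x, w) + np.tile(b, (6, 1)))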
Example #55
File: CPRBMNode.py, Project: dfm/pyarxiv
  def _sample_v(self, h):
    # returns  P(v=n|h,W,b) and a sample from it

    # un-normalized poisson rate, l
    l = exp(self.bv + mult(h, self.w.T))
    # now we normalize it wrt length of wordvector and partition function
    l = l * self.v.sum(axis=1)[:,newaxis] / l.sum(axis=1)[:,newaxis]       

    probs = self._Ps(self.v, l)
    v = (probs > random(probs.shape)).astype(self.dtype)
    return probs, v
Example #56
    def _train(self, x):
        """Update the minor components."""
        c = mult(x.T, x)
        for j in range(self.output_dim):
            v = self.v[:, j:j + 1]
            d = self.d[j]

            n = self.eps / (1 + j * 1.2)
            a = mult(c, v)
            if self.normalize:
                v = (1.5 - n) * v - n * a
            else:
                v = (1.5 - n * (d ** 2)) * v - n * a
            l = mult(v.T, v)
            c += self.gamma * mult(v, v.T) / l

            self.v[:, j:j + 1] = v
            self.d[j] = mdp.numx.sqrt(l)
            if self.normalize:
                self.v[:, j:j + 1] = old_div(v, self.d[j])
Example #57
    def _execute(self, x):
        if not self._is_initialized:
            self.initialize()

        n = x.shape[0]
        if n > 1:
            bias = numx.tile(self.b, (n, 1))
        else:
            bias = self.b
        y = self.transfer_func.f(mult(x, self.w) + bias)
        return y
Example #58
    def _train(self, x):
        """Update the minor components."""
        c = mult(x.T, x)
        for j in range(self.output_dim):
            v = self.v[:, j:j + 1]
            d = self.d[j]

            n = self.eps / (1 + j * 1.2)
            a = mult(c, v)
            if self.normalize:
                v = (1.5 - n) * v - n * a
            else:
                v = (1.5 - n * (d**2)) * v - n * a
            l = mult(v.T, v)
            c += self.gamma * mult(v, v.T) / l

            self.v[:, j:j + 1] = v
            self.d[j] = mdp.numx.sqrt(l)
            if self.normalize:
                self.v[:, j:j + 1] = old_div(v, self.d[j])
Example #59
def _mgs(a):
    m, n = a.shape
    v = a.copy()
    r = numx.zeros((n, n))
    for i in range(n):
        r[i, i] = numx_linalg.norm(v[:, i])
        v[:, i] = v[:, i]/r[i, i]
        for j in range(i+1, n):
            r[i, j] = mult(v[:, i], v[:, j])
            v[:, j] = v[:, j] - r[i, j]*v[:, i]
    # q is v
    return v, r
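A property check one could run against _mgs (assuming numx is bound to numpy, as it is in MDP, so the routine can be exercised with ordinary arrays): the returned pair should reconstruct the input and have orthonormal columns in q.

import numpy as np

def check_qr(q, r, a, tol=1e-8):
    ok_reconstruct = np.allclose(np.dot(q, r), a, atol=tol)
    ok_orthonormal = np.allclose(np.dot(q.T, q), np.eye(q.shape[1]), atol=tol)
    return ok_reconstruct and ok_orthonormal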
Example #60
def testSFANode():
    dim = 10000
    freqs = [2*numx.pi*1, 2*numx.pi*5]
    t = numx.linspace(0, 1, num=dim)
    mat = numx.array([numx.sin(freqs[0]*t), numx.sin(freqs[1]*t)]).T
    mat = old_div(mat - mean(mat[:-1, :], axis=0), std(mat[:-1, :], axis=0))
    des_mat = mat.copy()
    mat = mult(mat, uniform((2, 2))) + uniform(2)
    sfa = mdp.nodes.SFANode()
    sfa.train(mat)
    out = sfa.execute(mat)
    correlation = old_div(mult(des_mat[:-1, :].T, out[:-1, :]), (dim - 2))
    assert sfa.get_eta_values(t=0.5) is not None, 'get_eta is None'
    assert_array_almost_equal(abs(correlation), numx.eye(2), decimal - 3)
    sfa = mdp.nodes.SFANode(output_dim=1)
    sfa.train(mat)
    out = sfa.execute(mat)
    assert out.shape[1] == 1, 'Wrong output_dim'
    correlation = old_div(mult(des_mat[:-1, :1].T, out[:-1, :]), (dim - 2))
    assert_array_almost_equal(abs(correlation), numx.eye(1), decimal - 3)