Example #1
    def autocorr(x):
        """
        Computes the (normalised) auto-correlation function of a
        one-dimensional sequence of numbers.

        Utilises the numpy correlate function, which is based on an efficient
        convolution implementation.

        Inputs:
        x - one-dimensional numpy array

        Outputs:
        Vector of autocorrelation values for lags from zero to the maximum
        possible
        """
        GenericTests.check_type(x, "x", np.ndarray, 1)

        # remove the mean, compute the normalising constant
        xunbiased = x - np.mean(x)
        xnorm = np.sum(xunbiased ** 2)

        # correlate the sequence with itself
        acor = np.correlate(xunbiased, xunbiased, mode='same')

        # use only the second half (non-negative lags), normalise
        # (integer division: len(acor) / 2 would be a float in Python 3)
        acor = acor[len(acor) // 2:] / xnorm

        return acor
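For orientation, a minimal usage sketch (hypothetical data; autocorr_demo inlines the body above so the snippet runs without the project's GenericTests): the autocorrelation of an AR(1) sequence should decay roughly geometrically with the lag.

    import numpy as np

    def autocorr_demo(x):
        # inlined copy of autocorr above, minus the type check
        xunbiased = x - np.mean(x)
        xnorm = np.sum(xunbiased ** 2)
        acor = np.correlate(xunbiased, xunbiased, mode='same')
        return acor[len(acor) // 2:] / xnorm

    # generate an AR(1) sequence x[t] = 0.9 * x[t-1] + noise
    rng = np.random.default_rng(0)
    x = np.zeros(1000)
    for t in range(1, len(x)):
        x[t] = 0.9 * x[t - 1] + rng.standard_normal()

    acf = autocorr_demo(x)
    print(acf[0])  # 1.0 up to rounding: the zero-lag term is xnorm / xnorm
    print(acf[1])  # roughly 0.9, the AR(1) coefficient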
Example #2
    def weights_from_patterns(P):
        """
        Computes a weight matrix so that the network has stationary points at a
        number of given patterns:

        W_{ij} = \sum_{\mu=1}^n (2 p_i^\mu - 1)(2 p_j^\mu - 1) and
        W_{ii} = 0,

        where p_i^\mu is the i-th bit of the \mu-th pattern vector given in P.
        (Note the conversion from {0,1} to {-1,+1} via 2x-1.)

        (Bias should be set to zero.)
        """
        GenericTests.check_type(P, "P", numpy.ndarray, required_shapelen=2)

        dim = P.shape[1]
        n = P.shape[0]

        if n <= 0:
            raise ValueError("Need at least one pattern.")

        # train network: Hebbian outer-product rule over all patterns
        W = numpy.zeros((dim, dim))
        for i in range(dim):
            for j in range(i, dim):
                for mu in range(n):
                    W[i, j] += (2 * P[mu, i] - 1) * (2 * P[mu, j] - 1)

                # optional normalisation by the number of patterns:
                # W[i, j] /= n
                W[j, i] = W[i, j]

        numpy.fill_diagonal(W, 0)
        return W
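A short usage sketch with made-up patterns (assumes weights_from_patterns and its GenericTests dependency are importable): with spins s = 2P - 1, each stored pattern should be a stationary point of the usual threshold update, i.e. sign((Ws)_i) == s_i wherever the local field is non-zero.

    import numpy

    P = numpy.array([[1, 0, 1, 0, 1, 0],
                     [1, 1, 0, 0, 1, 1]])
    W = weights_from_patterns(P)

    assert numpy.allclose(W, W.T)          # symmetric
    assert numpy.all(numpy.diag(W) == 0)   # zero diagonal

    S = 2 * P - 1                          # {0,1} -> {-1,+1}
    for s in S:
        h = W.dot(s)                       # local fields
        # stored pattern is stationary where the field is non-zero
        assert numpy.all(s[h != 0] == numpy.sign(h)[h != 0])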
Example #4
    def kernel(self, X, Y=None):

        GenericTests.check_type(X, 'X', np.ndarray, 2)
        if Y is None:
            Y = X

        nX = np.shape(X)[0]
        nY = np.shape(Y)[0]
        K = np.zeros((nX, nY))
        ii = 0
        for x in X:
            # the VAR design matrices of x do not depend on y, so build them
            # once per outer iteration
            Ax, Bx = self.formVARmatrices(x)
            degx = np.shape(Bx)[1]
            jj = 0
            for y in Y:
                Ay, By = self.formVARmatrices(y)
                degy = np.shape(By)[1]
                # block weighting: each sequence contributes mass 1/2, spread
                # uniformly over its own columns
                deltaMat = np.diag(np.concatenate((0.5 * np.ones(degx) / degx,
                                                   0.5 * np.ones(degy) / degy)))
                A = np.concatenate((Ax, Ay), axis=1)
                B = np.concatenate((Bx, By), axis=1)
                Adel = A.dot(deltaMat)
                AdelAT = Adel.dot(A.T)
                # (AdelAT + I)^{-1} Adel via a linear solve rather than an
                # explicit inverse
                foo = np.linalg.solve(AdelAT + np.eye(self.p), Adel)
                precomputedMat = deltaMat - (deltaMat.dot(A.T)).dot(foo)
                # log-determinant term via slogdet for numerical stability
                _, first_term = np.linalg.slogdet(AdelAT + np.eye(self.p))
                second_term = (B.dot(precomputedMat)).dot(B.T) + 1.
                K[ii, jj] += (-(1 - self.alpha) * first_term
                              - self.alpha * second_term)
                jj += 1
            ii += 1
        return np.exp(-0.5 * K / (self.sigma ** 2.))
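formVARmatrices is project code and not shown here. As a rough, hypothetical reconstruction consistent with the shapes used above (np.eye(self.p) forces A to have p rows, and the scalar quadratic form forces B to a single row), one could build AR(p) design matrices for a univariate series like this; form_ar_matrices is an invented name, not the project's API:

    import numpy as np

    def form_ar_matrices(x, p):
        # columns are time steps t = p..T-1; row k of A holds x[t-1-k]
        T = len(x)
        A = np.column_stack([x[t - p:t][::-1] for t in range(p, T)])  # p x (T-p)
        B = x[p:].reshape(1, -1)                                      # 1 x (T-p)
        return A, B

    x = np.sin(np.linspace(0, 10, 50))
    A, B = form_ar_matrices(x, p=2)
    print(A.shape, B.shape)   # (2, 48) (1, 48)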
Example #7
    def __init__(self, rho, nu=1.5, sigma=1.0):
        Kernel.__init__(self)
        # GenericTests.check_type(rho, 'rho', float)
        GenericTests.check_type(nu, 'nu', float)
        GenericTests.check_type(sigma, 'sigma', float)

        self.rho = rho
        self.nu = nu
        self.sigma = sigma
Example #9
    def log_pdf(self, X):
        GenericTests.check_type(X, 'X', numpy.ndarray, 2)
        # this also enforces correct data ranges
        # (numpy.bool8 was removed in NumPy 2.0; numpy.bool_ is the same dtype)
        if X.dtype != numpy.bool_:
            raise ValueError("X must be a boolean numpy array")

        if not X.shape[1] == self.dimension:
            raise ValueError("Dimension of X does not match own dimension")

        result = numpy.zeros(len(X))
        for i in range(len(X)):
            result[i] = numpy.inner(X[i], self.bias + self.W.dot(X[i]))
        return result
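A self-contained sketch of the quantity being computed (toy W and bias; the class wiring is omitted): the unnormalised log-probability of a binary state x is x . (bias + Wx).

    import numpy

    W = numpy.array([[0., 1.], [1., 0.]])
    bias = numpy.array([0.5, -0.5])
    X = numpy.array([[True, True], [True, False]])

    log_p = numpy.array([numpy.inner(x, bias + W.dot(x)) for x in X])
    print(log_p)   # [2.0, 0.5]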
Example #10
    def log_pdf(self, X):
        GenericTests.check_type(X, 'X', numpy.ndarray, 2)
        # this also enforces correct data ranges
        if X.dtype != numpy.bool_:
            raise ValueError("X must be a boolean numpy array")

        if not X.shape[1] == self.dimension:
            raise ValueError("Dimension of X does not match own dimension")

        result = numpy.zeros(len(X))
        for i in range(len(X)):
            # free energy of an RBM with the hidden units marginalised out:
            # biasx . x + sum_j log(1 + exp(W_j . x + biash_j))
            result[i] = numpy.inner(self.biasx, X[i]) + sum(
                numpy.logaddexp(0, numpy.inner(self.W[j], X[i]) + self.biash[j])
                for j in range(self.num_hidden_units))
        return result
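The same quantity in isolation (toy parameters, class wiring omitted): up to the partition function, the log-probability of a visible state x is biasx . x + sum_j log(1 + exp(W_j . x + biash_j)), i.e. the RBM free energy with hidden units marginalised out.

    import numpy

    W = numpy.array([[1., -1.], [0.5, 0.5]])   # 2 hidden x 2 visible units
    biasx = numpy.array([0.1, 0.2])
    biash = numpy.array([0., -0.3])
    x = numpy.array([True, False])

    log_p = numpy.inner(biasx, x) + numpy.sum(numpy.logaddexp(0., W.dot(x) + biash))
    print(log_p)   # unnormalised log-probability of x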
Example #12
    def kernel(self, X, Y=None):

        GenericTests.check_type(X, 'X', np.ndarray, 2)
        # if X=Y, use the more efficient pdist call, which exploits symmetry
        normX = np.reshape(np.linalg.norm(X, axis=1), (len(X), 1))
        if Y is None:
            dists = squareform(pdist(X, 'euclidean'))
            normY = normX.T
        else:
            GenericTests.check_type(Y, 'Y', np.ndarray, 2)
            assert np.shape(X)[1] == np.shape(Y)[1]
            normY = np.reshape(np.linalg.norm(Y, axis=1), (1, len(Y)))
            dists = cdist(X, Y, 'euclidean')
        # fractional Brownian motion covariance:
        # k(x, y) = (|x|^alpha + |y|^alpha - |x - y|^alpha) / 2
        K = 0.5 * (normX ** self.alpha + normY ** self.alpha - dists ** self.alpha)
        return K
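A standalone check of the formula (class and GenericTests omitted; alpha = 1 chosen arbitrarily): on the diagonal the distance term vanishes, so k(x, x) = |x|^alpha.

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    alpha = 1.0
    X = np.random.default_rng(0).normal(size=(5, 3))

    normX = np.linalg.norm(X, axis=1).reshape(-1, 1)
    dists = squareform(pdist(X, 'euclidean'))
    K = 0.5 * (normX ** alpha + normX.T ** alpha - dists ** alpha)

    assert np.allclose(np.diag(K), normX.ravel() ** alpha)  # k(x, x) = |x|^alpha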
Example #14
    def __init__(self, W, bias):
        GenericTests.check_type(W, 'W', numpy.ndarray, 2)
        GenericTests.check_type(bias, 'bias', numpy.ndarray, 1)

        if not W.shape[0] == W.shape[1]:
            raise ValueError("W must be square")

        if not bias.shape[0] == W.shape[0]:
            raise ValueError("dimensions of W and bias must agree")

        if not numpy.all(numpy.diag(W) == 0):
            raise ValueError("W must have zeros along the diagonal")

        if not numpy.allclose(W, W.T):
            raise ValueError("W must be symmetric")

        Distribution.__init__(self, W.shape[0])

        self.W = W
        self.bias = bias
Example #15
    def kernel(self, X, Y=None):

        GenericTests.check_type(X, 'X', numpy.ndarray, 2)
        # if X=Y, use the more efficient pdist call, which exploits symmetry
        if Y is None:
            dists = squareform(pdist(X, 'euclidean'))
        else:
            GenericTests.check_type(Y, 'Y', numpy.ndarray, 2)
            assert numpy.shape(X)[1] == numpy.shape(Y)[1]
            dists = cdist(X, Y, 'euclidean')
        if self.nu == 0.5:
            # for nu = 1/2, the Matern class corresponds to the
            # Ornstein-Uhlenbeck process
            K = (self.sigma ** 2.) * numpy.exp(-dists / self.rho)
        elif self.nu == 1.5:
            K = ((self.sigma ** 2.) * (1 + numpy.sqrt(3.) * dists / self.rho)
                 * numpy.exp(-numpy.sqrt(3.) * dists / self.rho))
        elif self.nu == 2.5:
            K = ((self.sigma ** 2.)
                 * (1 + numpy.sqrt(5.) * dists / self.rho
                    + 5.0 * (dists ** 2.) / (3.0 * self.rho ** 2.))
                 * numpy.exp(-numpy.sqrt(5.) * dists / self.rho))
        else:
            raise NotImplementedError("only nu in {0.5, 1.5, 2.5} is supported")
        return K
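A standalone check of the nu = 1.5 branch (class and GenericTests omitted; rho and sigma chosen arbitrarily): the Gram matrix is symmetric with sigma^2 on the diagonal, since the distance there is zero.

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    rho, sigma = 2.0, 1.5
    X = np.random.default_rng(1).normal(size=(6, 2))

    d = squareform(pdist(X, 'euclidean'))
    K = sigma ** 2 * (1 + np.sqrt(3.) * d / rho) * np.exp(-np.sqrt(3.) * d / rho)

    assert np.allclose(K, K.T)
    assert np.allclose(np.diag(K), sigma ** 2)   # zero distance on the diagonal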
Example #18
    def __init__(self, W, biasx, biash):
        GenericTests.check_type(W, 'W', numpy.ndarray, 2)
        GenericTests.check_type(biasx, 'biasx', numpy.ndarray, 1)
        GenericTests.check_type(biash, 'biash', numpy.ndarray, 1)
        if not biash.shape[0] == W.shape[0]:
            raise ValueError(
                "dimensions of W and biash must agree along # of hidden units")
        if not biasx.shape[0] == W.shape[1]:
            raise ValueError(
                "dimensions of W and biasx must agree along # of visible units")

        Distribution.__init__(self, W.shape[1])

        self.W = W
        self.biasx = biasx
        self.biash = biash
        self.num_hidden_units = W.shape[0]
Example #19
    def __init__(self, mu, spread, N=3):
        GenericTests.check_type(mu, 'mu', numpy.ndarray, 1)
        GenericTests.check_type(spread, 'spread', float)
        GenericTests.check_type(N, 'N', int)

        if mu.dtype != numpy.bool_:
            raise ValueError("Mean must be a boolean numpy array")

        Distribution.__init__(self, len(mu))

        if not (spread > 0. and spread < 1.):
            raise ValueError("Spread must be a probability")

        self.mu = mu
        self.spread = spread
        self.N = N
Example #21
    def __init__(self, alpha=1.0):
        Kernel.__init__(self)
        GenericTests.check_type(alpha, 'alpha', float)

        self.alpha = alpha