Code example #1
    def evaluate(self, g1, g2, debug=False):
        """
        Find the kernel evaluation between two graphs
        """
        #Ensure g1 is always the smaller graph
        if g1.getNumVertices() > g2.getNumVertices():
            return self.evaluate(g2, g1, debug)

        #We ought to have something that makes the matrices the same size 
        W1, W2 = self.__getWeightMatrices(g1, g2)
        K1, K2 = self.__getKernelMatrices(g1, g2)

        #Find common eigenspace
        S1, U = numpy.linalg.eigh(self.tau*W1 + (1-self.tau)*K1)
        S2, V = numpy.linalg.eigh(self.tau*W2 + (1-self.tau)*K2)

        #Find approximate diagonals
        SK1 = numpy.diag(Util.mdot(U.T, K1, U))
        SW1 = numpy.diag(Util.mdot(U.T, W1, U))
        SK2 = numpy.diag(Util.mdot(V.T, K2, V))
        SW2 = numpy.diag(Util.mdot(V.T, W2, V))

        evaluation = self.tau * numpy.dot(SW1, SW2) + (1-self.tau)*numpy.dot(SK1, SK2)
        
        if debug:
            P = numpy.dot(V, U.T)
            f = self.getObjectiveValue(self.tau, P, g1, g2)
            return (evaluation, f, P, SW1, SW2, SK1, SK2)
        else:
            return evaluation
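A minimal self-contained sketch of the core computation above, with random symmetric matrices standing in for the weight and kernel matrices returned by the private helpers (the matrices and tau here are illustrative stand-ins only):

import numpy

def randomSym(n):
    #Random symmetric matrix standing in for a weight or kernel matrix
    A = numpy.random.rand(n, n)
    return (A + A.T)/2

tau = 0.5
n = 5
W1, W2 = randomSym(n), randomSym(n)
K1, K2 = randomSym(n), randomSym(n)

#Eigenbases of the convex combinations, as in evaluate()
S1, U = numpy.linalg.eigh(tau*W1 + (1-tau)*K1)
S2, V = numpy.linalg.eigh(tau*W2 + (1-tau)*K2)

#Approximate diagonals of each matrix in its eigenbasis
SW1 = numpy.diag(U.T.dot(W1).dot(U))
SW2 = numpy.diag(V.T.dot(W2).dot(V))
SK1 = numpy.diag(U.T.dot(K1).dot(U))
SK2 = numpy.diag(V.T.dot(K2).dot(V))

evaluation = tau*numpy.dot(SW1, SW2) + (1-tau)*numpy.dot(SK1, SK2)
print(evaluation)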
Code example #2
File: PrimalDualCCA.py Project: rezaarmand/sandbox
    def learnModel(self, X, Y):
        """
        Learn the CCA primal-dual directions.
        """
        self.trainX = X
        self.trainY = Y

        numExamples = X.shape[0]
        numFeatures = Y.shape[1]

        a = 10**-5
        I = numpy.eye(numExamples)
        I2 = numpy.eye(numFeatures)
        Kx = self.kernelX.evaluate(X, X) + a * I
        Kxx = numpy.dot(Kx, Kx)
        Kxy = numpy.dot(Kx, Y)
        Cyy = numpy.dot(Y.T, Y) + a * I2

        Z1 = numpy.zeros((numExamples, numExamples))
        Z2 = numpy.zeros((numFeatures, numFeatures))
        Z3 = numpy.zeros((numExamples, numFeatures))

        #Note: a small value was added to the diagonals of Kx and Cyy above to deal with low rank
        A = numpy.c_[Z1, Kxy]
        A1 = numpy.c_[Kxy.T, Z2]
        A = numpy.r_[A, A1]
        A = (A + A.T) / 2  #Symmetrise to guard against numerical asymmetry

        B = numpy.c_[(1 - self.tau1) * Kxx - self.tau1 * Kx, Z3]
        B1 = numpy.c_[Z3.T, (1 - self.tau2) * Cyy - self.tau2 * I2]
        B = numpy.r_[B, B1]
        B = (B + B.T) / 2

        (D, W) = scipy.linalg.eig(A, B)

        #Only select eigenvalues which are greater than zero
        W = W[:, D > 0]

        #We need to return those eigenvectors corresponding to positive eigenvalues
        self.alpha = W[0:numExamples, :]
        self.V = W[numExamples:, :]
        self.lmbdas = D[D > 0]

        #Add 1 where entries are negative to avoid square roots of negative values
        alphaDiag = Util.mdot(self.alpha.T, Kxx, self.alpha)
        alphaDiag = alphaDiag + numpy.array(alphaDiag < 0, int)
        vDiag = Util.mdot(self.V.T, Cyy, self.V)
        vDiag = vDiag + numpy.array(vDiag < 0, int)
        self.alpha = numpy.dot(
            self.alpha, numpy.diag(1 / numpy.sqrt(numpy.diag(alphaDiag))))
        self.V = numpy.dot(self.V,
                           numpy.diag(1 / numpy.sqrt(numpy.diag(vDiag))))

        return self.alpha, self.V, self.lmbdas
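For reference, a small self-contained sketch of the symmetric generalized eigenproblem A w = lambda B w that learnModel sets up, with random matrices in place of the kernel and covariance blocks (the block sizes and contents here are illustrative only):

import numpy
import scipy.linalg

numpy.random.seed(21)
n = 4
C = numpy.random.rand(n, n)

#Off-diagonal block structure, as in the A matrix above
A = numpy.r_[numpy.c_[numpy.zeros((n, n)), C],
             numpy.c_[C.T, numpy.zeros((n, n))]]
A = (A + A.T)/2

#A positive definite B standing in for the regularised covariance blocks
M = numpy.random.rand(2*n, 2*n)
B = numpy.dot(M, M.T) + 10**-5*numpy.eye(2*n)

(D, W) = scipy.linalg.eig(A, B)

#Keep only directions with positive eigenvalues, as in learnModel
W = W[:, D.real > 0]
lmbdas = D.real[D.real > 0]
print(lmbdas)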
Code example #3
    def testCenter(self):
        numExamples = 10
        numFeatures = 30
        X = numpy.random.rand(numExamples, numFeatures)
        K = numpy.dot(X, X.T)

        kernel = LinearKernel()
        c = 10 

        sparseCenterer = SparseCenterer()
        KTilde = sparseCenterer.centerArray(X, kernel, c)

        j = numpy.ones((numExamples, 1))

        KTilde2 = K - Util.mdot(j, j.T, K)/numExamples - Util.mdot(K, j, j.T)/numExamples \
            + Util.mdot(j.T, K, j)*numpy.ones((numExamples, numExamples))/(numExamples**2)

        self.assertTrue(numpy.linalg.norm(KTilde-KTilde2) < 0.001)

        #Now test low rank case
        c = 8 
        KTilde = sparseCenterer.centerArray(X, kernel, c)
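The KTilde2 expression above is the standard kernel centering formula; a quick standalone check that it matches the projection form (I - J/n) K (I - J/n):

import numpy

n = 10
X = numpy.random.rand(n, 30)
K = numpy.dot(X, X.T)
j = numpy.ones((n, 1))
J = numpy.dot(j, j.T)

#Expanded form, as in the test above
KTilde = K - numpy.dot(J, K)/n - numpy.dot(K, J)/n \
    + numpy.dot(numpy.dot(j.T, K), j)*J/(n**2)

#Projection form with the centering matrix H
H = numpy.eye(n) - J/n
KTilde2 = numpy.dot(numpy.dot(H, K), H)

print(numpy.linalg.norm(KTilde - KTilde2))  #Approximately zero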
Code example #4
    def learnModel(self, KHat, K, k):
        """
        A function to learn missing data based on Khat (partial kernel) and K
        (full kernel). 
        """

        numExamples = KHat.shape[0]
        Z = numpy.zeros((numExamples, numExamples))

        Ki = K.copy()
        alpha = numpy.zeros((numExamples, k))
        beta = numpy.zeros((numExamples, k))
        lmbda = numpy.zeros(k)
        a = 0.1

        for i in range(k):
            A = numpy.dot(numpy.dot(KHat, Ki), KHat)
            KHatSq = numpy.dot(KHat, KHat)
            KHatSqInv = numpy.linalg.inv(KHatSq)

            [D, V] = numpy.linalg.eig(numpy.dot(KHatSqInv, A))

            #Note: numpy.linalg.eig does not sort, so D[0] need not be the largest eigenvalue
            lmbda[i] = numpy.sqrt(D[0])
            alpha[:, i] = V[0:numExamples, 0]
            beta[:, i] = numpy.dot(KHat, alpha[:, i])

            #alpha[:, i] = alpha[:, i]/math.sqrt(Util.mdot((alpha[:, i].T, KHatSq, alpha[:, i])))

            #Note: Check this
            beta[:, i] = beta[:, i] / numpy.sqrt(
                Util.mdot(beta[:, i].T, KHat, beta[:, i]))

            #Deflate Ki
            #print(Ki)
            Ki = Ki - Util.mdot(Ki, beta[:, i], beta[:, i].T,
                                Ki.T) / Util.mdot(beta[:, i].T, Ki, beta[:, i])

            #print(Ki)

        return alpha, beta
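The deflation step at the end of the loop removes the component of Ki along beta; a minimal sketch on a random positive semi-definite matrix, showing the deflated matrix annihilates that direction:

import numpy

n = 6
M = numpy.random.rand(n, n)
Ki = numpy.dot(M, M.T)  #Random positive semi-definite matrix
beta = numpy.random.rand(n)

#Rank-one deflation, as in the loop above
Kb = numpy.dot(Ki, beta)
Ki2 = Ki - numpy.outer(Kb, Kb)/numpy.dot(beta, Kb)

#Ki2 beta is now numerically zero
print(numpy.linalg.norm(numpy.dot(Ki2, beta)))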
Code example #5
    def testEvaluate2(self):
        tau = 1.0
        linearKernel = LinearKernel()

        graphKernel = PermutationGraphKernel(tau, linearKernel)

        (evaluation, f, P, SW1, SW2, SK1, SK2) = graphKernel.evaluate(self.sGraph1, self.sGraph3, True)

        W1 = self.sGraph1.getWeightMatrix()
        W2 = self.sGraph3.getWeightMatrix()

        self.assertTrue(numpy.linalg.norm(Util.mdot(P, W1, P.T)-W2) <= self.tol)
        self.assertAlmostEqual(f, 0, 7)
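For reference, a tiny sketch of the property being asserted, namely that a permutation matrix P relabels a weight matrix via P W1 P^T (the 3-vertex graph here is illustrative):

import numpy

#Single edge between vertices 0 and 1; vertex 2 is isolated
W1 = numpy.array([[0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 0]])

#Permutation matrix swapping vertices 0 and 2
P = numpy.array([[0, 0, 1],
                 [0, 1, 0],
                 [1, 0, 0]])

#The edge moves to (1, 2) in the relabelled graph
W2 = numpy.dot(numpy.dot(P, W1), P.T)
print(W2)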
Code example #6
    def centerArray(self, X, kernel, c, returnAlpha=False):
        """
        A method to center a kernel matrix.
        """
        #Note the following check does not ensure the inverse exists
        Parameter.checkInt(c, 0, X.shape[0])
        numExamples = X.shape[0]
        K = kernel.evaluate(X, X)
        j = numpy.ones((numExamples, 1))

        #inds = self.__chooseRandomIndices(X, kernel, c)
        inds = self.__chooseIndices(X, kernel, c)
        epsilon = 0.01

        alphaT = Util.mdot(numpy.linalg.inv(K[numpy.ix_(inds, inds)] + epsilon*numpy.eye(c)),
                           K[inds, :], j)/numExamples

        KTilde = K - Util.mdot(j, alphaT.T, K[inds, :]) - Util.mdot(K[:, inds], alphaT, j.T) \
            + Util.mdot(alphaT.T, K[numpy.ix_(inds, inds)], alphaT)*numpy.ones((numExamples, numExamples))

        if returnAlpha:
            alpha = numpy.zeros((numExamples, 1))
            alpha[inds, :] = alphaT
            return KTilde, alpha
        else:
            return KTilde 
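When c equals the number of examples and every index is chosen, alphaT is approximately j/numExamples (ignoring the epsilon regularisation) and the formula above reduces to standard centering; a standalone check of that limiting case:

import numpy

n = 10
X = numpy.random.rand(n, 5)
K = numpy.dot(X, X.T)
j = numpy.ones((n, 1))
alphaT = j/n  #Full-index case

KTilde = K - numpy.dot(numpy.dot(j, alphaT.T), K) - numpy.dot(K, numpy.dot(alphaT, j.T)) \
    + numpy.dot(numpy.dot(alphaT.T, K), alphaT)*numpy.ones((n, n))

H = numpy.eye(n) - numpy.dot(j, j.T)/n
print(numpy.linalg.norm(KTilde - numpy.dot(numpy.dot(H, K), H)))  #Approximately zero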
Code example #7
    def learnModel(self, X, Y):
        """
        Learn the weight matrix which matches X and Y.
        """
        Parameter.checkClass(X, numpy.ndarray)
        Parameter.checkClass(Y, numpy.ndarray)
        Parameter.checkInt(X.shape[0], 1, float('inf'))
        Parameter.checkInt(X.shape[1], 1, float('inf'))

        self.pdcca = PrimalDualCCA(self.kernel, self.tau1, self.tau2)
        alpha, V, lmbdas = self.pdcca.learnModel(X, Y)

        a = 10**-5
        I = numpy.eye(V.shape[0])
        VV = numpy.dot(V, V.T) + a*I

        self.A = Util.mdot(alpha, V.T, numpy.linalg.inv(VV))
        self.X = X

        return self.A
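The mapping A = alpha V^T (V V^T + aI)^{-1} is a regularised least-squares fit; the same quantity can be computed with numpy.linalg.solve rather than an explicit inverse, as in this sketch (alpha and V are random stand-ins for the CCA directions):

import numpy

numpy.random.seed(21)
alpha = numpy.random.rand(10, 3)
V = numpy.random.rand(5, 3)

a = 10**-5
VV = numpy.dot(V, V.T) + a*numpy.eye(V.shape[0])
M = numpy.dot(alpha, V.T)

#Solve A VV = M instead of forming VV^{-1}
A = numpy.linalg.solve(VV.T, M.T).T

A2 = numpy.dot(M, numpy.linalg.inv(VV))
print(numpy.linalg.norm(A - A2))  #Approximately zero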
Code example #8
File: PrimalDualCCATest.py Project: kentwang/sandbox
    def testGetY(self):
        # Test if we can recover Y from X
        numExamples = 10
        numFeatures = 5
        X = numpy.random.rand(numExamples, numFeatures)

        Z = numpy.random.rand(numFeatures, numFeatures)
        ZZ = numpy.dot(Z.T, Z)
        (D, W) = scipy.linalg.eig(ZZ)
        Y = numpy.dot(X, W)

        tol = 10**-6
        tau = 0.1
        kernel = LinearKernel()
        cca = PrimalDualCCA(kernel, tau, tau)
        alpha, V, lmbdas = cca.learnModel(X, Y)
        Kx = numpy.dot(X, X.T)

        Yhat = Util.mdot(Kx, alpha, V.T, numpy.linalg.inv(numpy.dot(V, V.T)))
        self.assertTrue(numpy.linalg.norm(Yhat - Y) < tol)
Code example #9
    def getObjectiveValue(self, tau, P, g1, g2):
        W1, W2 = self.__getWeightMatrices(g1, g2)
        K1, K2 = self.__getKernelMatrices(g1, g2)

        f = tau*numpy.linalg.norm(Util.mdot(P, W1, P.T) - W2) \
            + (1-tau)*numpy.linalg.norm(Util.mdot(P, K1, P.T) - K2)
        return f
Code example #10
File: CenteringError.py Project: charanpald/sandbox
print(("Norm of the shift vector: " + str(numpy.linalg.norm(centerVector))))
X = X + centerVector
print(("Worst case of centering " + str(numpy.sqrt(numpy.sum(X**2)/numExamples))))

K = numpy.dot(X, X.T)

#Now, let's center the data using standard centering 
preprocessor = Standardiser()
Xc1 = preprocessor.centreArray(X)
Kc1 = numpy.dot(Xc1, Xc1.T)
alpha1 = j/numExamples

linearKernel = LinearKernel()
sparseCenterer = SparseCenterer()

error1 = numpy.trace(K)/numExamples - 2*Util.mdot(j.T, K, alpha1)/numExamples + Util.mdot(alpha1.T, K, alpha1)
#error1 = numpy.sqrt(error1)

step = int(numpy.floor(numExamples/20))
cs = numpy.array(list(range(step, numExamples, step)))
error2s = numpy.ones(cs.shape[0])

for i in range(cs.shape[0]):
    c = cs[i]
    Kc2, alpha2 = sparseCenterer.centerArray(X, linearKernel, c, True)
    #print(alpha2)
    error2s[i] = numpy.trace(K)/numExamples - 2*Util.mdot(j.T, K, alpha2)/numExamples + Util.mdot(alpha2.T, K, alpha2)
    #error2s[i] = numpy.sqrt(error2s[i])
    
print(error1)
print(cs)
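The error expression above, trace(K)/n - 2 j^T K alpha / n + alpha^T K alpha, is the mean squared distance from each example to the weighted mean X^T alpha; a standalone check of that identity:

import numpy

n = 10
X = numpy.random.rand(n, 5)
K = numpy.dot(X, X.T)
j = numpy.ones((n, 1))
alpha = j/n  #Standard centering weights

error = (numpy.trace(K)/n - 2*numpy.dot(numpy.dot(j.T, K), alpha)/n
         + numpy.dot(numpy.dot(alpha.T, K), alpha))[0, 0]

#Direct computation: mean squared norm of the centered examples
mu = numpy.dot(X.T, alpha).T
error2 = numpy.sum((X - mu)**2)/n

print(error, error2)  #The two values agree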