# Shared imports for the kernel / Bhattacharyya routines below.
import math
import numpy.matlib as mat
import numpy.linalg as la
from numpy import exp, matrix
from scipy.sparse.linalg import eigsh


def kernel_matrix(X, kernel, n1, n2):
    """Joint kernel matrix over the stacked sample X = [X1; X2], returned
    uncentered (K), centered in one argument (Kuc), and centered in both (Kc)."""
    (n, d) = X.shape
    assert n == n1 + n2
    K = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(i + 1):
            K[i, j] = kernel(X[i, :], X[j, :])
            K[j, i] = K[i, j]
    # Per-sample row means of K, tiled to full size
    U1 = mat.sum(K[0:n1, :], 0) / n1
    U2 = mat.sum(K[n1:n, :], 0) / n2
    U1m = mat.tile(U1, (n1, 1))
    U2m = mat.tile(U2, (n2, 1))
    U = mat.bmat('U1m; U2m')
    # Block means of K, laid out to match the block structure of U
    m1m1 = mat.sum(K[0:n1, 0:n1]) / (n1 * n1)
    m1m2 = mat.sum(K[0:n1, n1:n]) / (n1 * n2)
    m2m2 = mat.sum(K[n1:n, n1:n]) / (n2 * n2)
    mumu = mat.zeros((n, n))
    mumu[0:n1, 0:n1] = m1m1
    mumu[0:n1, n1:n] = m1m2
    mumu[n1:n, 0:n1] = m1m2
    mumu[n1:n, n1:n] = m2m2
    Kcu = K - U                    # centered in the first argument only
    Kuc = Kcu.T                    # centered in the second argument only
    Kc = K - U - U.T + mumu        # centered in both arguments
    return (K, Kuc, Kc)
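# Usage sketch for kernel_matrix. Assumptions (not from the original source):
# `kernel` maps two 1 x d row vectors to a scalar; the degree-2 polynomial
# kernel below is purely illustrative. Because both samples are centered, the
# fully centered Kc sums to (numerically) zero, which the last line checks.
if __name__ == '__main__':
    import numpy.random as rnd
    X1 = matrix(rnd.randn(10, 3))
    X2 = matrix(rnd.randn(15, 3))
    X = mat.bmat('X1; X2')
    poly2 = lambda x, y: (1.0 + float(x * y.T)) ** 2
    (K, Kuc, Kc) = kernel_matrix(X, poly2, 10, 15)
    print(abs(Kc.sum()) < 1e-8)    # True: centering removes all block means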
def kernel_supermatrix(self, i, j):
    """Joint kernel matrix for datasets i and j, reusing their precomputed
    diagonal blocks and computing only the cross-kernel block."""
    kernel = self.kernel
    D1 = self.datasets[i]
    D2 = self.datasets[j]
    X1 = D1.X
    X2 = D2.X
    (n1, d) = X1.shape
    (n2, d) = X2.shape
    n = n1 + n2
    X = mat.bmat('X1; X2')
    K1 = D1.K
    K2 = D2.K
    K = mat.zeros((n, n))
    K[0:n1, 0:n1] = K1
    K[n1:n, n1:n] = K2
    # Loop variables renamed from i, j so they do not shadow the dataset indices
    for p in xrange(n1):
        for q in xrange(n1, n):
            K[p, q] = kernel(X[p, :], X[q, :])
            K[q, p] = K[p, q]
    # Inelegant - improve later: this duplicates the centering in kernel_matrix
    U1 = mat.sum(K[0:n1, :], 0) / n1
    U2 = mat.sum(K[n1:n, :], 0) / n2
    U1m = mat.tile(U1, (n1, 1))
    U2m = mat.tile(U2, (n2, 1))
    U = mat.bmat('U1m; U2m')
    m1m1 = mat.sum(K[0:n1, 0:n1]) / (n1 * n1)
    m1m2 = mat.sum(K[0:n1, n1:n]) / (n1 * n2)
    m2m2 = mat.sum(K[n1:n, n1:n]) / (n2 * n2)
    mumu = mat.zeros((n, n))
    mumu[0:n1, 0:n1] = m1m1
    mumu[0:n1, n1:n] = m1m2
    mumu[n1:n, 0:n1] = m1m2
    mumu[n1:n, n1:n] = m2m2
    Kcu = K - U
    Kuc = Kcu.T
    Kc = K - U - U.T + mumu
    return (K, Kuc, Kc)
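# kernel_supermatrix above is a method; its container class is not part of
# this excerpt. A hedged sketch of the assumed interface (the names
# DatasetSketch and CollectionSketch are hypothetical): self.kernel is the
# shared kernel function, and each entry of self.datasets carries its points
# X and a precomputed kernel matrix K.
class DatasetSketch(object):
    def __init__(self, X, kernel):
        self.X = X
        n = X.shape[0]
        self.K = mat.zeros((n, n))
        for a in xrange(n):
            for b in xrange(a + 1):
                self.K[a, b] = kernel(X[a, :], X[b, :])
                self.K[b, a] = self.K[a, b]


class CollectionSketch(object):
    def __init__(self, Xs, kernel):
        self.kernel = kernel
        self.datasets = [DatasetSketch(X, kernel) for X in Xs]
    kernel_supermatrix = kernel_supermatrix   # reuse the function above as a method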
def prepare_bhatta(X1, X2, kernel, eta, verbose=False):
    """Centered kernel matrix, Gram-Schmidt basis, and the two sample means
    expressed in that basis."""
    (n1, d1) = X1.shape
    (n2, d) = X2.shape
    assert d1 == d
    n = n1 + n2
    X = mat.bmat('X1; X2')
    (K, Kuc, Kc) = kernel_matrix(X, kernel, n1, n2)
    G = GS_basis(Kc, verbose)    # orthonormal basis coefficients (defined elsewhere)
    (null, g_dim) = G.shape
    mu1 = mat.sum(Kuc[0:n1, :] * G, 0) / n1
    mu2 = mat.sum(Kuc[n1:n, :] * G, 0) / n2
    return (Kc, G, mu1, mu2)
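# GS_basis is defined elsewhere in this codebase. As a hedged sketch only,
# assuming its contract is to return an n x g coefficient matrix G with
# G.T * Kc * G = I (Gram-Schmidt under the RKHS inner product
# <u, v> = u.T * Kc * v), it could look like the following. The name
# GS_basis_sketch and the tolerance are assumptions, not the original code.
def GS_basis_sketch(Kc, verbose=False, tol=1e-9):
    n = Kc.shape[0]
    cols = []                                # unit-norm coefficient vectors
    for i in xrange(n):
        g = mat.zeros((n, 1))
        g[i, 0] = 1.0                        # start from the i-th centered point
        for c in cols:
            g = g - c * float(c.T * Kc * g)  # remove projection onto earlier basis
        sq = float(g.T * Kc * g)             # squared RKHS norm of the residual
        if sq > tol:
            cols.append(g / sq ** .5)
        elif verbose:
            print('GS_basis: dropping dependent vector %d (norm^2 = %g)' % (i, sq))
    return mat.hstack(cols)                  # n x g_dim coefficient matrix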
def eig_bhatta(X1, X2, kernel, eta, r):
    # Tested. Verified:
    # - Poly-kernel RKHS representations of all objects are roughly equal to
    #   eigenbasis representations (slight differences for S3)
    # - Correctness for X1 ~= X2
    # - Close results to empirical bhatta in test_suite_1
    # Remaining issue: eigendecomposition of centered kernel matrices
    # occasionally produces negative eigenvalues
    (n1, d1) = X1.shape
    (n2, d2) = X2.shape
    assert d1 == d2
    n = n1 + n2
    X = mat.bmat('X1; X2')
    (K, Kuc, Kc) = kernel_matrix(X, kernel, n1, n2)
    Kc1 = Kc[0:n1, 0:n1]
    Kc2 = Kc[n1:n, n1:n]
    # Top-r eigenpairs of each centered block
    (Lam1, Alpha1) = eigsh(Kc1, r)
    (Lam2, Alpha2) = eigsh(Kc2, r)
    Alpha1 = matrix(Alpha1)
    Alpha2 = matrix(Alpha2)
    Lam1 = Lam1 / n1
    Lam2 = Lam2 / n2
    # Expansion coefficients of the unit-norm eigenfunctions
    Beta1 = mat.zeros((n, r))
    Beta2 = mat.zeros((n, r))
    for i in xrange(r):
        Beta1[0:n1, i] = Alpha1[:, i] / (n1 * Lam1[i]) ** .5
        Beta2[n1:n, i] = Alpha2[:, i] / (n2 * Lam2[i]) ** .5
    Beta = mat.bmat('Beta1, Beta2')
    assert not mat.isnan(Beta).any()
    Omega = eig_ortho(Kc, Beta)    # orthonormalize the combined basis (defined elsewhere)
    # Means and regularized covariances of the two samples in the Omega basis
    mu1_w = mat.sum(Kuc[0:n1, :] * Omega, 0) / n1
    mu2_w = mat.sum(Kuc[n1:n, :] * Omega, 0) / n2
    Eta_w = eta * mat.eye(2 * r)
    S1_w = Omega.T * Kc[:, 0:n1] * Kc[0:n1, :] * Omega / n1 + Eta_w
    S2_w = Omega.T * Kc[:, n1:n] * Kc[n1:n, :] * Omega / n2 + Eta_w
    mu3_w = .5 * (S1_w.I * mu1_w.T + S2_w.I * mu2_w.T).T
    S3_w = 2 * (S1_w.I + S2_w.I).I
    # Closed-form Bhattacharyya affinity between the two fitted Gaussians
    d1 = la.det(S1_w) ** -.25
    d2 = la.det(S2_w) ** -.25
    d3 = la.det(S3_w) ** .5
    e1 = exp(-mu1_w * S1_w.I * mu1_w.T / 4)
    e2 = exp(-mu2_w * S2_w.I * mu2_w.T / 4)
    e3 = exp(mu3_w * S3_w * mu3_w.T / 2)
    dterm = d1 * d2 * d3
    eterm = e1 * e2 * e3
    rval = float(dterm * eterm)
    if math.isnan(rval):
        rval = -1
    return rval
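# eig_ortho is defined elsewhere. A hedged sketch, assuming its contract is to
# return Omega = Beta * M with Omega.T * Kc * Omega = I (a whitening of the
# combined eigenbasis); the name eig_ortho_sketch and the cutoff are
# assumptions, not the original implementation.
def eig_ortho_sketch(Kc, Beta, cutoff=1e-10):
    M = Beta.T * Kc * Beta                    # Gram matrix of the combined basis
    (lam, V) = la.eigh(M)
    keep = lam > cutoff                       # drop directions that collapsed
    V = matrix(V[:, keep])
    scale = matrix(mat.diag(lam[keep] ** -.5))
    return Beta * V * scale                   # Omega.T * Kc * Omega == I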
import numpy.matlib as npm
from numpy import multiply, sqrt, matrix
from numpy.linalg import qr, inv, norm
from numpy.random import rand


def FRAHST_M(streams, energyThresh, alpha):
    """Fast Rank Adaptive row-Householder Subspace Tracking algorithm."""
    # Initialise
    N = streams.shape[1]
    rr = [1]
    hiddenV = npm.zeros((streams.shape[0], N))
    # Generate random orthonormal basis - N x r
    qq, RR = qr(rand(N, 1))
    Q_t = [matrix(qq)]
    S_t = [matrix([0.000001])]
    E_t = [0]
    E_dash_t = [0]
    z_dash = npm.zeros(N)
    RSRE = matrix([0])
    No_inp_count = 0

    iter_streams = iter(streams)
    for t in range(1, streams.shape[0] + 1):
        z_vec = matrix(iter_streams.next())
        z_vec = z_vec.T                           # now a column vector
        hh = Q_t[t-1].T * z_vec                   # 13a
        Z = z_vec.T * z_vec - hh.T * hh           # 13b
        Z = float(Z)                              # convert 1x1 matrix to a scalar
        if Z > 0.0000001:
            X = alpha * S_t[t-1] + hh * hh.T      # 13c
            # Solve X.T * b = sqrt(Z) * hh        # 13d
            b = multiply(inv(X.T), sqrt(Z)) * hh  # inverse method
            phi_sq_t = 0.5 + (1 / sqrt(4 * ((b.T * b) + 1)))  # 13e
            phi_t = sqrt(phi_sq_t)
            delta = phi_t / sqrt(Z)               # 13f
            gamma = (1 - 2 * phi_sq_t) / (2 * phi_t)  # 13g
            v = multiply(gamma, b)
            S_t.append(X - multiply(1 / delta, v * hh.T))  # 13h
            e = multiply(delta, z_vec) - (Q_t[t-1] * (multiply(delta, hh) - v))  # 13i
            Q_t.append(Q_t[t-1] - 2 * (e * v.T))  # 13j

            # Record hidden variables
            hiddenV[t-1, :hh.shape[0]] = hh.T
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec)) ** 2) /
                                      (norm(z_vec) ** 2))
            RSRE = npm.vstack((RSRE, matrix(new_RSRE)))

            E_t.append(alpha * E_t[-1] + norm(z_vec) ** 2)         # 13k
            E_dash_t.append(alpha * E_dash_t[-1] + norm(hh) ** 2)  # 13l

            if E_dash_t[-1] < energyThresh[0] * E_t[-1] and rr[-1] < N:  # 13m
                # Grow the subspace by the component of z orthogonal to Q
                z_dag_orthog = z_vec - Q_t[t] * Q_t[t].T * z_vec  # try Q[t], not Q[t + 1]
                Q_t[t] = npm.bmat([Q_t[t], z_dag_orthog / norm(z_dag_orthog)])
                TR = npm.zeros((S_t[t].shape[0], 1))
                BL = npm.zeros((1, S_t[t].shape[1]))
                BR = matrix(norm(z_dag_orthog) ** 2)
                S_t[t] = npm.bmat([[S_t[t], TR], [BL, BR]])
                rr.append(rr[-1] + 1)
            elif E_dash_t[-1] > energyThresh[1] * E_t[-1] and rr[-1] > 1:
                Q_t[t] = Q_t[t][:, :-1]      # delete the last column of Q_t
                S_t[t] = S_t[t][:-1, :-1]    # delete last row and column of S_t
                rr.append(rr[-1] - 1)
            else:
                rr.append(rr[-1])            # rank unchanged
        else:
            # Record hidden variables
            hiddenV[t-1, :hh.shape[0]] = hh.T
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec)) ** 2) /
                                      (norm(z_vec) ** 2))
            RSRE = npm.vstack((RSRE, matrix(new_RSRE)))
            # Repeat last entries
            Q_t.append(Q_t[-1])
            S_t.append(S_t[-1])
            rr.append(rr[-1])
            E_t.append(E_t[-1])
            E_dash_t.append(E_dash_t[-1])
            # Count inputs skipped for lying (numerically) in the current subspace
            No_inp_count += 1

    return Q_t, S_t, rr, E_t, E_dash_t, hiddenV, z_dash, RSRE, No_inp_count
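# Usage sketch for FRAHST_M. Assumptions (not from the original source):
# streams is a T x N array of row observations, energyThresh is a
# (lower, upper) pair of energy ratios, and alpha is the forgetting factor.
# The synthetic stream below is rank-2 signal plus small noise.
if __name__ == '__main__':
    import numpy as np
    T, N_dim = 500, 8
    basis = np.linalg.qr(np.random.randn(N_dim, 2))[0]      # rank-2 subspace
    coeffs = np.random.randn(T, 2)
    streams = coeffs.dot(basis.T) + 0.01 * np.random.randn(T, N_dim)
    (Q_t, S_t, rr, E_t, E_dash_t,
     hiddenV, z_dash, RSRE, skipped) = FRAHST_M(streams, (0.96, 0.98), 0.96)
    print('final rank: %d' % rr[-1])   # expected to settle near the true rank (2)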
def entangle(Q, k):
    # Q must be an isometry
    D = mix(Q, k)
    C = mix(Q, k)
    return mat.bmat([[D, C], [C, -D]]) / math.sqrt(2.0)
def entangle_with_knives(Q):
    # Q must be an isometry
    D = mix_with_knives(Q)
    C = mix_with_knives(Q)
    return mat.bmat([[D, C], [C, -D]]) / math.sqrt(2.0)
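# mix and mix_with_knives are defined elsewhere. As a minimal, hedged demo,
# with the trivial stand-in mix(Q, k) = Q (an assumption, not the real mixer,
# so D == C == Q) the entangled block operator [[Q, Q], [Q, -Q]] / sqrt(2)
# is again an isometry, which the check below confirms numerically.
if __name__ == '__main__':
    import numpy as np
    mix = lambda Q, k: Q                                   # hypothetical stand-in
    Q = matrix(np.linalg.qr(np.random.randn(6, 3))[0])     # 6 x 3 isometry
    E = entangle(Q, 1)
    print(np.allclose(E.T * E, np.eye(6)))                 # True in this special case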