def nextTimeStep(Q, epsilon):
    """One EM iteration on the generator matrix Q.

    Returns (matrix, dist, stop):
      matrix -- the old Q if converged, otherwise the adjusted next estimate
      dist   -- Frobenius distance between the next estimate and Q
      stop   -- 1 when dist < epsilon (caller should stop looping), else 0
    """
    n = Q.shape[0]
    NextQ = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            NextQ[i, j] = expectation(Q, i, j)  #as defined in paper, see above functions
    NextQN = matrixFunctions2d.normalize2dMatrix(NextQ)
    #Cannot be weighted adjust, investigate why later
    #NOTE(review): the original weightedAdjustment2d call here was a dead store --
    #its result was immediately overwritten by the diagonalAdjustment2d call below,
    #which reads NextQN, not NextQ.  Disabled rather than deleted in case it is
    #re-enabled once the issue above is investigated.  (Assumes weightedAdjustment2d
    #does not mutate NextQN in place -- TODO confirm in matrixFunctions2d.)
    #NextQ = matrixFunctions2d.weightedAdjustment2d(NextQN)
    NextQ = matrixFunctions2d.diagonalAdjustment2d(NextQN)
    # print NextQ
    dist = frobenius(NextQ, Q)
    if (dist < epsilon):
        return Q, dist, 1  #converged: keep the previous Q and signal stop
    else:
        return NextQ, dist, 0
#!/usr/bin/python
import sys
import numpy as np
import matrixFunctions2d
from scipy.linalg import expm, logm

#Read a transition matrix (path given as argv[1]), row-normalize it, take its
#matrix logarithm, and write the detailed-balance report for the log-matrix.
raw = matrixFunctions2d.read2dMatrix(sys.argv[1])
row_normalized = matrixFunctions2d.normalize2dMatrix(raw, 0)
log_matrix = logm(row_normalized)
#mat = expm(matrix)
#matrixFunctions2d.printDetailedBalanceftxt(mat, "quickout.txt")
matrixFunctions2d.printDetailedBalanceftxt(
    log_matrix, "detailedBalance_%s.dat" % sys.argv[1])
#matrixFunctions2d.printDetailedBalanceftxt(matrix, "quickout.txt")
y = z switch = 0 else: z = hops[index - 1] switch = -1 if (iters > maxIters): value = 0 iters += 1 # print index, value, iters, x, y return hops[index] mat = matrixFunctions2d.read2dMatrix(sys.argv[1]) mat = matrixFunctions2d.normalize2dMatrix(mat, 0) P = np.copy(mat) #initial Probability Matrix mat = logm(mat) Q = matrixFunctions2d.diagonalAdjustment2d(mat, 0) #this is our Qnaught #Q = matrixFunctions2d.weightedAdjustment2d(log) def func(x, i, j, Q, P): #x is exp(Q(q_{i,j})) # P = expm(Q) Q_e = expm(Q) n = Q.shape[0] copy = np.copy(Q) #diff = np.real(Q[i,j] - x)#initially zero for first iteration, should change as x changes diff = np.real( Q_e[i, j] - P[i,
#!/usr/bin/python
import sys
import numpy as np
from scipy.linalg import expm, logm
import scipy as sp
import matrixFunctions2d

#Row-normalize the input transition matrix (argv[1]), take the real part of
#its matrix logarithm, apply the diagonal adjustment, then write the adjusted
#matrix to argv[2] and emit the detailed-balance/free-energy report.
flag = 0  #passed through to normalize2dMatrix/diagonalAdjustment2d; presumably selects the axis/mode -- TODO confirm in matrixFunctions2d
trans = matrixFunctions2d.read2dMatrix(sys.argv[1])
size = trans.shape[0]  #kept for parity with the original; not used below
trans = matrixFunctions2d.normalize2dMatrix(trans, flag)
trans = logm(trans)
trans = np.real(trans)
adjusted = matrixFunctions2d.diagonalAdjustment2d(trans, flag)
#print free energy to f.txt
matrixFunctions2d.write2dMatrix(adjusted, sys.argv[2])
matrixFunctions2d.writeDetailedBalanceftxt(adjusted)
y = hops[(index+1)%n] if(switch==-1): y = z switch = 0 else: z = hops[index-1] switch = -1 if(iters>maxIters): value =0 iters +=1 print index, value, iters, x, y return hops[index] mat = matrixFunctions2d.read2dMatrix(sys.argv[1]) norm = matrixFunctions2d.normalize2dMatrix(mat, 1)#along row log = logm(norm) Q = matrixFunctions2d.diagonalAdjustment2d(log) #this is our Qnaught #Q = matrixFunctions2d.weightedAdjustment2d(log) def func(x, i, j, Q): P = expm(Q) n = Q.shape[0] copy = np.copy(Q) for k in range(n): diff = Q[i,j] - x#initially zero for first iteration, should change as x changes for u in range(n): if(u!=j): copy[i,u] += float(diff/n) copy[i,i] = -1 * (np.sum(copy[i,:]) - copy[i,i]) #set diagonal to neg sum return distance(copy, Q)
#!/usr/bin/python import matrixFunctions2d import EMHelper import sys from scipy.linalg import logm, expm #quite simple, read in a matrix, normalize it, take the log and set it as Qo input = matrixFunctions2d.read2dMatrix(sys.argv[1]) Norm = matrixFunctions2d.normalize2dMatrix( input, 0) #currently works along rows, as does this whole algorithm logd = logm(Norm) #logd = logm(input) diag = matrixFunctions2d.diagonalAdjustment2d(logd) #matrixFunctions2d.print2dMatrix(diag) #print #print epsilon = 0.0001 i = 0 exit = 0 maxIters = 200 while (exit == 0): diag, dist, exit = EMHelper.nextTimeStep(diag, epsilon) i += 1 print i, dist if (i == maxIters): print "Maximum Iterations Reached: %s" % i print "Exiting" exit = 1 # print
import math import EMHelper def gammaFunction(R, N, alpha=1, beta=1): beta += R beta = 1 / beta alpha += N return np.random.gamma(alpha, beta, 1) ##################initialize sampling #read in distribution to match fname = sys.argv[1] MATRIX = matrixFunctions2d.read2dMatrix(fname) distribution = matrixFunctions2d.normalize2dMatrix(MATRIX, 0) #normalize along rows n = MATRIX.shape[0] #make random initial generator matrix Q = np.zeros_like(MATRIX) #generate alpha matrix using an EM, set alpha[i,j] = 1 if EM[i,j]>1e-14 else alpha[i,j]=0 #code from expectationMaximization.py logd = logm(distribution) diag = matrixFunctions2d.diagonalAdjustment2d(logd) epsilon = 0.0001 i = 0 maxIters = 200 exit = 0 maxiters = 200 while (exit == 0): diag, dist, exit = EMHelper.nextTimeStep(diag, epsilon)