def distmat(X, Y):
    n = len(X)
    m = len(Y)
    xx = ml.sum(X * X, axis=1)
    yy = ml.sum(Y * Y, axis=1)
    xy = ml.dot(X, Y.T)
    return npy.tile(xx, (m, 1)).T + npy.tile(yy, (n, 1)) - 2 * xy
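# Usage sketch (added for illustration; assumes the aliases used above,
# "import numpy as npy" and "import numpy.matlib as ml", and ndarray inputs --
# with np.matrix operands X*X would be a matrix product, not elementwise).
# distmat returns the matrix of *squared* Euclidean distances between rows.
import numpy as npy
import numpy.matlib as ml

X = npy.random.randn(4, 2)
Y = npy.random.randn(3, 2)
D2 = distmat(X, Y)           # shape (4, 3); take npy.sqrt(D2) for distances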
def zl_distvec(X, Y):
    n = len(X)
    m = len(Y)
    xx = ml.sum(X * X, axis=1)
    yy = ml.sum(Y * Y, axis=1)
    xy = ml.dot(X, Y.T)
    return tile(xx, (m, 1)).T + tile(yy, (n, 1)) - 2 * xy
def nk_bhatta(X1, X2, eta):
    # Returns the non-kernelized Bhattacharyya overlap, i.e. fits normal
    # distributions in input space and computes the Bhattacharyya overlap
    # between them.
    (n1, d1) = X1.shape
    (n2, d) = X2.shape
    assert d1 == d
    mu1 = mat.sum(X1, 0) / n1
    mu2 = mat.sum(X2, 0) / n2
    X1c = X1 - mat.tile(mu1, (n1, 1))
    X2c = X2 - mat.tile(mu2, (n2, 1))
    Eta = mat.eye(d) * eta
    S1 = X1c.T * X1c / n1 + Eta
    S2 = X2c.T * X2c / n2 + Eta
    mu3 = .5 * (S1.I * mu1.T + S2.I * mu2.T).T
    S3 = 2 * (S1.I + S2.I).I
    d1 = la.det(S1) ** -.25
    d2 = la.det(S2) ** -.25
    d3 = la.det(S3) ** .5
    dterm = d1 * d2 * d3
    e1 = -.25 * mu1 * S1.I * mu1.T
    e2 = -.25 * mu2 * S2.I * mu2.T
    e3 = .5 * mu3 * S3 * mu3.T
    eterm = math.exp(e1 + e2 + e3)
    return float(dterm * eterm)
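# Usage sketch for nk_bhatta (added; assumes the snippet's aliases
# "import numpy.matlib as mat", "import numpy.linalg as la", "import math").
# Two heavily overlapping samples should give an overlap near 1; eta is a
# small ridge term that keeps S1 and S2 invertible.
import numpy.matlib as mat
import numpy.linalg as la
import math

X1 = mat.randn(50, 3)
X2 = mat.randn(60, 3) + 0.1
print nk_bhatta(X1, X2, 1e-3)   # near 1.0 for heavily overlapping samples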
def kernel_matrix(X, kernel, n1, n2):
    (n, d) = X.shape
    assert n == n1 + n2
    K = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(i + 1):
            K[i, j] = kernel(X[i, :], X[j, :])
            K[j, i] = K[i, j]
    U1 = mat.sum(K[0:n1, :], 0) / n1
    U2 = mat.sum(K[n1:n, :], 0) / n2
    U1m = mat.tile(U1, (n1, 1))
    U2m = mat.tile(U2, (n2, 1))
    U = mat.bmat('U1m; U2m')
    m1m1 = mat.sum(K[0:n1, 0:n1]) / (n1 * n1)
    m1m2 = mat.sum(K[0:n1, n1:n]) / (n1 * n2)
    m2m2 = mat.sum(K[n1:n, n1:n]) / (n2 * n2)
    mumu = mat.zeros((n, n))
    mumu[0:n1, 0:n1] = m1m1
    mumu[0:n1, n1:n] = m1m2
    mumu[n1:n, 0:n1] = m1m2
    mumu[n1:n, n1:n] = m2m2
    Kcu = K - U
    Kuc = Kcu.T
    N = mat.ones((n, n)) / n
    Kc = K - U - U.T + mumu
    return (K, Kuc, Kc)
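# One plausible definition of the polyk helper used by verify_kernel_matrix
# below (hypothetical -- the original definition is not shown in this excerpt).
# It builds an inhomogeneous polynomial kernel accepting either 1-d arrays or
# 1xd matrix rows:
import numpy as np

def polyk(degree):
    def k(x, y):
        xv = np.asarray(x).ravel()
        yv = np.asarray(y).ravel()
        return (1.0 + np.dot(xv, yv)) ** degree
    return k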
def distance(X, Y):
    n = len(X)
    m = len(Y)
    xx = matlib.sum(X * X, axis=1)
    yy = matlib.sum(Y * Y, axis=1)
    xy = matlib.dot(X, Y.T)
    return tile(xx, (m, 1)).T + tile(yy, (n, 1)) - 2 * xy
def distMat(X, Y):
    n = len(X)
    m = len(Y)
    xx = ml.sum(X * X, axis=1)
    yy = ml.sum(Y * Y, axis=1)
    xy = ml.dot(X, Y.T)
    return np.tile(xx, (m, 1)).T + np.tile(yy, (n, 1)) - 2 * xy
def distmat(X, Y):
    n = len(X)
    m = len(Y)
    xx = ml.sum(X * X, axis=1)   # axis=1 sums along each row
    print "xx:{}".format(xx)
    yy = ml.sum(Y * Y, axis=1)
    print "yy:{}".format(yy)
    xy = ml.dot(X, Y.T)          # dot: matrix product
    return tile(xx, (m, 1)).T + tile(yy, (n, 1)) - 2 * xy   # tile: replicate the matrix
def prepare_bhatta(X1, X2, kernel, eta, verbose=False):
    (n1, d1) = X1.shape
    (n2, d) = X2.shape
    assert d1 == d
    n = n1 + n2
    X = mat.bmat('X1;X2')
    (K, Kuc, Kc) = kernel_matrix(X, kernel, n1, n2)
    G = GS_basis(Kc, verbose)
    (null, g_dim) = G.shape
    mu1 = mat.sum(Kuc[0:n1, :] * G, 0) / n1
    mu2 = mat.sum(Kuc[n1:n, :] * G, 0) / n2
    return (Kc, G, mu1, mu2)
def kernel_submatrix(self):
    # Cache kernel evaluations between vectors in this dataset so we don't
    # repeat this work every time we call Bhattacharyya.
    X = self.X
    (n, d) = X.shape
    K = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(i + 1):
            K[i, j] = self.kernel(X[i, :], X[j, :])
            K[j, i] = K[i, j]
    Ki = mat.sum(K, 1) / n
    k = mat.sum(K) / (n * n)
    Kc = K - Ki * mat.ones((1, n)) - mat.ones((n, 1)) * Ki.T + k * mat.ones((n, n))
    return (K, Kc)
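# Sketch of the centering identity behind kernel_submatrix (added for clarity;
# assumes "import numpy.matlib as mat"): the Kc computed above equals J*K*J
# with J = I - ones(n, n)/n, the usual kernel-centering projection.
def center_kernel(K):
    n = K.shape[0]
    J = mat.eye(n) - mat.ones((n, n)) / n
    return J * K * J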
def compute_merit(B, U, model, penalty_coeff):
    xDim = model.xDim
    T = model.T
    merit = 0
    for t in xrange(0, T - 1):
        x, s = belief.decompose_belief(B[:, t], model)
        merit += model.alpha_belief * ml.trace(s * s)
        merit += model.alpha_control * ml.sum(U[:, t].T * U[:, t])
        merit += penalty_coeff * ml.sum(np.abs(B[:, t + 1] - belief.belief_dynamics(B[:, t], U[:, t], None, model)))
    x, s = belief.decompose_belief(B[:, T - 1], model)
    merit += model.alpha_final_belief * ml.trace(s * s)
    return merit
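# Note (added): compute_merit is the standard penalty-method merit function --
# the running belief/control cost plus penalty_coeff times the L1 norm of the
# dynamics defect |B[:, t+1] - belief_dynamics(B[:, t], U[:, t])|; compare
# constraints_satisfied below, which tests the same defect against a tolerance.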
def compute_probability(S, K, model):
    T = model.T
    h = 10
    Smin = np.zeros(T)
    Smax = np.zeros(T)
    P = ml.zeros([T, K])
    for i in range(T):
        Smin[i] = np.min(S[i, :])
        Smax[i] = np.max(S[i, :])
        if Smin[i] == Smax[i]:
            Smax[i] = 1
    for i in range(T):
        P[i, :] = np.exp(-h * (S[i, :] - Smin[i]) / (Smax[i] - Smin[i]))
        normalize = ml.sum(P[i, :])
        # if normalize == K: P[i, :] = 0.005
        P[i, :] = P[i, :] / normalize
    return P
def cost_func(B, model, profile, profiler):
    cost = ml.zeros([model.T, 1])
    U = ml.zeros([model.uDim, model.T])
    T = model.T
    for t in range(T - 1):
        U[:, t] = B[0:model.xDim, t + 1] - B[0:model.xDim, t]
        B[:, t + 1] = belief.belief_dynamics(B[:, t], U[:, t], None, model)
        if max(U[:, t]) > 1:
            cost[t] = 1000
        elif abs(B[0, t]) > model.xMax[0]:
            cost[t] = 1000
        elif abs(B[1, t]) > model.xMax[1]:
            cost[t] = 1000
        else:
            null, s = belief.decompose_belief(B[:, t], model)
            cost[t] = model.alpha_belief * ml.trace(s * s) + model.alpha_control * ml.sum(U[:, t].T * U[:, t])
    x, s = belief.decompose_belief(B[:, T - 1], model)
    cost[T - 1] = model.alpha_final_belief * ml.trace(s * s)
    return cost, B, U
def inf(self, x, meanonly=False):
    x = np.asmatrix(x)
    if x.shape[1] != self.d:
        if x.shape[0] == self.d:
            x = x.T
        else:
            raise Exception('Invalid test-set dimension -- '
                            'expected d = ' + str(self.d) + '.')
    n = x.shape[0]
    # Handle empty test set
    if n == 0:
        return (np.zeros((0, 1)), np.zeros((0, 1)))
    ms = self.kernel.mean * np.ones((n, 1))
    Kbb = self.kernel(x, diag=True)
    # Handle empty training set
    if len(self) == 0:
        return (ms, np.asmatrix(Kbb))
    Kba = self.kernel(x, self.x)
    m = self.kernel.mean * np.ones((len(self), 1))
    fm = ms + Kba * scipy.linalg.cho_solve((self.L, True), self.y - m,
                                           overwrite_b=True)
    if meanonly:
        return fm
    else:
        W = scipy.linalg.cho_solve((self.L, True), Kba.T)
        fv = np.asmatrix(Kbb - np.sum(np.multiply(Kba.T, W), axis=0).T)
        # W = np.asmatrix(scipy.linalg.solve(self.L, Kba.T, lower=True))
        # fv = np.asmatrix(Kbb - np.sum(np.power(W, 2), axis=0).T)
        return (fm, fv)
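# Note (added): in inf() above, with the cached Cholesky factor L (K = L*L.T),
# cho_solve((L, True), Kba.T) yields K^-1 * Kba.T, so fv is the standard GP
# posterior variance diag(Kbb) - diag(Kba * K^-1 * Kba.T) computed without ever
# forming K^-1 explicitly; the commented-out variant does the same work with a
# single triangular solve.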
def _kmedoids(distmat, threshold, imedoids, verbose):
    """\
    The *raw* version of k-medoids.
    """
    # initialize J
    Jprev = inf
    # initialize iteration count
    iter = 0
    # iterations
    while True:
        # distance from medoids to all other points
        dist = distmat[imedoids]
        # assign x to nearest medoid
        labels = dist.argmin(axis=0)
        J = 0
        # re-choose each medoid
        for j in range(len(imedoids)):
            idx_j = (labels == j).nonzero()[0]
            distj = distmat[idx_j][:, idx_j]
            distsum = ml.sum(distj, axis=1)
            idxmin = distsum.argmin()
            imedoids[j] = idx_j[idxmin]
            J += distsum[idxmin]
        iter += 1
        if verbose:
            print '[kmedoids] iter %d (J=%.4f)' % (iter, J)
        if Jprev - J < threshold:
            break
        Jprev = J
    return imedoids, labels, J
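# Usage sketch (added; assumes "import numpy as np", "import numpy.matlib as ml"
# and "from numpy import inf" in _kmedoids' module, plus a precomputed distance
# matrix -- e.g. from one of the distmat variants above):
import numpy as np
import numpy.matlib as ml

X = np.random.randn(100, 2)
dm = distmat(X, X)                          # pairwise (squared) distances
init = np.random.permutation(len(X))[:3]    # 3 distinct initial medoid indices
imedoids, labels, J = _kmedoids(dm, 1e-15, init, verbose=False)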
def bhatta(self, i, j):
    """Here is the actual Bhattacharyya algorithm."""
    eta = self.eta
    D1 = self.datasets[i]
    D2 = self.datasets[j]
    Beta1 = D1.Beta
    Beta2 = D2.Beta
    (n1, r) = Beta1.shape
    (n2, r) = Beta2.shape
    n = n1 + n2
    Beta = mat.zeros((n, 2 * r))
    Beta[0:n1, 0:r] = Beta1
    Beta[n1:n, r:2 * r] = Beta2
    (K, Kuc, Kc) = self.kernel_supermatrix(i, j)
    # K   = uncentered kernel matrix
    # Kuc = matrix between centered and uncentered vectors
    # Kc  = centered kernel matrix
    Omega = eig_ortho(Kc, Beta)
    mu1 = mat.sum(Kuc[0:n1, :] * Omega, 0) / n1
    mu2 = mat.sum(Kuc[n1:n, :] * Omega, 0) / n2
    S1 = Omega.T * Kc[:, 0:n1] * Kc[0:n1, :] * Omega / n1
    S2 = Omega.T * Kc[:, n1:n] * Kc[n1:n, :] * Omega / n2
    Eta = eta * mat.eye(2 * r)
    S1 += Eta
    S2 += Eta
    mu3 = .5 * (S1.I * mu1.T + S2.I * mu2.T).T
    S3 = 2 * (S1.I + S2.I).I
    d1 = la.det(S1) ** -.25
    d2 = la.det(S2) ** -.25
    d3 = la.det(S3) ** .5
    e1 = exp(-mu1 * S1.I * mu1.T / 4)
    e2 = exp(-mu2 * S2.I * mu2.T / 4)
    e3 = exp(mu3 * S3 * mu3.T / 2)
    dterm = d1 * d2 * d3
    eterm = e1 * e2 * e3
    rval = dterm * eterm
    if math.isnan(rval):
        rval = -1
        print "Warning: Kernel failed on datasets ({},{})".format(i, j)
    return rval
def kmedoids(X, k, observer=None, threshold=1e-15, maxiter=300):
    '''
    Cluster and show data X
    X: N * 2 array, data to be clustered
    k: number of clusters
    observer: the plot function
    '''
    N = len(X)
    labels = zeros(N, dtype=int)
    centers = array(random.sample(X, k))  # choose k unique elements from X
    iter = 0

    def calc_J():
        sum = 0
        for i in xrange(N):
            sum += norm(X[i] - centers[labels[i]])
        return sum

    def distmat(X, Y):
        n = len(X)
        m = len(Y)
        xx = ml.sum(X * X, axis=1)
        yy = ml.sum(Y * Y, axis=1)
        xy = ml.dot(X, Y.T)
        return tile(xx, (m, 1)).T + tile(yy, (n, 1)) - 2 * xy

    Jprev = calc_J()
    while True:
        # notify the observer
        if observer is not None:
            observer(iter, labels, centers)
        # calculate distance from x to each center
        # (scipy's distance_matrix is only available in scipy newer than 0.7)
        dist = distmat(X, centers)
        # assign x to nearest center
        labels = dist.argmin(axis=1)
        # re-calculate each center
        for j in range(k):
            idx_j = (labels == j).nonzero()
            distj = distmat(X[idx_j], X[idx_j])
            distsum = ml.sum(distj, axis=1)
            icenter = distsum.argmin()
            centers[j] = X[idx_j[0][icenter]]
        J = calc_J()
        iter += 1
        if Jprev - J < threshold:
            break
        Jprev = J
        if iter >= maxiter:
            break
    # final notification
    if observer is not None:
        observer(iter, labels, centers)
def __init__(self, points, masses):
    """
    Inputs:
    points: list of 3 dimensional points
    masses: list of float (same length as points)
    """
    self.nb_points = len(points)
    if self.nb_points == 0:
        raise Exception("Provide at least one point")
    if len(masses) != self.nb_points:
        raise Exception("Same number of points and masses must be provided")
    self.masses = masses
    self.initial_points = [Vect(p) for p in points]
    # Center of mass p and its velocity
    self.center_position = npmat.sum(self.initial_points, axis=0) / self.nb_points
    self.center_velocity = npmat.zeros((3, 1))
    # Position of the points relative to the center of mass, stacked into a big matrix
    self.centered_initial_points_hstack = npmat.hstack(self.initial_points) - self.center_position
    # Total mass and its inverse
    self.total_mass = npmat.sum(self.masses)
    self.total_mass_inverse = 1. / self.total_mass
    # Inertia matrix and its inverse
    self.initial_inertia_matrix = Solid._inertia_matrix(
        self.masses, self.centered_initial_points_hstack)
    self.initial_inertia_matrix_inverse = npmat.linalg.pinv(
        self.initial_inertia_matrix)
    # Rotation quaternion (used to construct the matrix representing orientation)
    # and angular momentum (sigma)
    self.rotation_quaternion = Vect([1., 0., 0., 0.])
    self.rotation_matrix = rotation_matrix_from_quaternion(
        self.rotation_quaternion)
    self.angular_momentum = npmat.zeros((3, 1))
    self._compute_AABB()
def dist_matrix(pts):
    """Calculate the euclidean distance matrix (EDM) for a set of points.

    Parameters
    ----------
    pts : array-like, shape (n, d)

    Returns
    -------
    dist_mat : np.ndarray
        The distance matrix as 2d ndarray.

    Implementation Details
    ----------------------
    Uses two auxiliary matrices to easily calculate the distance from each
    point to every other point in the list using this approach:

    (1) aux matrices:
    repmat(l, n1, n2): l is repeated n1 times, along axis 1, and n2 times
    along axis 2, so
        repmat(pts, len(pts), 1) = array([[1, 2], [4, 6], [1, 2], [4, 6]])
    repeat(l, n, a): each element of l is repeated n times along axis a
    (w/o 'a' a plain list is generated), so
        repeat(pts, 2, 0) = array([[1, 2], [1, 2], [4, 6], [4, 6]])

    (2) Pythagoras:
    Then, the element-wise difference of the generated matrices is calculated
    and each value is squared:
        array([[0, 0], [9, 16], [9, 16], [0, 0]])
    These squares are then summed up (linewise) using sum(..., axis=1):
        array([0, 25, 25, 0])
    Finally the square root is taken for each element:
        array([0., 5., 5., 0.])
    To transform the list into a distance matrix reshape() is used.

    Example
    -------
    >>> dist_matrix([[1, 2], [4, 6]])
    array([[ 0.,  5.],
           [ 5.,  0.]])
    >>> dist_matrix([[1.8, 4.1, 4.0], [2.8, 4.7, 4.5], [5.2, 4.2, 4.7],
    ...              [4.1, 4.5, 4.6], [5.7, 3.4, 4.5]])
    array([[ 0.        ,  1.26885775,  3.47275107,  2.41039416,  3.99374511],
           [ 1.26885775,  0.        ,  2.45967478,  1.3190906 ,  3.17804972],
           [ 3.47275107,  2.45967478,  0.        ,  1.14455231,  0.96436508],
           [ 2.41039416,  1.3190906 ,  1.14455231,  0.        ,  1.94422221],
           [ 3.99374511,  3.17804972,  0.96436508,  1.94422221,  0.        ]])
    """
    dist_mat = scipy.sqrt(
        matlib.sum(
            (matlib.repmat(pts, len(pts), 1) -
             matlib.repeat(pts, len(pts), axis=0)) ** 2,
            axis=1))
    return dist_mat.reshape((len(pts), len(pts)))
def BhattaFromDataset(D1, D2, eta):
    assert D1.r == D2.r
    kernel = D1.kernel
    Beta1 = D1.Beta
    Beta2 = D2.Beta
    (n1, r) = Beta1.shape
    (n2, r) = Beta2.shape
    n = n1 + n2
    Beta = mat.zeros((n, 2 * r))
    Beta[0:n1, 0:r] = Beta1
    Beta[n1:n, r:2 * r] = Beta2
    (K, Kuc, Kc) = kernel_supermatrix(D1, D2)
    Omega = eig_ortho(Kc, Beta)
    mu1 = mat.sum(Kuc[0:n1, :] * Omega, 0) / n1
    mu2 = mat.sum(Kuc[n1:n, :] * Omega, 0) / n2
    S1 = Omega.T * Kc[:, 0:n1] * Kc[0:n1, :] * Omega / n1
    S2 = Omega.T * Kc[:, n1:n] * Kc[n1:n, :] * Omega / n2
    Eta = eta * mat.eye(2 * r)
    S1 += Eta
    S2 += Eta
    mu3 = .5 * (S1.I * mu1.T + S2.I * mu2.T).T
    S3 = 2 * (S1.I + S2.I).I
    d1 = la.det(S1) ** -.25
    d2 = la.det(S2) ** -.25
    d3 = la.det(S3) ** .5
    e1 = math.exp(-mu1 * S1.I * mu1.T / 4)
    e2 = math.exp(-mu2 * S2.I * mu2.T / 4)
    e3 = math.exp(mu3 * S3 * mu3.T / 2)
    dterm = d1 * d2 * d3
    eterm = e1 * e2 * e3
    rval = dterm * eterm
    if math.isnan(rval):
        rval = -1
        # (the original message referenced undefined names i, j here)
        print "Warning: Kernel failed on dataset pair"
    return rval
def EMForMoG(datam, n, pl, muvl, covml, max_iters=1):
    assert len(pl) == len(muvl) and len(muvl) == len(covml)
    d = datam.shape[0]
    N = datam.shape[1]
    # start EM; posteriorm is n*N
    posteriorm = matlib.zeros((n, N), dtype=np.float32)
    for it in xrange(max_iters):
        print 'Iter: ' + str(it)
        # create Gaussian kernels
        NormalFl = [makeNormalF(muv, covm) for muv, covm in zip(muvl, covml)]
        # probability
        px = matlib.zeros((1, N), dtype=np.float32)
        posteriorm.fill(0)
        # calculate posteriorm
        for j in xrange(N):
            cur_data = datam[:, j]
            for i in xrange(n):
                posteriorm[i, j] = pl[i] * NormalFl[i](cur_data)
                px[0, j] += posteriorm[i, j]
        posteriorm /= px
        # update parameters
        # soft num is n*1
        softnum = matlib.sum(posteriorm, 1)
        print softnum
        softnum_inv = 1.0 / softnum
        pl = np.array((softnum / N)).reshape(-1).tolist()
        mum = datam * posteriorm.T * matlib.diag(np.array(softnum_inv).reshape(-1))
        muvl = [mum[:, k] for k in range(n)]
        mum = []  # release
        for k in range(n):
            datam_temp = datam - muvl[k]
            covml[k] = softnum_inv[k, 0] * datam_temp * \
                matlib.diag(np.array(posteriorm[k, :]).reshape(-1)) * datam_temp.T
    return pl, muvl, covml
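# One plausible definition of the makeNormalF helper assumed by EMForMoG above
# (hypothetical; the original is not shown in this excerpt). It closes over a
# mean column vector and covariance matrix and returns the Gaussian density:
import math
import numpy as np

def makeNormalF(muv, covm):
    d = muv.shape[0]
    covm_inv = np.linalg.inv(covm)
    norm_const = 1.0 / math.sqrt(((2 * math.pi) ** d) * np.linalg.det(covm))
    def density(x):
        dx = x - muv   # x and muv are d x 1 matrix columns
        return norm_const * math.exp(-0.5 * float(dx.T * covm_inv * dx))
    return density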
def verify_kernel_matrix():
    n1 = 10
    n2 = 10
    n = n1 + n2
    d = 5
    degree = 3
    X = randn(n, d)
    Phi = poly.phi(X, degree)
    (K, Kuc, Kc) = kernel_matrix(X, polyk(degree), n1, n2)
    P1 = Phi[0:n1, :]
    P2 = Phi[n1:n, :]
    mu1 = mat.sum(P1, 0) / n1
    mu2 = mat.sum(P2, 0) / n2
    P1c = P1 - mat.tile(mu1, (n1, 1))
    P2c = P2 - mat.tile(mu2, (n2, 1))
    Pc = bmat('P1c; P2c')
    KP = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(i + 1):
            KP[i, j] = dotp(Phi[i, :], Phi[j, :])
            KP[j, i] = KP[i, j]
    KucP = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(n):
            KucP[i, j] = dotp(Phi[i, :], Pc[j, :])
    KcP = mat.zeros((n, n))
    for i in xrange(n):
        for j in xrange(n):
            KcP[i, j] = dotp(Pc[i, :], Pc[j, :])
    print "Div1: " + str(sum(abs(K - KP)))
    print "Div2: " + str(sum(abs(Kuc - KucP)))
    print "Div3: " + str(sum(abs(Kc - KcP)))
def kernel_supermatrix(self, i, j):
    kernel = self.kernel
    D1 = self.datasets[i]
    D2 = self.datasets[j]
    X1 = D1.X
    X2 = D2.X
    (n1, d) = X1.shape
    (n2, d) = X2.shape
    n = n1 + n2
    X = mat.bmat('X1; X2')
    K1 = D1.K
    K2 = D2.K
    K = mat.zeros((n, n))
    K[0:n1, 0:n1] = K1
    K[n1:n, n1:n] = K2
    for i in xrange(n1):        # note: shadows the method arguments i, j
        for j in xrange(n1, n):
            K[i, j] = kernel(X[i, :], X[j, :])
            K[j, i] = K[i, j]
    # Inelegant - improve later
    U1 = mat.sum(K[0:n1, :], 0) / n1
    U2 = mat.sum(K[n1:n, :], 0) / n2
    U1m = mat.tile(U1, (n1, 1))
    U2m = mat.tile(U2, (n2, 1))
    U = mat.bmat('U1m; U2m')
    m1m1 = mat.sum(K[0:n1, 0:n1]) / (n1 * n1)
    m1m2 = mat.sum(K[0:n1, n1:n]) / (n1 * n2)
    m2m2 = mat.sum(K[n1:n, n1:n]) / (n2 * n2)
    mumu = mat.zeros((n, n))
    mumu[0:n1, 0:n1] = m1m1
    mumu[0:n1, n1:n] = m1m2
    mumu[n1:n, 0:n1] = m1m2
    mumu[n1:n, n1:n] = m2m2
    Kcu = K - U
    Kuc = Kcu.T
    N = mat.ones((n, n)) / n
    Kc = K - U - U.T + mumu
    return (K, Kuc, Kc)
def kmeans(X, k, observer=None, threshold=1e-15, maxiter=300):
    N = len(X)
    labels = zeros(N, dtype=int)
    centers = array(random.sample(X, k))
    iter = 0

    def calc_J():
        sum = 0
        for i in xrange(N):
            sum += norm(X[i] - centers[labels[i]])
        return sum

    def distmat(X, Y):
        n = len(X)
        m = len(Y)
        xx = ml.sum(X * X, axis=1)
        yy = ml.sum(Y * Y, axis=1)
        xy = ml.dot(X, Y.T)
        return tile(xx, (m, 1)).T + tile(yy, (n, 1)) - 2 * xy

    Jprev = calc_J()
    while True:
        # notify the observer
        if observer is not None:
            observer(iter, labels, centers)
        dist = distmat(X, centers)
        labels = dist.argmin(axis=1)
        # despite the function's name, centers are re-chosen medoid-style:
        # each center becomes the cluster member with the smallest summed
        # distance to the rest of its cluster
        for j in range(k):
            idx_j = (labels == j).nonzero()
            distj = distmat(X[idx_j], X[idx_j])
            distsum = ml.sum(distj, axis=1)
            icenter = distsum.argmin()
            centers[j] = X[idx_j[0][icenter]]
        J = calc_J()
        iter += 1
        if Jprev - J < threshold:
            break
        Jprev = J
        if iter >= maxiter:
            break
    if observer is not None:
        observer(iter, labels, centers)
def compute_forward_simulated_cost(b, U, model):
    T = model.T
    cost = 0
    b_t = b
    for t in xrange(0, T - 1):
        x_t, s_t = decompose_belief(b_t, model)
        cost += model.alpha_belief * ml.trace(s_t * s_t)
        cost += model.alpha_control * ml.sum(U[:, t].T * U[:, t])
        b_t = belief_dynamics(b_t, U[:, t], None, model)
    x_T, s_T = decompose_belief(b_t, model)
    cost += model.alpha_final_belief * ml.trace(s_T * s_T)
    return cost
def constraints_satisfied(B, U, model, tolerance):
    xDim = model.xDim
    xMax = model.xMax
    xMin = model.xMin
    uMax = model.uMax
    uMin = model.uMin
    T = model.T
    done = True
    constraint_violations = 0
    for t in xrange(0, T - 1):
        constraint_violations += ml.sum(np.abs(B[:, t + 1] - belief.belief_dynamics(B[:, t], U[:, t], None, model)))
        done &= constraint_violations < tolerance
        done &= np.max(B[1:xDim, t] <= xMax)
        done &= np.min(B[1:xDim, t] >= xMin)
        done &= np.max(U[:, t] <= uMax)
        done &= np.min(U[:, t] >= uMin)
        if not done:
            break
    # print('Constraint violations: %g' % constraint_violations)
    return done
def inf(self, x, meanonly=False):
    x = np.asmatrix(x)
    assert x.shape[1] == self.d
    n = x.shape[0]
    # Handle empty test set
    if n == 0:
        return (np.zeros((0, 1)), np.zeros((0, 1)))
    ms = self.kernel.mean * np.ones((n, 1))
    Kbb = self.kernel(x, diag=True)
    # Handle empty training set
    if len(self) == 0:
        return (ms, np.asmatrix(np.diag(Kbb)).T)
    Kba = self.kernel(x, self.x)
    m = self.kernel.mean * np.ones((len(self), 1))
    fm = ms + Kba * scipy.linalg.cho_solve((self.L, True), self.y - m,
                                           overwrite_b=True)
    if meanonly:
        return fm
    else:
        W = scipy.linalg.cho_solve((self.L, True), Kba.T)
        fv = np.asmatrix(Kbb - np.sum(np.multiply(Kba.T, W), axis=0).T)
        # W = np.asmatrix(scipy.linalg.solve(self.L, Kba.T, lower=True))
        # fv = np.asmatrix(Kbb - np.sum(np.power(W, 2), axis=0).T)
        return (fm, fv)
def eig_bhatta(X1, X2, kernel, eta, r):
    # Tested. Verified:
    #  - Poly-kernel RKHS representations of all objects are roughly equal to
    #    eigenbasis representations (slight differences for S3)
    #  - Correctness for X1 ~= X2
    #  - Close results to empirical bhatta in test_suite_1
    # Remaining issues: eigendecomposition of centered kernel matrices
    # occasionally produces negative eigenvalues
    (n1, d1) = X1.shape
    (n2, d2) = X2.shape
    assert d1 == d2
    n = n1 + n2
    X = mat.bmat("X1;X2")
    (K, Kuc, Kc) = kernel_matrix(X, kernel, n1, n2)
    Kc1 = Kc[0:n1, 0:n1]
    Kc2 = Kc[n1:n, n1:n]
    (Lam1, Alpha1) = eigsh(Kc1, r)
    (Lam2, Alpha2) = eigsh(Kc2, r)
    Alpha1 = matrix(Alpha1)
    Alpha2 = matrix(Alpha2)
    Lam1 = Lam1 / n1
    Lam2 = Lam2 / n2
    Beta1 = mat.zeros((n, r))
    Beta2 = mat.zeros((n, r))
    for i in xrange(r):
        Beta1[0:n1, i] = Alpha1[:, i] / (n1 * Lam1[i]) ** .5
        Beta2[n1:n, i] = Alpha2[:, i] / (n2 * Lam2[i]) ** .5
    Beta = mat.bmat('Beta1, Beta2')
    # np.isnan handles matrices; the original used math.isnan, which only
    # accepts scalars
    assert not np.isnan(Beta).any()
    Omega = eig_ortho(Kc, Beta)
    mu1_w = mat.sum(Kuc[0:n1, :] * Omega, 0) / n1
    mu2_w = mat.sum(Kuc[n1:n, :] * Omega, 0) / n2
    Eta_w = eta * mat.eye(2 * r)
    S1_w = Omega.T * Kc[:, 0:n1] * Kc[0:n1, :] * Omega / n1
    S2_w = Omega.T * Kc[:, n1:n] * Kc[n1:n, :] * Omega / n2
    S1_w += Eta_w
    S2_w += Eta_w
    mu3_w = .5 * (S1_w.I * mu1_w.T + S2_w.I * mu2_w.T).T
    S3_w = 2 * (S1_w.I + S2_w.I).I
    d1 = la.det(S1_w) ** -.25
    d2 = la.det(S2_w) ** -.25
    d3 = la.det(S3_w) ** .5
    e1 = exp(-mu1_w * S1_w.I * mu1_w.T / 4)
    e2 = exp(-mu2_w * S2_w.I * mu2_w.T / 4)
    e3 = exp(mu3_w * S3_w * mu3_w.T / 2)
    dterm = d1 * d2 * d3
    eterm = e1 * e2 * e3
    rval = float(dterm * eterm)
    if math.isnan(rval):
        rval = -1
    return rval
def kmeans(X, k, observer=None, threshold=1e-15, maxiter=300, style="kmeans"):
    N = len(X)
    labels = np.zeros(N, dtype=int)
    # replace=False keeps the k initial centers distinct
    centers = X[np.random.choice(len(X), k, replace=False)]
    itr = 0

    def calc_J():
        """Sum of distances from every point to its current center."""
        sums = 0
        for i in range(N):
            sums += norm(X[i] - centers[labels[i]])
        return sums

    def distmat(X, Y):
        """Pairwise (squared) distance matrix between the rows of X and Y."""
        n = len(X)
        m = len(Y)
        xx = ml.sum(X * X, axis=1)
        yy = ml.sum(Y * Y, axis=1)
        xy = ml.dot(X, Y.T)
        return np.tile(xx, (m, 1)).T + np.tile(yy, (n, 1)) - 2 * xy

    Jprev = calc_J()
    while True:
        # plot current assignment
        if observer is not None:
            observer(itr, labels, centers)
        dist = distmat(X, centers)
        labels = dist.argmin(axis=1)
        # plot again after reassignment
        if observer is not None:
            observer(itr, labels, centers)
        # recompute the cluster centers
        if style == "kmeans":
            for j in range(k):
                idx_j = (labels == j).nonzero()
                centers[j] = X[idx_j].mean(axis=0)
        elif style == "kmedoids":
            for j in range(k):
                idx_j = (labels == j).nonzero()
                distj = distmat(X[idx_j], X[idx_j])
                distsum = ml.sum(distj, axis=1)
                icenter = distsum.argmin()
                centers[j] = X[idx_j[0][icenter]]
        J = calc_J()
        itr += 1
        if Jprev - J < threshold:
            # stop once J no longer decreases (centers have converged)
            break
        Jprev = J
        if itr >= maxiter:
            break
def SPIRIT(A, lamb, energy, k0=1, holdOffTime=0, reorthog=False, evalMetrics="F"):
    A = np.mat(A)
    n = A.shape[1]
    totalTime = A.shape[0]
    Proj = npm.ones((totalTime, n)) * np.nan
    recon = npm.zeros((totalTime, n))
    # initialize w_i to unit vectors
    W = npm.eye(n)
    d = 0.01 * npm.ones((n, 1))
    m = k0  # number of eigencomponents
    relErrors = npm.zeros((totalTime, 1))
    sumYSq = 0.0
    E_t = []
    sumXSq = 0.0
    E_dash_t = []
    res = {}
    k_hist = []
    W_hist = []
    anomalies = []
    # incremental update of W
    lastChangeAt = 0
    for t in range(totalTime):
        k_hist.append(m)
        # update W for each y_t
        x = A[t, :].T  # new data as column vector
        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            Wj = W[:, j]
        # Gram-Schmidt reorthogonalization
        if reorthog == True:
            W[:, :m], R = npm.linalg.qr(W[:, :m])
        # compute low-D projection, reconstruction and relative error
        Y = W[:, :m].T * A[t, :].T      # project to m-dimensional space
        xActual = A[t, :].T             # actual vector at the current time
        xProj = W[:, :m] * Y            # reconstruction at the current time
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        relErrors[t] = npm.sum(npm.power(xOrth, 2)) / npm.sum(npm.power(xActual, 2))
        # update energy
        sumYSq = lamb * sumYSq + npm.sum(npm.power(Y, 2))
        E_dash_t.append(sumYSq)
        sumXSq = lamb * sumXSq + npm.sum(npm.power(A[t, :], 2))
        E_t.append(sumXSq)
        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0
        top = top + npm.power(npm.linalg.norm(xActual - xProj), 2)
        bot = bot + npm.power(npm.linalg.norm(xActual), 2)
        new_RSRE = top / bot
        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))
        ### Metric EVALUATION ###
        # deviation from truth
        if evalMetrics == "T":
            Qt = W[:, :m]
            if t == 0:
                res["subspace_error"] = npm.zeros((totalTime, 1))
                res["orthog_error"] = npm.zeros((totalTime, 1))
                res["angle_error"] = npm.zeros((totalTime, 1))
                Cov_mat = npm.zeros([n, n])
            # covariance matrix of the data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(xActual, xActual.T)
            # get eigenvalues and eigenvectors
            WW, V = npm.linalg.eig(Cov_mat)
            # sort eigenvectors by descending eigenvalue
            eig_idx = WW.argsort()      # get sort index
            eig_idx = eig_idx[::-1]     # reverse order (default is ascending)
            # V_k = the m eigenvectors with the largest eigenvalues
            V_k = V[:, eig_idx[:m]]
            # subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res["subspace_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(C.T, C)))  # Frobenius norm in dB
            # angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = npm.linalg.eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res["angle_error"][t, 0] = angle
            # deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(m)
            res["orthog_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(F.T, F)))  # Frobenius norm in dB
        # Energy thresholding
        ######################
        # check the lower bound of the energy level
        if sumYSq < energy[0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
            lastChangeAt = t
            m = m + 1
            anomalies.append(t)
            # print 'Increasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        # check the upper bound of the energy level
        elif sumYSq > energy[1] * sumXSq and lastChangeAt < t - holdOffTime and m < n and m > 1:
            lastChangeAt = t
            m = m - 1
            # print 'Decreasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        W_hist.append(W[:, :m])
    # set outputs
    # Gram-Schmidt reorthogonalization
    if reorthog == True:
        W[:, :m], R = npm.linalg.qr(W[:, :m])
    # Data Stores
    res2 = {
        "hidden": Proj,                                   # hidden variables
        "E_t": np.array(E_t),                             # total energy of data
        "E_dash_t": np.array(E_dash_t),                   # hidden-variable energy
        "e_ratio": np.array(E_dash_t) / np.array(E_t),    # energy ratio
        "rel_orth_err": relErrors,                        # orthogonal error
        "RSRE": RSRE,                                     # relative squared reconstruction error
        "recon": recon,                                   # reconstructed data
        "r_hist": k_hist,                                 # history of r values
        "W_hist": W_hist,                                 # history of weights
        "anomalies": anomalies,
    }
    res.update(res2)
    return res
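# Usage sketch (added; updateW must be in scope -- it is not part of this
# excerpt -- and energy = [lower, upper] bounds on the retained-energy ratio):
#     res = SPIRIT(np.random.randn(500, 4), 0.96, [0.95, 0.98], k0=1)
#     res['r_hist'], res['e_ratio']   # hidden-variable count and energy ratio over time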
def SPIRIT(streams, energyThresh, lamb, evalMetrics):
    # Make the data iterable
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)
    # Max no. of streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else:
        num_streams = streams.shape[1]
    count_over = 0
    count_under = 0
    #===========================================================================
    # Initialise k, w and d, lamb
    #===========================================================================
    k = 1  # hidden variables, initialise to one
    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1
    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1, num_streams))
    Eng = mat([0.00000001, 0.00000001])
    E_xt = 0                            # energy of X at time t
    E_rec_i = mat([0.000000000000001])  # energy of the reconstruction
    Y = npm.zeros(num_streams)
    timeSteps = streams.shape[0]
    #===========================================================================
    # Main Loop
    #===========================================================================
    for t in range(1, timeSteps + 1):  # t = 1,...,200
        k_hist.append(k)
        x_t_plus_1 = mat(streams_iter.next())  # read in next signals
        d_i = E_rec_i * t
        # Step 1 - Update Weights
        pc_weights, y_t_i, error = track_W(x_t_plus_1, k, pc_weights, d_i,
                                           num_streams, lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y, y_bar_t))
        # Record weights
        all_weights.append(pc_weights)
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))
        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0
        top = top + (norm(x_t_plus_1 - x_dash) ** 2)
        bot = bot + (norm(x_t_plus_1) ** 2)
        new_RSRE = top / bot
        if t == 1:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))
        ### FOR EVALUATION ###
        # deviation from truth
        if evalMetrics == 'T':
            Qt = pc_weights.T
            if t == 1:
                res['subspace_error'] = npm.zeros((timeSteps, 1))
                res['orthog_error'] = npm.zeros((timeSteps, 1))
                res['angle_error'] = npm.zeros((timeSteps, 1))
                Cov_mat = npm.zeros([num_streams, num_streams])
            # covariance matrix of the data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(x_t_plus_1, x_t_plus_1.T)
            # get eigenvalues and eigenvectors
            W, V = eig(Cov_mat)
            # sort eigenvectors by descending eigenvalue
            eig_idx = W.argsort()       # get sort index
            eig_idx = eig_idx[::-1]     # reverse order (default is ascending)
            # V_k = the k eigenvectors with the largest eigenvalues
            V_k = V[:, eig_idx[:k]]
            # subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t - 1, 0] = 10 * np.log10(npm.trace(npm.dot(C.T, C)))  # Frobenius norm in dB
            # angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t - 1, 0] = angle
            # deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(k)
            res['orthog_error'][t - 1, 0] = 10 * np.log10(npm.trace(npm.dot(F.T, F)))  # Frobenius norm in dB
        # Step 2 - Update energy estimate
        E_xt = ((lamb * (t - 1) * E_xt) + norm(x_t_plus_1) ** 2) / t
        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t - 1) * E_rec_i[0, i]) + (y_t_i[0, i] ** 2)) / t
        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i, 1)
        # Record energy
        Eng_new = npm.hstack((E_xt, E_retained[0, 0]))
        Eng = npm.vstack((Eng, Eng_new))
        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1
                # initialise E_{k+1} <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0])))
                # initialise w_{k+1}
                new_weight_vec = npm.zeros(num_streams)
                new_weight_vec[0, k - 1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t - 1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1:
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)
                # discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1
    # Data Stores
    res2 = {'hidden': Y,                                    # hidden variables
            'weights': all_weights,
            'E_t': Eng[:, 0],                               # total energy of data
            'E_dash_t': Eng[:, 1],                          # hidden-variable energy
            'e_ratio': np.divide(Eng[:, 1], Eng[:, 0]),     # energy ratio
            'RSRE': RSRE,                                   # relative squared reconstruction error
            'recon': x_dash,                                # reconstructed data
            'r_hist': k_hist,                               # history of k values
            'anomalies': anomalies}
    res.update(res2)
    return res, all_weights
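# Usage sketch (added; track_W must be in scope -- it is not part of this
# excerpt. energyThresh is the [lower, upper] retained-energy pair from the
# SPIRIT paper and lamb the exponential forgetting factor):
#     streams = np.random.randn(200, 5)
#     res, weights = SPIRIT(streams, [0.95, 0.98], 0.96, 'F')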
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of a MMAP[K]/PH[K]/1
    first-come-first-served queue, see [1]_.

    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters :
        The rest of the function parameters specify the options and the
        performance measures to be computed.

        The supported performance measures and options in this function are:

        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+

        (The quantities related to the number of customers in the system
        include the customer in the server, and the sojourn time related
        quantities include the service times as well)

    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance measure requested.
        Each entry is a matrix, where the columns belong to the various job
        types. If there is just a single item, then it is not put into a list.

    References
    ----------
    .. [1] Qiming He, "Analysis of a continuous time SM[K]/PH[K]/1/FCFS queue:
           Age process, sojourn times, and queue lengths", Journal of Systems
           Science and Complexity, 25(1), pp 133-155, 2012.
    """
    K = len(D) - 1
    # parse options
    eaten = []
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1FCFS: The arrival process is not a valid MMAP representation!')
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!')
    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N)
    Da = ml.zeros((N, N))
    for q in range(K):
        Da += D[q + 1]
    theta = CTMCSolve(D0 + Da)
    beta = [CTMCSolve(S[k] + ml.sum(-S[k], 1) * sigma[k]) for k in range(K)]
    lambd = [np.sum(theta * D[k + 1]) for k in range(K)]
    mu = [np.sum(beta[k] * (-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]
    ro = np.sum(np.array(lambd) / np.array(mu))
    alpha = theta * Da / sum(lambd)
    D0i = (-D0).I
    Sa = S[0]
    sa = [ml.zeros(sigma[0].shape)] * K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)] * K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0], 1))] * K
    sv[0] = ml.sum(-S[0], 1)
    Pk = [D0i * D[q + 1] for q in range(K)]
    for k in range(1, K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q == k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k], 1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k], 1))))
    Sa = ml.matrix(Sa)
    P = D0i * Da
    iVec = ml.kron(D[1], sa[0])
    for k in range(1, K):
        iVec += ml.kron(D[k + 1], sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)
    # step 1. solve the age process of the queue
    # ==========================================
    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices(ml.kron(Ia, Sa), ml.kron(Ia, -ml.sum(Sa, 1)),
                                  iVec, D0, "P", precision)
    T = ml.kron(Ia, Sa) + Y0 * iVec
    # calculate pi0 and v0
    pi0 = ml.zeros((1, T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta * D[k + 1], ba[k] / mu[k])
    pi0 = -pi0 * T
    iT = (-T).I
    oa = ml.ones((N, 1))
    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT * ml.kron(oa, sv[k])
        while argIx < len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                numOfSTMoms = argv[argIx + 1]
                rtMoms = []
                for m in range(1, numOfSTMoms + 1):
                    rtMoms.append(math.factorial(m) *
                                  np.sum(pi0 * iT**m * clo / (pi0 * clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                stCdfPoints = argv[argIx + 1]
                cdf = []
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T * t) * clo / (pi0 * clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrME":
                Bm = SimilarityMatrixForVectors(clo / (pi0 * clo),
                                                ml.ones((N * Ns, 1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrPH":
                vv = pi0 * iT
                ix = np.arange(N * Ns)
                nz = ix[vv.flat > precision]
                delta = Diag(vv[:, nz])
                cl = -T * clo / (pi0 * clo)
                alpha = cl[nz, :].T * delta
                A = delta.I * T[nz, :][:, nz].T * delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "ncDistr":
                numOfQLProbs = argv[argIx + 1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                jmc = ml.ones((Ns, 1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 0
                LmCurr = la.solve_sylvester(T, ml.kron(D0 + Da - D[k + 1], Is),
                                            -ml.eye(N * Ns))
                values[0] = 1 - ro + np.sum(pi0 * LmCurr * ml.kron(oa, jmc))
                for i in range(1, numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(T, ml.kron(D0 + Da - D[k + 1], Is),
                                                -LmPrev * ml.kron(D[k + 1], Is))
                    values[i] = np.sum(pi0 * LmCurr * ml.kron(oa, jmc) +
                                       pi0 * LmPrev * ml.kron(oa, jm))
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx] == "ncMoms":
                numOfQLMoms = argv[argIx + 1]
                argIx += 1
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                ELn = [la.solve_sylvester(T, ml.kron(D0 + Da, Is), -ml.eye(N * Ns))]
                qlMoms = []
                for n in range(1, numOfQLMoms + 1):
                    bino = 1
                    Btag = ml.zeros((N * Ns, N * Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n - i) / (i + 1)
                    ELn.append(la.solve_sylvester(T, ml.kron(D0 + Da, Is),
                                                  -Btag * ml.kron(D[k + 1], Is)))
                    qlMoms.append(np.sum(pi0 * ELn[n]) +
                                  np.sum(pi0 * Btag * ml.kron(oa, jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter " + str(argv[argIx]))
            argIx += 1
    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
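# Usage sketch (added; illustrative matrices, not from the original source).
# With K = 2 customer classes, D0 + D1 + D2 must be a proper generator (rows
# summing to zero) and each (sigma[k], S[k]) a valid PH representation:
#     import numpy.matlib as ml
#     D0 = ml.matrix([[-5., 1.], [2., -7.]])
#     D1 = ml.matrix([[3., 1.], [1., 1.]])        # class-1 arrivals
#     D2 = ml.matrix([[0., 0.], [1., 2.]])        # class-2 arrivals
#     sigma = [ml.matrix([[1.]]), ml.matrix([[0.5, 0.5]])]
#     S = [ml.matrix([[-10.]]), ml.matrix([[-4., 2.], [0., -6.]])]
#     ncMoms = MMAPPH1FCFS([D0, D1, D2], sigma, S, "ncMoms", 2)  # one list per class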