def initialize(self, M=None):
    """
    Initialize the tensor decomposition.

    Parameters
    ----------
    M : list of 2 ktensors or None
        If None, both the regular and augmented factorizations are randomly
        initialized from the shape of self.X; otherwise M is validated and
        stored as-is.

    Raises
    ------
    ValueError
        If M is not of length 2 or either entry is not a ktensor.
    """
    # BUGFIX: use "is None" -- "== None" can misbehave on array-like objects.
    if M is None:
        AU = tensorTools.randomInit(self.X.shape, 1)
        F = tensorTools.randomInit(self.X.shape, self.R)
        self.M[REG_LOCATION] = ktensor.ktensor(np.ones(self.R), F)
        self.M[AUG_LOCATION] = ktensor.ktensor(np.ones(1), AU)
    else:
        ## do a quick sanity check
        if len(M) != 2:
            raise ValueError("Initialization needs to be of size 2")
        # BUGFIX: original used "and", which only rejected M when BOTH
        # entries were wrong; each entry must be a ktensor, so reject if
        # EITHER one is not.
        if not isinstance(M[0], ktensor.ktensor) or not isinstance(M[1], ktensor.ktensor):
            raise ValueError("Not ktensor type")
        self.M = M
def cp_apr(X, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=10,
           epsilon=1e-10, kappatol=1e-10, kappa=1e-2):
    """
    Compute nonnegative CP with alternating Poisson regression.
    Code is the python implementation of cp_apr in the MATLAB Tensor Toolbox

    Parameters
    ----------
    X : input tensor of the class tensor or sptensor
    R : the rank of the CP
    Minit : the initial guess (in the form of a ktensor), if None random guess
    tol : tolerance on the inner KKT violation
    maxiters : maximum number of iterations
    maxinner : maximum number of inner iterations
    epsilon : parameter to avoid dividing by zero
    kappatol : tolerance on complementary slackness
    kappa : offset to fix complementary slackness

    Returns
    -------
    M : the CP model as a ktensor
    cpStats: the statistics for each inner iteration
    modelStats: a dictionary item with the final statistics for this
        tensor factorization
    """
    N = X.ndims()
    ## Random initialization when no initial guess is supplied.
    # BUGFIX: "is None" instead of "== None".
    if Minit is None:
        F = tensorTools.randomInit(X.shape, R)
        Minit = ktensor.ktensor(np.ones(R), F)
    nInnerIters = np.zeros(maxiters)
    ## Initialize M and Phi for iterations
    M = Minit
    M.normalize(1)
    Phi = [[] for _ in range(N)]
    kktModeViolations = np.zeros(N)
    kktViolations = -np.ones(maxiters)
    nViolations = np.zeros(maxiters)
    ## statistics: one row per (outer iteration, mode); the zero seed row
    ## is deleted after the loop.
    cpStats = np.zeros(7)
    for iteration in range(maxiters):
        startIter = time.time()
        isConverged = True
        for n in range(N):
            startMode = time.time()
            ## Make adjustments to M[n] entries that violate complementary slackness
            if iteration > 0:
                V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)
                if np.count_nonzero(V) > 0:
                    nViolations[iteration] = nViolations[iteration] + 1
                    M.U[n][V > 0] = M.U[n][V > 0] + kappa
            M, Phi[n], inner, kktModeViolations[n], isConverged = \
                __solveSubproblem(X, M, n, maxinner, isConverged, epsilon, tol)
            # BUGFIX: accumulate the inner iteration count -- nInnerIters was
            # allocated, printed, and summed below but never updated.
            nInnerIters[iteration] += inner
            elapsed = time.time() - startMode
            # only write the outer iterations for now
            cpStats = np.vstack((cpStats,
                                 np.array([iteration, n, inner,
                                           tensorTools.lsqrFit(X, M),
                                           tensorTools.loglikelihood(X, [M]),
                                           kktModeViolations[n], elapsed])))
        kktViolations[iteration] = np.max(kktModeViolations)
        elapsed = time.time() - startIter
        print("Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}".format(iteration, nInnerIters[iteration], kktViolations[iteration], nViolations[iteration], elapsed))
        if isConverged:
            break
    cpStats = np.delete(cpStats, (0), axis=0)  # delete the first row which was superfluous
    ### Print the statistics
    fit = tensorTools.lsqrFit(X, M)
    ll = tensorTools.loglikelihood(X, [M])
    print("Number of iterations = {0}".format(iteration))
    print("Final least squares fit = {0}".format(fit))
    print("Final log-likelihood = {0}".format(ll))
    print("Final KKT Violation = {0}".format(kktViolations[iteration]))
    print("Total inner iterations = {0}".format(np.sum(nInnerIters)))
    # BUGFIX: "Iters" previously stored the builtin function `iter`, not the
    # loop counter `iteration`.
    modelStats = {"Iters": iteration, "LS": fit, "LL": ll,
                  "KKT": kktViolations[iteration]}
    return M, cpStats, modelStats
def cp_apr(X, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=10,
           epsilon=1e-10, kappatol=1e-10, kappa=1e-2):
    """
    Compute nonnegative CP with alternating Poisson regression.
    Code is the python implementation of cp_apr in the MATLAB Tensor Toolbox

    Parameters
    ----------
    X : input tensor of the class tensor or sptensor
    R : the rank of the CP
    Minit : the initial guess (in the form of a ktensor), if None random guess
    tol : tolerance on the inner KKT violation
    maxiters : maximum number of iterations
    maxinner : maximum number of inner iterations
    epsilon : parameter to avoid dividing by zero
    kappatol : tolerance on complementary slackness
    kappa : offset to fix complementary slackness

    Returns
    -------
    M : the CP model as a ktensor
    cpStats: the statistics for each inner iteration
    modelStats: a dictionary item with the final statistics for this
        tensor factorization
    """
    N = X.ndims()
    ## Random initialization when no initial guess is supplied.
    # BUGFIX: "is None" instead of "== None".
    if Minit is None:
        F = tensorTools.randomInit(X.shape, R)
        Minit = ktensor.ktensor(np.ones(R), F)
    nInnerIters = np.zeros(maxiters)
    ## Initialize M and Phi for iterations
    M = Minit
    M.normalize(1)
    Phi = [[] for _ in range(N)]
    kktModeViolations = np.zeros(N)
    kktViolations = -np.ones(maxiters)
    nViolations = np.zeros(maxiters)
    ## statistics: one row per (outer iteration, mode); the zero seed row
    ## is deleted after the loop.
    cpStats = np.zeros(7)
    for iteration in range(maxiters):
        startIter = time.time()
        isConverged = True
        for n in range(N):
            startMode = time.time()
            ## Make adjustments to M[n] entries that violate complementary slackness
            if iteration > 0:
                V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)
                if np.count_nonzero(V) > 0:
                    nViolations[iteration] = nViolations[iteration] + 1
                    M.U[n][V > 0] = M.U[n][V > 0] + kappa
            M, Phi[n], inner, kktModeViolations[n], isConverged = \
                __solveSubproblem(X, M, n, maxinner, isConverged, epsilon, tol)
            # BUGFIX: accumulate the inner iteration count -- nInnerIters was
            # allocated, printed, and summed below but never updated.
            nInnerIters[iteration] += inner
            elapsed = time.time() - startMode
            # BUGFIX: the assignment was half commented out ("#cpStats =")
            # leaving an orphan np.vstack expression that still paid for
            # lsqrFit/loglikelihood every mode and threw the result away.
            # Restore the assignment so cpStats is actually collected (and the
            # np.delete of the seed row below is meaningful).
            cpStats = np.vstack((cpStats,
                                 np.array([iteration, n, inner,
                                           tensorTools.lsqrFit(X, M),
                                           tensorTools.loglikelihood(X, [M]),
                                           kktModeViolations[n], elapsed])))
        kktViolations[iteration] = np.max(kktModeViolations)
        elapsed = time.time() - startIter
        print(
            "Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}"
            .format(iteration, nInnerIters[iteration], kktViolations[iteration],
                    nViolations[iteration], elapsed))
        if isConverged:
            break
    cpStats = np.delete(cpStats, (0), axis=0)  # delete the first row which was superfluous
    ### Print the statistics
    fit = tensorTools.lsqrFit(X, M)
    ll = tensorTools.loglikelihood(X, [M])
    print("Number of iterations = {0}".format(iteration))
    print("Final least squares fit = {0}".format(fit))
    print("Final log-likelihood = {0}".format(ll))
    print("Final KKT Violation = {0}".format(kktViolations[iteration]))
    print("Total inner iterations = {0}".format(np.sum(nInnerIters)))
    # BUGFIX: "Iters" previously stored the builtin function `iter`, not the
    # loop counter `iteration`.
    modelStats = {
        "Iters": iteration,
        "LS": fit,
        "LL": ll,
        "KKT": kktViolations[iteration]
    }
    return M, cpStats, modelStats
def cp_apr(X, Y1, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=50,
           epsilon=1e-10, kappatol=1e-10, kappa=1e-2):
    """
    Compute nonnegative CP with alternating Poisson regression,
    with a supervised (logistic) term on the first mode driven by Y1.
    Code is the python implementation of cp_apr in the MATLAB Tensor Toolbox

    Parameters
    ----------
    X : input tensor of the class tensor or sptensor
    Y1 : labels for the supervised term on mode 0
        (presumably one label per mode-0 fiber -- TODO confirm against caller)
    R : the rank of the CP
    Minit : the initial guess (in the form of a ktensor), if None random guess
    tol : tolerance on the inner KKT violation
    maxiters : maximum number of iterations
    maxinner : maximum number of inner iterations
    epsilon : parameter to avoid dividing by zero
    kappatol : tolerance on complementary slackness
    kappa : offset to fix complementary slackness

    Returns
    -------
    M : the CP model as a ktensor
    cpStats: the statistics for each inner iteration
    """
    N = X.ndims()
    ## Random initialization when no initial guess is supplied.
    # BUGFIX: "is None" instead of "== None".
    if Minit is None:
        F = tensorTools.randomInit(X.shape, R)
        Minit = ktensor.ktensor(np.ones(R), F)
    nInnerIters = np.zeros(maxiters)
    ## Initialize M and Phi for iterations
    M = Minit
    M.normalize(1)
    Phi = [[] for _ in range(N)]
    kktModeViolations = np.zeros(N)
    kktViolations = -np.ones(maxiters)
    nViolations = np.zeros(maxiters)
    ## Regularization weights for the logistic subproblem.
    ## NOTE(review): lambda3 is passed to __solveLogis but otherwise unused
    ## here; kept for fidelity with that helper's signature.
    lambda2 = 1
    lambda3 = 1
    ## Logistic regression coefficients (R weights + intercept).
    sita = np.random.rand(R + 1, 1)
    ## statistics (seed row, deleted after the loop)
    cpStats = np.zeros(7)
    for iteration in range(maxiters):
        startIter = time.time()
        isConverged = True
        for n in range(N):
            startMode = time.time()
            ## Make adjustments to M[n] entries that violate complementary slackness
            if iteration > 0:
                V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)
                if np.count_nonzero(V) > 0:
                    nViolations[iteration] = nViolations[iteration] + 1
                    # BUGFIX: these were Python 2 print statements, which are
                    # a syntax error in Python 3 (the rest of the file already
                    # uses print() calls).
                    print('V:', V.shape, V.dtype)
                    print('M.U[n]', M.U[n].shape, M.U[n].dtype)
                    M.U[n][V > 0] = M.U[n][V > 0] + kappa
            ## Mode 0 carries the supervised term: refit the logistic
            ## coefficients, then solve the coupled subproblem.
            if n == 0:
                sita = __solveLogis(M.U[n], Y1, 200, epsilon, lambda2, lambda3, sita)
                M, Phi[n], inner, kktModeViolations[n], isConverged = \
                    __solveSubproblem1(X, M, n, maxinner, isConverged,
                                       epsilon, tol, sita, Y1, lambda2)
            else:
                M, Phi[n], inner, kktModeViolations[n], isConverged = \
                    __solveSubproblem0(X, M, n, maxinner, isConverged, epsilon, tol)
            # BUGFIX: accumulate the inner iteration count -- nInnerIters was
            # allocated, printed, and summed below but never updated.
            nInnerIters[iteration] += inner
            elapsed = time.time() - startMode
        kktViolations[iteration] = np.max(kktModeViolations)
        elapsed = time.time() - startIter
        print("Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}".format(iteration, nInnerIters[iteration], kktViolations[iteration], nViolations[iteration], elapsed))
        if isConverged:
            break
    cpStats = np.delete(cpStats, (0), axis=0)  # delete the first row which was superfluous
    ### Print the statistics
    #fit = tensorTools.lsqrFit(X,M)
    #ll = tensorTools.loglikelihood(X,[M])
    print("Number of iterations = {0}".format(iteration))
    #print("Final least squares fit = {0}".format(fit))
    #print("Final log-likelihood = {0}".format(ll))
    print("Final KKT Violation = {0}".format(kktViolations[iteration]))
    print("Total inner iterations = {0}".format(np.sum(nInnerIters)))
    return M, cpStats