def __full_embedding_update(self, resource, args):
    """Recompute the embedding from a fresh SGD warm start, then refine with
    gradient descent under a time budget; keep the previous embedding if the
    new one has worse empirical loss. Writes 'X' and 'tau' back to resource.
    """
    verbose = False
    n = resource.get('n')
    d = resource.get('d')
    triplets = resource.get_list('S')
    prev_embedding = numpy.array(resource.get('X'))
    # maximum time (seconds) allowed to update the embedding
    t_max = 5.0
    # a relative convergence criterion, see computeEmbeddingWithGD documentation
    epsilon = 0.00001
    alpha = 1
    prev_emp_loss, prev_hinge_loss, prev_log_loss = utilsSTE.getLoss(prev_embedding, triplets, alpha)
    # warm start: a fixed number of epoch-SGD passes from a random init
    embedding, _ = utilsSTE.computeEmbeddingWithEpochSGD(
        n, d, triplets, alpha, max_num_passes=16, epsilon=0, verbose=verbose)
    start = time.time()
    # refine with GD in doubling bursts (1, 2, 4, ... iterations) until the
    # accuracy criterion is met or half the time budget is spent
    embedding, emp_loss, hinge_loss, log_loss, acc = utilsSTE.computeEmbeddingWithGD(
        embedding, triplets, alpha, max_iters=1, epsilon=epsilon, verbose=verbose)
    burst = 1
    while (time.time() - start < .5 * t_max) and (acc > epsilon):
        embedding, emp_loss, hinge_loss, log_loss, acc = utilsSTE.computeEmbeddingWithGD(
            embedding, triplets, alpha, max_iters=2 ** burst, epsilon=epsilon, verbose=verbose)
        burst += 1
    emp_loss, hinge_loss, log_loss = utilsSTE.getLoss(embedding, triplets, alpha)
    # fall back to the previous embedding if the retrain did not improve it
    if prev_emp_loss < emp_loss:
        embedding = prev_embedding
    tau = utilsSTE.getSTETauDistribution(embedding, triplets, alpha)
    resource.set('X', embedding.tolist())
    resource.set('tau', tau.tolist())
def __full_embedding_update(self, resource, args):
    """Rebuild the embedding from scratch and refine it under a time budget.

    Reads n, d, the triplet list S, and the current embedding X from the
    resource; runs epoch SGD followed by bursts of gradient descent, reverts
    to the old embedding when the empirical loss got worse, and stores the
    resulting 'X' and 'tau' back into the resource.
    """
    verbose = False
    num_points = resource.get('n')
    num_dims = resource.get('d')
    queries = resource.get_list('S')
    old_X = numpy.array(resource.get('X'))
    # set maximum time allowed to update embedding
    t_max = 5.0
    # a relative convergence criterion, see computeEmbeddingWithGD documentation
    epsilon = 0.00001
    alpha = 1
    old_emp, old_hinge, old_log = utilsSTE.getLoss(old_X, queries, alpha)
    # random restart via epoch SGD before the GD refinement
    new_X, _ = utilsSTE.computeEmbeddingWithEpochSGD(
        num_points, num_dims, queries, alpha,
        max_num_passes=16, epsilon=0, verbose=verbose)
    started_at = time.time()
    new_X, new_emp, new_hinge, new_log, acc = utilsSTE.computeEmbeddingWithGD(
        new_X, queries, alpha, max_iters=1, epsilon=epsilon, verbose=verbose)
    exponent = 1
    # exponentially growing GD bursts, capped at half the time budget
    while (time.time() - started_at < .5 * t_max) and (acc > epsilon):
        new_X, new_emp, new_hinge, new_log, acc = utilsSTE.computeEmbeddingWithGD(
            new_X, queries, alpha, max_iters=2 ** exponent,
            epsilon=epsilon, verbose=verbose)
        exponent += 1
    new_emp, new_hinge, new_log = utilsSTE.getLoss(new_X, queries, alpha)
    # keep the old embedding if it scored strictly better
    if old_emp < new_emp:
        new_X = old_X
    tau = utilsSTE.getSTETauDistribution(new_X, queries, alpha)
    resource.set('X', new_X.tolist())
    resource.set('tau', tau.tolist())
def __incremental_embedding_update(self, resource, args):
    """Refine the current embedding in place with short gradient-descent
    bursts under a 1-second budget, then write 'X' and 'tau' back to the
    resource. Unlike the full update, no SGD restart and no loss-based revert.
    """
    verbose = False
    n = resource.get('n')
    d = resource.get('d')
    triplets = resource.get_list('S')
    embedding = numpy.array(resource.get('X'))
    # maximum time (seconds) allowed to update the embedding
    t_max = 1.0
    # a relative convergence criterion, see computeEmbeddingWithGD documentation
    epsilon = 0.00001
    alpha = 1
    start = time.time()
    embedding, emp_loss, hinge_loss, log_loss, acc = utilsSTE.computeEmbeddingWithGD(
        embedding, triplets, alpha, max_iters=1, epsilon=epsilon, verbose=verbose)
    burst = 1
    # doubling GD bursts until converged or half the budget is used
    while (time.time() - start < .5 * t_max) and (acc > epsilon):
        embedding, emp_loss, hinge_loss, log_loss, acc = utilsSTE.computeEmbeddingWithGD(
            embedding, triplets, alpha, max_iters=2 ** burst, epsilon=epsilon, verbose=verbose)
        burst += 1
    tau = utilsSTE.getSTETauDistribution(embedding, triplets, alpha)
    resource.set('X', embedding.tolist())
    resource.set('tau', tau.tolist())
def __incremental_embedding_update(self, resource, args):
    """Incrementally improve the stored embedding.

    Loads the triplets and the current embedding from the resource, applies
    gradient descent in exponentially longer bursts until it converges or
    half of a 1-second budget elapses, and saves the updated 'X' and 'tau'.
    """
    verbose = False
    num_points = resource.get('n')
    num_dims = resource.get('d')
    queries = resource.get_list('S')
    current_X = numpy.array(resource.get('X'))
    # set maximum time allowed to update embedding
    t_max = 1.0
    # a relative convergence criterion, see computeEmbeddingWithGD documentation
    epsilon = 0.00001
    alpha = 1
    started_at = time.time()
    current_X, emp, hinge, log, acc = utilsSTE.computeEmbeddingWithGD(
        current_X, queries, alpha, max_iters=1, epsilon=epsilon, verbose=verbose)
    exponent = 1
    while (time.time() - started_at < .5 * t_max) and (acc > epsilon):
        # each burst doubles the iteration count of the previous one
        current_X, emp, hinge, log, acc = utilsSTE.computeEmbeddingWithGD(
            current_X, queries, alpha, max_iters=2 ** exponent,
            epsilon=epsilon, verbose=verbose)
        exponent += 1
    tau = utilsSTE.getSTETauDistribution(current_X, queries, alpha)
    resource.set('X', current_X.tolist())
    resource.set('tau', tau.tolist())