def __incremental_embedding_update(self, resource, args):
    """Warm-start refinement of the stored embedding.

    Loads the current embedding ``X`` and triplet set ``S`` from *resource*
    (presumably a key/value store backing this app — keys 'n', 'd', 'S', 'X';
    verify against caller), then runs gradient-descent passes with an
    exponentially growing iteration budget until either half of the time
    budget is spent or the convergence measure drops to ``epsilon``.
    The refined embedding is written back under 'X' as a nested list.
    """
    verbose = False
    n = resource.get('n')
    d = resource.get('d')
    S = resource.get_list('S')
    X = numpy.array(resource.get('X'))

    # Hard cap on wall-clock time for this update; only half is actually
    # allowed for the GD loop below.
    t_max = 1.0
    # Relative convergence criterion, see computeEmbeddingWithGD documentation.
    epsilon = 0.01

    t_start = time.time()
    budget_exp = 0
    while True:
        # Iteration budget doubles each pass: 1, 2, 4, 8, ...
        X, emp_loss, hinge_loss, acc = utilsMDS.computeEmbeddingWithGD(
            X, S, max_iters=2 ** budget_exp)
        budget_exp += 1
        # Stop once half the time budget is spent or we have converged.
        if time.time() - t_start >= 0.5 * t_max or acc <= epsilon:
            break

    resource.set('X', X.tolist())
def __full_embedding_update(self, resource, args):
    """Recompute the embedding from scratch, keeping it only if it improves.

    Reads problem size 'n', dimension 'd', triplets 'S', and the current
    embedding 'X' from *resource* (presumably a key/value store — verify
    against caller). A fresh embedding is produced with epoch SGD and then
    polished by gradient descent with an exponentially growing iteration
    budget under a wall-clock limit. If the new embedding's empirical loss
    is worse than the old one's, the old embedding is retained. The winner
    is written back under 'X' as a nested list.
    """
    verbose = False
    n = resource.get('n')
    d = resource.get('d')
    S = resource.get_list('S')
    X_old = numpy.array(resource.get('X'))

    # Wall-clock cap; only half is granted to the GD refinement loop.
    t_max = 5.0
    # Relative convergence criterion, see computeEmbeddingWithGD documentation.
    epsilon = 0.01

    # Loss of the embedding we already have, for the keep-or-replace check.
    emp_loss_old, hinge_loss_old = utilsMDS.getLoss(X_old, S)

    # Cold-start a candidate embedding via epoch SGD.
    X, tmp = utilsMDS.computeEmbeddingWithEpochSGD(
        n, d, S, max_num_passes=16, epsilon=0, verbose=verbose)

    t_start = time.time()
    budget_exp = 0
    while True:
        # Iteration budget doubles each pass: 1, 2, 4, 8, ...
        X, emp_loss_new, hinge_loss_new, acc = utilsMDS.computeEmbeddingWithGD(
            X, S, max_iters=2 ** budget_exp)
        budget_exp += 1
        # Stop once half the time budget is spent or we have converged.
        if time.time() - t_start >= 0.5 * t_max or acc <= epsilon:
            break

    # Never let a full recompute make things worse: fall back to the old
    # embedding if its empirical loss was lower.
    emp_loss_new, hinge_loss_new = utilsMDS.getLoss(X, S)
    if emp_loss_old < emp_loss_new:
        X = X_old

    resource.set('X', X.tolist())