def __full_embedding_update(self, resource, args): n = resource.get("n") d = resource.get("d") S = resource.get_list("S") verbose = False X_old = numpy.array(resource.get("X")) X2_old = numpy.array(resource.get("X2")) # set maximum time allowed to update embedding t_max = 5.0 epsilon = 0.00001 # a relative convergence criterion, see computeEmbeddingWithGD documentation mu = 0.05 emp_loss_old, hinge_loss_old, log_loss_old = utilsCrowdKernel.getLoss(X_old, S) X, tmp = utilsCrowdKernel.computeEmbeddingWithEpochSGD( n, d, S, mu, max_num_passes=16, epsilon=0, verbose=verbose ) t_start = time.time() X, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X, S, mu, max_iters=1, epsilon=epsilon, verbose=verbose ) k = 1 while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon): # take a single gradient step X, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X, S, mu, max_iters=2 ** k, epsilon=epsilon, verbose=verbose ) k += 1 emp_loss_new, hinge_loss_new, log_loss_new = utilsCrowdKernel.getLoss(X, S) if emp_loss_old < emp_loss_new: X = X_old if d == 2: X2 = X else: emp_loss_old, hinge_loss_old, log_loss_old = utilsCrowdKernel.getLoss(X2_old, S) X2, tmp = utilsCrowdKernel.computeEmbeddingWithEpochSGD( n, 2, S, mu, max_num_passes=16, epsilon=0, verbose=verbose ) t_start = time.time() X2, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X2, S, mu, max_iters=1, epsilon=epsilon, verbose=verbose ) k = 1 while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon): # take a single gradient step X2, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X2, S, mu, max_iters=2 ** k, epsilon=epsilon, verbose=verbose ) k += 1 emp_loss_new, hinge_loss_new, log_loss_new = utilsCrowdKernel.getLoss(X2, S) if emp_loss_old < emp_loss_new: X2 = X2_old _ts = time.time() tau = utilsCrowdKernel.getCrowdKernelTauDistribution(X, S, mu) _te = time.time() resource.set("X", X.tolist()) resource.set("X2", X2.tolist()) resource.set("tau", tau.tolist())
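# Both update methods above and below share one refinement pattern: call the
# gradient-descent routine repeatedly, doubling max_iters (1, 2, 4, ...) until half of
# a wall-clock budget is spent or the relative convergence measure `acc` drops below
# epsilon. The sketch below isolates that schedule; `_time_boxed_refine` and the
# `gd_step` callable are hypothetical stand-ins for utilsCrowdKernel.computeEmbeddingWithGD,
# shown only to illustrate the loop structure, not as part of this class.
import time


def _time_boxed_refine(X, gd_step, t_max, epsilon):
    """Refine X with doubling-budget gradient-descent passes within 0.5 * t_max seconds."""
    t_start = time.time()
    X, acc = gd_step(X, max_iters=1)           # warm-up pass to get an initial acc
    k = 1
    while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon):
        X, acc = gd_step(X, max_iters=2 ** k)  # up to 2**k gradient steps this pass
        k += 1
    return X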
def __incremental_embedding_update(self, resource, args): n = resource.get("n") d = resource.get("d") S = resource.get_list("S") verbose = False X = numpy.array(resource.get("X")) X2 = numpy.array(resource.get("X2")) # set maximum time allowed to update embedding t_max = 1.0 epsilon = 0.00001 # a relative convergence criterion, see computeEmbeddingWithGD documentation mu = 0.05 t_start = time.time() X, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X, S, mu, epsilon=epsilon, max_iters=1 ) _te = time.time() k = 1 while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon): # take a single gradient step ts = time.time() X, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X, S, mu, max_iters=2 ** k, epsilon=epsilon, verbose=verbose ) k += 1 if verbose == True: print "Incremental embedding time of X gradient step at iteration %s is %s" % ( str(k), str(time.time() - ts), ) if d == 2: X2 = X else: t_start = time.time() X2, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X2, S, mu, epsilon=epsilon, max_iters=1 ) k = 1 while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon): # take a single gradient step ts = time.time() X2, emp_loss_new, hinge_loss_new, log_loss_new, acc = utilsCrowdKernel.computeEmbeddingWithGD( X2, S, mu, max_iters=2 ** k, epsilon=epsilon, verbose=verbose ) k += 1 if verbose: print "Incremental embedding time of X2 gradient step at itration %s is %s" % ( str(k), str(time.time() - ts), ) t_s = time.time() tau = utilsCrowdKernel.getCrowdKernelTauDistribution(X, S, mu) if verbose: print "Time to compute tau %s" % str(time.time() - t_s) resource.set("X", X.tolist()) resource.set("X2", X2.tolist()) _ts = time.time() tau = utilsCrowdKernel.getCrowdKernelTauDistribution(X, S, mu) _te = time.time() resource.set("tau", tau.tolist())
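# Both update methods read and write state through the `resource` object's
# get / get_list / set interface, serializing numpy arrays with tolist(). The
# dict-backed stub below is a hypothetical stand-in (not the real resource API)
# that is sufficient to exercise these methods offline, e.g. in a unit test.
class _DictResource(object):
    """Minimal in-memory stand-in for the key-value resource used above."""

    def __init__(self, data=None):
        self._data = dict(data or {})

    def get(self, key):
        return self._data.get(key)

    def get_list(self, key):
        return self._data.get(key, [])

    def set(self, key, value):
        self._data[key] = value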