コード例 #1
0
ファイル: RandomSampling.py プロジェクト: samim23/NEXT
    def __full_embedding_update(self, resource, args):
        """Recompute the embedding from scratch and keep it only if it
        improves on the stored one.

        Reads ``n`` (number of items), ``d`` (embedding dimension) and the
        triplet list ``S`` from ``resource``, re-fits the d-dimensional
        embedding with epoch SGD followed by time-budgeted gradient descent,
        and writes it back under 'X'. When ``d != 2`` a separate 2-d
        visualization embedding is re-fit the same way and written under 'X2'.

        Args:
            resource: project key/value store with get/get_list/set.
            args: unused here; kept for the caller's uniform interface.
        """
        n = resource.get('n')
        d = resource.get('d')
        S = resource.get_list('S')

        X_old = numpy.array(resource.get('X'))

        # wall-clock budget for the gradient-descent refinement phase
        t_max = 5.0
        epsilon = 0.01  # a relative convergence criterion, see computeEmbeddingWithGD documentation

        emp_loss_old, hinge_loss_old = utilsMDS.getLoss(X_old, S)
        X, tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,
                                                       d,
                                                       S,
                                                       max_num_passes=16,
                                                       epsilon=0,
                                                       verbose=True)
        t_start = time.time()
        X, emp_loss_new, hinge_loss_new, acc = utilsMDS.computeEmbeddingWithGD(
            X, S, max_iters=1)
        k = 1
        # doubling schedule: run 2**k GD iterations per pass until half the
        # budget is spent or the accuracy criterion is met
        while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon):
            X, emp_loss_new, hinge_loss_new, acc = utilsMDS.computeEmbeddingWithGD(
                X, S, max_iters=2**k)
            k += 1
        emp_loss_new, hinge_loss_new = utilsMDS.getLoss(X, S)
        # keep the previous embedding if the re-fit did not lower empirical loss
        if emp_loss_old < emp_loss_new:
            X = X_old
        resource.set('X', X.tolist())

        if d == 2:
            # in two dimensions the visualization embedding is X itself
            X2 = X
        else:
            # BUGFIX: X2_old was previously used without being defined,
            # raising NameError whenever d != 2; load it from the resource
            # the same way X_old is loaded above.
            X2_old = numpy.array(resource.get('X2'))
            emp_loss_old, hinge_loss_old = utilsMDS.getLoss(X2_old, S)
            X2, tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,
                                                            2,
                                                            S,
                                                            max_num_passes=16,
                                                            epsilon=0,
                                                            verbose=True)
            t_start = time.time()
            X2, emp_loss_new, hinge_loss_new, acc = utilsMDS.computeEmbeddingWithGD(
                X2, S, max_iters=1)
            k = 1
            while (time.time() - t_start < 0.5 * t_max) and (acc > epsilon):
                # take a single pass of 2**k gradient steps
                X2, emp_loss_new, hinge_loss_new, acc = utilsMDS.computeEmbeddingWithGD(
                    X2, S, max_iters=2**k)
                k += 1
            emp_loss_new, hinge_loss_new = utilsMDS.getLoss(X2, S)
            if emp_loss_old < emp_loss_new:
                X2 = X2_old
            # NOTE(review): 'X2' is only written when d != 2 here, whereas the
            # incremental update always writes it — confirm this asymmetry is
            # intended before changing it.
            resource.set('X2', X2.tolist())
コード例 #2
0
ファイル: RandomSampling.py プロジェクト: ngurnani/NEXT
  def __full_embedding_update(self,resource,args):
    """Recompute the embedding from scratch, keeping it only if it improves
    on the stored one.

    Reads n, d and the triplet list S from `resource`, re-fits the
    d-dimensional embedding (epoch SGD then time-budgeted gradient descent)
    and writes it back under 'X'. When d != 2 a separate 2-d visualization
    embedding is re-fit the same way and written under 'X2'.

    Args:
      resource: project key/value store with get/get_list/set.
      args: unused here; kept for the caller's uniform interface.
    """
    n = resource.get('n')
    d = resource.get('d')
    S = resource.get_list('S')

    X_old = numpy.array(resource.get('X'))

    # wall-clock budget for the gradient-descent refinement phase
    t_max = 5.0
    epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation

    emp_loss_old,hinge_loss_old = utilsMDS.getLoss(X_old,S)
    X,tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=16,epsilon=0,verbose=True)
    t_start = time.time()
    X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1)
    k = 1
    # doubling schedule: 2**k GD iterations per pass until half the budget
    # is spent or the accuracy criterion is met
    while (time.time()-t_start<0.5*t_max) and (acc > epsilon):
      X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k)
      k += 1
    emp_loss_new,hinge_loss_new = utilsMDS.getLoss(X,S)
    # keep the previous embedding if the re-fit did not lower empirical loss
    if emp_loss_old < emp_loss_new:
      X = X_old
    resource.set('X',X.tolist())

    if d==2:
      # in two dimensions the visualization embedding is X itself
      X2 = X
    else:
      # BUGFIX: X2_old was previously used without being defined, raising
      # NameError whenever d != 2; load it from the resource the same way
      # X_old is loaded above.
      X2_old = numpy.array(resource.get('X2'))
      emp_loss_old,hinge_loss_old = utilsMDS.getLoss(X2_old,S)
      X2,tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,2,S,max_num_passes=16,epsilon=0,verbose=True)
      t_start = time.time()
      X2,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X2,S,max_iters=1)
      k = 1
      while (time.time()-t_start<0.5*t_max) and (acc > epsilon):
        # take a single pass of 2**k gradient steps
        X2,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X2,S,max_iters=2**k)
        k += 1
      emp_loss_new,hinge_loss_new = utilsMDS.getLoss(X2,S)
      if emp_loss_old < emp_loss_new:
        X2 = X2_old
      # NOTE(review): 'X2' is only written when d != 2 here, whereas the
      # incremental update always writes it — confirm this asymmetry is
      # intended before changing it.
      resource.set('X2',X2.tolist())
コード例 #3
0
ファイル: RandomSampling.py プロジェクト: samim23/NEXT
    def __incremental_embedding_update(self, resource, args):
        """Refine the stored embedding(s) with a short, time-bounded run of
        gradient descent and persist the results.

        Refines 'X' (the d-dimensional embedding); when d != 2, also refines
        the 2-d visualization embedding 'X2'. Both are written back via
        resource.set.

        Args:
            resource: project key/value store with get/get_list/set.
            args: unused here; kept for the caller's uniform interface.
        """
        n = resource.get('n')
        d = resource.get('d')
        S = resource.get_list('S')

        # wall-clock budget for each refinement pass
        t_max = 1.0
        epsilon = 0.01  # a relative convergence criterion, see computeEmbeddingWithGD documentation

        def refine(Y):
            # One GD step, then exponentially longer runs (2**k iterations)
            # until half the budget elapses or accuracy reaches epsilon.
            start = time.time()
            Y, emp_loss, hinge_loss, acc = utilsMDS.computeEmbeddingWithGD(
                Y, S, max_iters=1)
            k = 1
            while (time.time() - start < 0.5 * t_max) and (acc > epsilon):
                Y, emp_loss, hinge_loss, acc = utilsMDS.computeEmbeddingWithGD(
                    Y, S, max_iters=2**k)
                k += 1
            return Y

        X = refine(numpy.array(resource.get('X')))

        if d == 2:
            # in two dimensions the visualization embedding is X itself
            X2 = X
        else:
            X2 = refine(numpy.array(resource.get('X2')))

        resource.set('X', X.tolist())
        resource.set('X2', X2.tolist())
コード例 #4
0
ファイル: RandomSampling.py プロジェクト: ngurnani/NEXT
  def __incremental_embedding_update(self,resource,args):
    """Refine the stored embeddings with a short, time-bounded gradient
    descent run and write them back.

    Refines 'X' (the d-dimensional embedding); when d != 2, also refines the
    2-d visualization embedding 'X2'. Both are persisted via resource.set.

    Args:
      resource: project key/value store with get/get_list/set.
      args: unused here; kept for the caller's uniform interface.
    """
    n = resource.get('n')
    d = resource.get('d')
    S = resource.get_list('S')

    budget = 1.0  # max wall-clock seconds allowed per refinement
    tol = 0.01    # a relative convergence criterion, see computeEmbeddingWithGD documentation

    # refine X: one GD step, then exponentially longer runs (2**step
    # iterations) until half the budget elapses or accuracy reaches tol
    X = numpy.array(resource.get('X'))
    deadline = time.time() + 0.5*budget
    X,_,_,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1)
    step = 1
    while time.time() < deadline and acc > tol:
      X,_,_,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**step)
      step += 1

    if d==2:
      # in two dimensions the visualization embedding is X itself
      X2 = X
    else:
      # same schedule for the 2-d visualization embedding
      X2 = numpy.array(resource.get('X2'))
      deadline = time.time() + 0.5*budget
      X2,_,_,acc = utilsMDS.computeEmbeddingWithGD(X2,S,max_iters=1)
      step = 1
      while time.time() < deadline and acc > tol:
        X2,_,_,acc = utilsMDS.computeEmbeddingWithGD(X2,S,max_iters=2**step)
        step += 1

    resource.set('X',X.tolist())
    resource.set('X2',X2.tolist())