Code example #1
File: netflix.py  Project: MaggieQi/spartan
  def _evaluate(self, ctx, deps):
    V, M, U = deps['V'], deps['M'], deps['U']

    strata = _compute_strata(V)
    util.log_info('Start eval')
    
    for i, stratum in enumerate(strata):
      util.log_info('Processing stratum: %d of %d (size = %d)', i, len(strata), len(stratum))
      #for ex in stratum: print ex

      worklist = set(stratum)
      expr.shuffle(V, sgd_netflix_mapper,
                   kw={'V' : lazify(V), 'M' : lazify(M), 'U' : lazify(U),
                       'worklist' : worklist }).force()
                       
    util.log_info('Eval done.')
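
Note: `lazify` wraps an already-materialized distributed array back into an Expr so it can be passed through `expr.shuffle`'s kw dict, while `.force()` triggers evaluation of each stratum before the next begins. A minimal sketch of that round trip, assuming a running spartan context and the usual `from spartan import expr` import:

from spartan import expr

x = expr.ones((4, 4)).force()   # materialize a distributed array
lazy_x = expr.lazify(x)         # wrap it back into a lazy Expr
y = (lazy_x + lazy_x).force()   # compose a new expression, then evaluate
print y.glom()                  # fetch the result as a local ndarray
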
Code example #2
File: test_sparse.py  Project: EasonLiao/spartan
 def test_sparse_sum(self):
   x = expr.sparse_diagonal(ARRAY_SIZE).force()
   y = x.glom()
   print y.todense()
   
   x = expr.lazify(x)
   for axis in [None, 0, 1]:
     y = x.sum(axis)
     val = y.glom()
     print val
Code example #3
File: test_sparse.py  Project: rossparks/spartan
 def test_sparse_sum(self):
   x = expr.sparse_diagonal(ARRAY_SIZE).force()
   y = x.glom()
   print y.todense()
   
   x = expr.lazify(x)
   for axis in [None, 0, 1]:
     y = x.sum(axis)
     val = y.glom()
     print val
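
Note: the two tests above are identical across forks; both exercise `sum` over every axis of a lazified sparse array. For reference, the expected values sketched with scipy.sparse alone, assuming `expr.sparse_diagonal(n)` builds an n x n identity-like sparse array:

import scipy.sparse as sp

x = sp.identity(5)
print x.sum(None)    # 5.0 -- total of the diagonal entries
print x.sum(axis=0)  # a 1 x 5 row of ones
print x.sum(axis=1)  # a 5 x 1 column of ones
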
Code example #4
File: simple_svm.py  Project: rossparks/spartan
    def examine_example(self, i, N, labels, kernel_results):
        ''' Check whether alpha_i violates the KKT condition.
        If so, choose it as one parameter to be optimized.

        Args:
          i(int): index of the alpha to be checked
          N(int): the number of training examples
          labels(Expr): the labels of the training data
          kernel_results(Expr): the result of the kernel function on the training data
        '''

        Ei = self.E[i, 0]
        ai = self.alpha[i, 0]
        r = labels[i, 0] * Ei
        # check if satisfy KKT condition
        if (r < -self.tol and ai < self.C) or (r > self.tol and ai > self.tol):
            alpha_expr = expr.lazify(self.alpha)
            active_set_mask = (alpha_expr > self.tol) * (alpha_expr < self.C)
            active_set = active_set_mask.glom().nonzero()[0]
            #print 'actives:', active_set

            # first check the example j that maximizes |Ei - Ej|
            idxj = -1
            if active_set.shape[0] > 1:
                active_E = expr.abs(expr.lazify(self.E) - Ei)[active_set_mask - True]
                idxj = np.argmax(active_E.glom())

                if self.take_step(idxj, i, N, labels, kernel_results): return 1

            # then check the examples in active_set
            for j in active_set:
                if j != idxj and self.take_step(j, i, N, labels,
                                                kernel_results):
                    return 1

            # finally check the other examples
            for j in xrange(N):
                if j not in active_set and self.take_step(
                        j, i, N, labels, kernel_results):
                    return 1

        return 0
Code example #5
File: simple_svm.py  Project: GabrielWen/spartan
    def examine_example(self, i, N, labels, kernel_results):
        """ Check if the alpha_i can be optimized. It should satisfy the KKT condition.
    If so, choose it as one parameter to be optimized.

    Args:
      i(int): index of the alpha to be checked
      N(int): the number of features
      labels(Expr): the labels of the training data
      kernel_results(Expr): the result of the kernel function on the training data
    """

        Ei = self.E[i, 0]
        ai = self.alpha[i, 0]
        r = labels[i, 0] * Ei
        # check if satisfy KKT condition
        if (r < -self.tol and ai < self.C) or (r > self.tol and ai > self.tol):
            alpha_expr = expr.lazify(self.alpha)
            active_set_mask = (alpha_expr > self.tol) * (alpha_expr < self.C)
            active_set = active_set_mask.glom().nonzero()[0]
            # print 'actives:', active_set

            # first check the example j that maximizes |Ei - Ej|
            idxj = -1
            if active_set.shape[0] > 1:
                active_E = expr.abs(expr.lazify(self.E) - Ei)[active_set_mask - True]
                idxj = np.argmax(active_E.glom())

                if self.take_step(idxj, i, N, labels, kernel_results):
                    return 1

            # then check the examples in active_set
            for j in active_set:
                if j != idxj and self.take_step(j, i, N, labels, kernel_results):
                    return 1

            # finally check the other examples
            for j in xrange(N):
                if j not in active_set and self.take_step(j, i, N, labels, kernel_results):
                    return 1

        return 0
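
Note: multiplying two boolean Exprs acts as an elementwise AND, and the `[active_set_mask - True]` subscript is how these snippets apply a boolean mask as fancy indexing. The same active-set selection in plain NumPy, as a sketch for comparison (not spartan API):

import numpy as np

alpha = np.array([0.0, 0.5, 1.0, 0.2])
tol, C = 1e-3, 1.0
active_set_mask = (alpha > tol) * (alpha < C)  # alphas strictly inside (0, C)
active_set = active_set_mask.nonzero()[0]      # -> array([1, 3])

E = np.array([0.3, -0.8, 0.1, 0.4])
Ei = 0.3
idxj = np.argmax(np.abs(E - Ei)[active_set_mask])  # position within the active subset
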
Code example #6
    def _evaluate(self, ctx, deps):
        V, M, U = deps['V'], deps['M'], deps['U']

        strata = _compute_strata(V)
        util.log_info('Start eval')

        for i, stratum in enumerate(strata):
            util.log_info('Processing stratum: %d of %d (size = %d)', i,
                          len(strata), len(stratum))
            #for ex in stratum: print ex

            worklist = set(stratum)
            expr.shuffle(V,
                         sgd_netflix_mapper,
                         kw={
                             'V': lazify(V),
                             'M': lazify(M),
                             'U': lazify(U),
                             'worklist': worklist
                         }).evaluate()

        util.log_info('Eval done.')
Code example #7
File: item_recommender.py  Project: EasonLiao/spartan
  def _get_norm_of_each_item(self, rating_table):
    """Get norm of each item vector.
    For each item, calculate the norm of the item vector.
    Parameters
    ----------
    rating_table : Spartan matrix of shape(M, N). 
                   Each column represents the rating of the item.

    Returns
    -------
    item_norm:  Spartan matrix of shape(N,).
                item_norm[i] equals || rating_table[:,i] || 

    """
    ctx = blob_ctx.get()
    if isinstance(rating_table, array.distarray.DistArray):
      rating_table = expr.lazify(rating_table)
    res = expr.sqrt(expr.sum(expr.multiply(rating_table, rating_table), axis=0, 
                             tile_hint=(rating_table.shape[1] / ctx.num_workers, )))
    return res.force()
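
Note: the `tile_hint` only suggests how to partition the result across workers; the computation itself is an ordinary column-wise L2 norm. The same math in plain NumPy, for reference:

import numpy as np

rating_table = np.random.rand(6, 3)  # M=6 ratings, N=3 items
item_norm = np.sqrt((rating_table * rating_table).sum(axis=0))
# equivalently: np.linalg.norm(rating_table, axis=0)
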
Code example #8
File: simple_svm.py  Project: EasonLiao/spartan
  def train_smo_2005(self, data, labels):
    '''
    Train an SVM model using the SMO (2005) algorithm.
   
    Args:
      data(Expr): points to be trained
      labels(Expr): the correct labels of the training data
    '''
    
    N = data.shape[0] # Number of instances
    D = data.shape[1]  # Number of features

    self.b = 0.0
    alpha = expr.zeros((N,1), dtype=np.float64, tile_hint=[N/self.ctx.num_workers, 1]).force()
    
    # linear kernel
    kernel_results = expr.dot(data, expr.transpose(data), tile_hint=[N/self.ctx.num_workers, N])
    gradient = expr.ones((N, 1), dtype=np.float64, tile_hint=[N/self.ctx.num_workers, 1]) * -1.0
    
    expr_labels = expr.lazify(labels)
    
    util.log_info("Starting SMO")
    pv1 = pv2 = -1
    it = 0
    while it < self.maxiter:
      util.log_info("Iteration:%d", it)
      
      minObj = 1e100
      
      expr_alpha = expr.lazify(alpha)
      G = expr.multiply(labels, gradient) * -1.0

      v1_mask = ((expr_labels > self.tol) * (expr_alpha < self.C) + (expr_labels < -self.tol) * (expr_alpha > self.tol))
      v1 = expr.argmax(G[v1_mask-True]).glom().item()
      maxG = G[v1,0].glom()
      print 'maxv1:', v1, 'maxG:', maxG

      v2_mask = ((expr_labels > self.tol) * (expr_alpha > self.tol) + (expr_labels < -self.tol) * (expr_alpha < self.C))     
      min_v2 = expr.argmin(G[v2_mask-True]).glom().item()
      minG = G[min_v2,0].glom()
      #print 'minv2:', min_v2, 'minG:', minG
      
      set_v2 = v2_mask.glom().nonzero()[0]
      #print 'actives:', set_v2.shape[0]
      v2 = -1
      for v in set_v2:
        b = maxG - G[v,0].glom()
        if b > self.tol:
          na = (kernel_results[v1,v1] + kernel_results[v,v] - 2*kernel_results[v1,v]).glom()[0][0]
          if na < self.tol: na = 1e12
          
          obj = -(b*b)/na
          if obj <= minObj and (v1 != pv1 or v != pv2):  # skip the previously selected pair
            v2 = v
            a = na
            minObj = obj
      
      if v2 == -1: break
      if maxG - minG < self.tol: break
      
      print 'opt v1:', v1, 'v2:', v2

      pv1 = v1
      pv2 = v2
    
      y1 = labels[v1,0]
      y2 = labels[v2,0]    
        
      oldA1 = alpha[v1,0]
      oldA2 = alpha[v2,0]
      
      # Calculate new alpha values, to reduce the objective function...
      b = y2*expr.glom(gradient[v2,0]) - y1*expr.glom(gradient[v1,0])
      if y1 != y2:
        a += 4 * kernel_results[v1,v2].glom()
      
      newA1 = oldA1 + y1*b/a
      newA2 = oldA2 - y2*b/a   

      # Correct for alpha being out of range...
      sum = y1*oldA1 + y2*oldA2
  
      if newA1 < self.tol: newA1 = 0.0
      elif newA1 > self.C: newA1 = self.C
     
      newA2 = y2 * (sum - y1 * newA1) 

      if newA2 < self.tol: newA2 = 0.0
      elif newA2 > self.C: newA2 = self.C
     
      newA1 = y1 * (sum - y2 * newA2)
  
      # Update the gradient...
      dA1 = newA1 - oldA1
      dA2 = newA2 - oldA2
  
      gradient += expr.multiply(labels, kernel_results[:,v1]) * y1 * dA1 + expr.multiply(labels, kernel_results[:,v2]) * y2 * dA2

      alpha[v1,0] = newA1
      alpha[v2,0] = newA2
 
      #print 'alpha:', alpha.glom().T
      
      it += 1
      #print 'gradient:', gradient.glom().T

    self.w = expr.zeros((D, 1), dtype=np.float64).force()
    for i in xrange(D): 
      self.w[i,0] = expr.reduce(alpha, axis=None, dtype_fn=lambda input: input.dtype,
                                local_reduce_fn=margin_mapper,
                                accumulate_fn=np.add, 
                                fn_kw=dict(label=labels, data=expr.force(data[:,i]))).glom()
    
    self.b = 0.0
    E = (labels - self.margins(data)).force()
    
    minB = -1e100
    maxB = 1e100
    actualB = 0.0
    numActualB = 0
    
    for i in xrange(N):
      ai = alpha[i,0]
      yi = labels[i,0]
      Ei = E[i,0]
      
      if ai < 1e-3:
        if yi < self.tol:
          maxB = min((maxB,Ei))
        else:
          minB = max((minB,Ei))
      elif ai > self.C - 1e-3:
        if yi < self.tol:
          minB = max((minB,Ei))
        else:
          maxB = min((maxB,Ei))
      else:
        numActualB += 1
        actualB += (Ei - actualB) / float(numActualB)
    if numActualB > 0:
      self.b = actualB
    else:
      self.b = 0.5*(minB + maxB)

    self.usew_ = True
    print 'iteration finish:', it
    print 'b:', self.b
    print 'w:', self.w.glom()
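
Note: the closing loop recovers the bias b in the standard SMO way: it averages E_i over non-bound support vectors (1e-3 < alpha_i < C - 1e-3), and if none exist it falls back to the midpoint of the interval [minB, maxB] bracketed by the bound alphas. A vectorized NumPy sketch of the same rule (`compute_b` is a hypothetical helper, mirroring the loop above):

import numpy as np

def compute_b(alpha, labels, E, C, tol):
    free = (alpha > 1e-3) & (alpha < C - 1e-3)  # non-bound support vectors
    if free.any():
        return E[free].mean()  # the loop's running mean equals a plain mean
    # every alpha sits at a bound; bracket b between the tightest errors
    at_lower = alpha <= 1e-3
    neg = labels < tol
    hi = (at_lower & neg) | (~at_lower & ~neg)  # these cap b from above
    maxB = E[hi].min() if hi.any() else 1e100
    minB = E[~hi].max() if (~hi).any() else -1e100
    return 0.5 * (minB + maxB)
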
Code example #9
File: simple_svm.py  Project: rossparks/spartan
    def train_smo_2005(self, data, labels):
        '''
        Train an SVM model using the SMO (2005) algorithm.

        Args:
          data(Expr): points to be trained
          labels(Expr): the correct labels of the training data
        '''

        N = data.shape[0]  # Number of instances
        D = data.shape[1]  # Number of features

        self.b = 0.0
        alpha = expr.zeros((N, 1),
                           dtype=np.float64,
                           tile_hint=[N / self.ctx.num_workers, 1]).force()

        # linear kernel
        kernel_results = expr.dot(data,
                                  expr.transpose(data),
                                  tile_hint=[N / self.ctx.num_workers, N])
        gradient = expr.ones((N, 1),
                             dtype=np.float64,
                             tile_hint=[N / self.ctx.num_workers, 1]) * -1.0

        expr_labels = expr.lazify(labels)

        util.log_info("Starting SMO")
        pv1 = pv2 = -1
        it = 0
        while it < self.maxiter:
            util.log_info("Iteration:%d", it)

            minObj = 1e100

            expr_alpha = expr.lazify(alpha)
            G = expr.multiply(labels, gradient) * -1.0

            v1_mask = ((expr_labels > self.tol) * (expr_alpha < self.C) +
                       (expr_labels < -self.tol) * (expr_alpha > self.tol))
            v1 = expr.argmax(G[v1_mask - True]).glom().item()
            maxG = G[v1, 0].glom()
            print 'maxv1:', v1, 'maxG:', maxG

            v2_mask = ((expr_labels > self.tol) * (expr_alpha > self.tol) +
                       (expr_labels < -self.tol) * (expr_alpha < self.C))
            min_v2 = expr.argmin(G[v2_mask - True]).glom().item()
            minG = G[min_v2, 0].glom()
            #print 'minv2:', min_v2, 'minG:', minG

            set_v2 = v2_mask.glom().nonzero()[0]
            #print 'actives:', set_v2.shape[0]
            v2 = -1
            for v in set_v2:
                b = maxG - G[v, 0].glom()
                if b > self.tol:
                    na = (kernel_results[v1, v1] + kernel_results[v, v] -
                          2 * kernel_results[v1, v]).glom()[0][0]
                    if na < self.tol: na = 1e12

                    obj = -(b * b) / na
                    if obj <= minObj and (v1 != pv1 or v != pv2):  # skip the previously selected pair
                        v2 = v
                        a = na
                        minObj = obj

            if v2 == -1: break
            if maxG - minG < self.tol: break

            print 'opt v1:', v1, 'v2:', v2

            pv1 = v1
            pv2 = v2

            y1 = labels[v1, 0]
            y2 = labels[v2, 0]

            oldA1 = alpha[v1, 0]
            oldA2 = alpha[v2, 0]

            # Calculate new alpha values, to reduce the objective function...
            b = y2 * expr.glom(gradient[v2, 0]) - y1 * expr.glom(gradient[v1, 0])
            if y1 != y2:
                a += 4 * kernel_results[v1, v2].glom()

            newA1 = oldA1 + y1 * b / a
            newA2 = oldA2 - y2 * b / a

            # Correct for alpha being out of range...
            sum = y1 * oldA1 + y2 * oldA2

            if newA1 < self.tol: newA1 = 0.0
            elif newA1 > self.C: newA1 = self.C

            newA2 = y2 * (sum - y1 * newA1)

            if newA2 < self.tol: newA2 = 0.0
            elif newA2 > self.C: newA2 = self.C

            newA1 = y1 * (sum - y2 * newA2)

            # Update the gradient...
            dA1 = newA1 - oldA1
            dA2 = newA2 - oldA2

            gradient += (expr.multiply(labels, kernel_results[:, v1]) * y1 * dA1 +
                         expr.multiply(labels, kernel_results[:, v2]) * y2 * dA2)

            alpha[v1, 0] = newA1
            alpha[v2, 0] = newA2

            #print 'alpha:', alpha.glom().T

            it += 1
            #print 'gradient:', gradient.glom().T

        self.w = expr.zeros((D, 1), dtype=np.float64).force()
        for i in xrange(D):
            self.w[i, 0] = expr.reduce(alpha,
                                       axis=None,
                                       dtype_fn=lambda input: input.dtype,
                                       local_reduce_fn=margin_mapper,
                                       accumulate_fn=np.add,
                                       fn_kw=dict(label=labels,
                                                  data=expr.force(data[:, i]))).glom()

        self.b = 0.0
        E = (labels - self.margins(data)).force()

        minB = -1e100
        maxB = 1e100
        actualB = 0.0
        numActualB = 0

        for i in xrange(N):
            ai = alpha[i, 0]
            yi = labels[i, 0]
            Ei = E[i, 0]

            if ai < 1e-3:
                if yi < self.tol:
                    maxB = min((maxB, Ei))
                else:
                    minB = max((minB, Ei))
            elif ai > self.C - 1e-3:
                if yi < self.tol:
                    minB = max((minB, Ei))
                else:
                    maxB = min((maxB, Ei))
            else:
                numActualB += 1
                actualB += (Ei - actualB) / float(numActualB)
        if numActualB > 0:
            self.b = actualB
        else:
            self.b = 0.5 * (minB + maxB)

        self.usew_ = True
        print 'iteration finish:', it
        print 'b:', self.b
        print 'w:', self.w.glom()
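
Note: the paired update above preserves the linear constraint y1*a1 + y2*a2 == s while clipping each multiplier into [0, C]; since the labels are +/-1, multiplying by y2 is the same as dividing by it. A plain-Python sketch of that clipping step (`clip_pair` is a hypothetical helper, mirroring the snippet):

def clip_pair(newA1, y1, y2, s, C, tol):
    # clip a1, recompute a2 from the invariant, clip a2, then
    # recompute a1 so the invariant holds exactly after clipping
    if newA1 < tol: newA1 = 0.0
    elif newA1 > C: newA1 = C
    newA2 = y2 * (s - y1 * newA1)
    if newA2 < tol: newA2 = 0.0
    elif newA2 > C: newA2 = C
    newA1 = y1 * (s - y2 * newA2)
    return newA1, newA2
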