Example 1
    def examine_example(self, i, N, labels, kernel_results):
        '''Check whether alpha_i is a candidate for optimization, i.e. whether
    it violates the KKT conditions within tolerance. If so, choose it as one
    of the two parameters to optimize.

    Args:
      i(int): index of the alpha to be checked
      N(int): the number of training samples
      labels(Expr): the labels of the training data
      kernel_results(Expr): the result of the kernel function on the training data
    '''

        Ei = self.E[i, 0]
        ai = self.alpha[i, 0]
        r = labels[i, 0] * Ei
        # select i only if it violates the KKT conditions (within tolerance)
        if (r < -self.tol and ai < self.C) or (r > self.tol and ai > self.tol):
            alpha_expr = expr.lazify(self.alpha)
            # non-bound alphas: tol < alpha < C
            active_set_mask = (alpha_expr > self.tol) * (alpha_expr < self.C)
            active_set = active_set_mask.glom().nonzero()[0]

            # first try the example j that maximizes |Ei - Ej|
            idxj = -1
            if active_set.shape[0] > 1:
                active_E = expr.abs(expr.lazify(self.E) - Ei)[active_set_mask - True]
                idxj = np.argmax(active_E.glom())

                if self.take_step(idxj, i, N, labels, kernel_results):
                    return 1

            # then try the remaining examples in the active set
            for j in active_set:
                if j != idxj and self.take_step(j, i, N, labels, kernel_results):
                    return 1

            # finally try every other example
            for j in xrange(N):
                if j not in active_set and self.take_step(j, i, N, labels, kernel_results):
                    return 1

        return 0
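
For context, examine_example is typically driven by an SMO-style outer loop that alternates full sweeps over all indices with sweeps over the non-bound alphas until no alpha changes. The sketch below is illustrative only: the train method name and the alternation logic are assumptions, not part of the example above; only examine_example's signature and the self.alpha / self.tol / self.C attributes are taken from it.

    def train(self, N, labels, kernel_results):
        '''Illustrative SMO outer loop (assumed; not part of the example above).'''
        num_changed = 0
        examine_all = True
        while num_changed > 0 or examine_all:
            num_changed = 0
            if examine_all:
                # full sweep over every alpha
                for i in xrange(N):
                    num_changed += self.examine_example(i, N, labels, kernel_results)
            else:
                # sweep only the non-bound alphas (tol < alpha < C)
                for i in xrange(N):
                    if self.tol < self.alpha[i, 0] < self.C:
                        num_changed += self.examine_example(i, N, labels, kernel_results)
            # alternate between full and non-bound sweeps, as in Platt's SMO
            if examine_all:
                examine_all = False
            elif num_changed == 0:
                examine_all = True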
Example 2
def predict_price(ask, bid, t):
  # element-wise difference between ask and bid
  spread = ask - bid

  # element-wise average of ask and bid
  midprice = (ask + bid) / 2

  # slices allow for cheaply extracting parts of an array
  d_spread = spread[t:] - spread[:-t]

  # align the spread changes with the midprice `t` steps further in the future
  d_spread = d_spread[:-t]
  future_price = midprice[2*t:]

  util.log_info('D: %s, M: %s', d_spread.shape, future_price.shape)

  # crude univariate linear predictor: average ratio of future price to spread change
  regression = mean(future_price / d_spread)
  prediction = regression * d_spread

  error = mean(abs(prediction - future_price))
  return error
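
A minimal driver for this snippet might look like the following. The synthetic price series, the lag t, and the import paths are assumptions for illustration; the snippet itself relies on mean, abs, and util already being available at module level (e.g. from spartan.expr / spartan.util).

from spartan import expr, util
from spartan.expr import eager

# synthetic ask/bid series for illustration only; sizes and lag are arbitrary
n, t = 10000, 5
bid = eager(expr.abs(100 + expr.randn(n)))
ask = eager(bid + expr.abs(expr.randn(n)))

err = predict_price(ask, bid, t)
util.log_info('mean absolute error: %s', err)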
Example 3
def find_change(arr, threshold=0.5):
  # absolute element-wise change between consecutive entries
  diff = abs(arr[1:] - arr[:-1])
  # force evaluation; `diff` is used twice below (as data and as the filter)
  diff = eager(diff)
  return diff[diff > threshold]
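
A possible invocation, with an arbitrary synthetic series and import paths assumed for illustration:

from spartan import expr
from spartan.expr import eager

series = eager(expr.randn(1000))       # arbitrary noisy series
jumps = find_change(series, threshold=1.0)
print jumps.glom()                     # materialize the filtered values locally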
Example 4
  def setUp(self):
    if not hasattr(self, 'current'):
      # two positive random vectors shared by the tests in this case
      self.current = eager(expr.abs(10 + expr.randn(10)))
      self.strike = eager(expr.abs(20 + expr.randn(10)))
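
This fixture only prepares two positive random vectors (current and strike); a test built on top of it could look like the sketch below. The test name and the checks are illustrative assumptions, not part of the original fixture.

  def test_fixture_shapes(self):
    # hypothetical check: both fixtures are positive vectors of length 10
    current = self.current.glom()
    strike = self.strike.glom()
    assert current.shape == (10,)
    assert strike.shape == (10,)
    assert (current > 0).all() and (strike > 0).all()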