Example #1
def benchmark_convnet(ctx, timer):
  image_size = BASE_IMG_SIZE
  minibatch = 64
  #minibatch = ctx.num_workers
  hint = util.divup(image_size, sqrt(ctx.num_workers))
  tile_hint = (util.divup(minibatch, ctx.num_workers), N_COLORS, image_size, image_size)
  util.log_info('Hint: %s', tile_hint)
    
  images = expr.eager(expr.ones((minibatch, N_COLORS, image_size, image_size),
                                tile_hint=tile_hint))
  
  w1 = expr.eager(expr.ones((N_FILTERS, N_COLORS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  w2 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  w3 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  
  def _():
    conv1 = stencil.stencil(images, w1, 2)
    pool1 = stencil.maxpool(conv1)
   
    conv2 = stencil.stencil(pool1, w2, 2)
    pool2 = stencil.maxpool(conv2)
    
    conv3 = stencil.stencil(pool2, w3, 2)
    pool3 = stencil.maxpool(conv3)
    
    expr.force(pool3)
 
  # force parakeet functions to compile before timing. 
  _()  
  for i in range(2):
    timer.time_op('convnet', _)
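
A note for readers: stencil.stencil and stencil.maxpool are not defined in this listing. Judging by the names and the shapes above, stencil.stencil appears to compute a strided, same-padded 2D convolution over (image, color, row, col) arrays and stencil.maxpool a non-overlapping max-pool; the single-node NumPy sketch below illustrates that reading. It is an assumption for reference only, not Spartan's distributed implementation.

import numpy as np

def conv2d(images, filters, stride=1):
  # Same-padded strided 2D convolution:
  # (n, c, h, w) * (f, c, kh, kw) -> (n, f, h // stride, w // stride).
  n, c, h, w = images.shape
  f, _, kh, kw = filters.shape
  padded = np.pad(images, ((0, 0), (0, 0), (kh // 2, kh // 2), (kw // 2, kw // 2)))
  out = np.zeros((n, f, h // stride, w // stride))
  for i in range(out.shape[2]):
    for j in range(out.shape[3]):
      patch = padded[:, :, i * stride:i * stride + kh, j * stride:j * stride + kw]
      # Sum over the channel axis and the filter window for every image/filter pair.
      out[:, :, i, j] = np.einsum('nchw,fchw->nf', patch, filters)
  return out

def maxpool(x):
  # Non-overlapping 2x2 max-pooling over the two trailing axes (h, w must be even).
  n, f, h, w = x.shape
  return x.reshape(n, f, h // 2, 2, w // 2, 2).max(axis=(3, 5))

For example, conv2d(np.ones((2, 3, 8, 8)), np.ones((4, 3, 5, 5)), stride=2) returns an array of shape (2, 4, 4, 4).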
Example #2
def test_convnet(ctx):
    hint = util.divup(64, sqrt(ctx.num_workers))

    images = expr.eager(
        expr.ones((N_IMGS, ) + IMG_SIZE,
                  tile_hint=(N_IMGS, N_COLORS, hint, hint)))

    w1 = expr.eager(
        expr.ones((N_FILTERS, N_COLORS) + FILTER_SIZE, tile_hint=ONE_TILE))

    conv1 = stencil.stencil(images, w1, 2)
    pool1 = stencil.maxpool(conv1)

    w2 = expr.eager(
        expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE, tile_hint=ONE_TILE))

    conv2 = stencil.stencil(pool1, w2, 2)
    pool2 = stencil.maxpool(conv2)

    w3 = expr.eager(
        expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE, tile_hint=ONE_TILE))
    conv3 = stencil.stencil(pool2, w3, 2)
    pool3 = stencil.maxpool(conv3)

    util.log_info(pool3.shape)
Example #3
 def _(axis):
     util.log_info('Testing sum over axis %s', axis)
     a = expr.ones((TEST_SIZE, TEST_SIZE)) + expr.ones(
         (TEST_SIZE, TEST_SIZE))
     b = a.sum(axis=axis)
     Assert.all_eq(b.glom(), 2 * np.ones(
         (TEST_SIZE, TEST_SIZE)).sum(axis))
Example #4
def fn2():
  a = expr.ones((N, N))
  b = expr.ones((N, N))
  x = expr.dot(a, b)
  g = a + b + x

  return g
Example #5
def fn2():
    a = expr.ones((N, N))
    b = expr.ones((N, N))
    x = expr.dot(a, b)
    g = a + b + x

    return g
Example #6
  def test_optimization_map_with_location(self):
    FLAGS.opt_parakeet_gen = 1
    def mapper(tile, ex):
      return tile + 10

    a = expr.map_with_location(expr.ones((5, 5)), mapper) + expr.ones((5, 5))
    Assert.isinstance(a.optimized().op, expr.local.ParakeetExpr)
Example #7
def benchmark_convnet(ctx, timer):
  image_size = BASE_IMG_SIZE
  minibatch = 64
  #minibatch = ctx.num_workers
  hint = util.divup(image_size, sqrt(ctx.num_workers))
  tile_hint = (util.divup(minibatch, ctx.num_workers), N_COLORS, image_size, image_size)
  util.log_info('Hint: %s', tile_hint)

  images = expr.eager(expr.ones((minibatch, N_COLORS, image_size, image_size),
                                tile_hint=tile_hint))

  w1 = expr.eager(expr.ones((N_FILTERS, N_COLORS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  w2 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  w3 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))

  def _():
    conv1 = stencil.stencil(images, w1, 2)
    pool1 = stencil.maxpool(conv1)

    conv2 = stencil.stencil(pool1, w2, 2)
    pool2 = stencil.maxpool(conv2)

    conv3 = stencil.stencil(pool2, w3, 2)
    pool3 = stencil.maxpool(conv3)

    pool3.evaluate()

  # force parakeet functions to compile before timing.
  _()
  for i in range(2):
    timer.time_op('convnet', _)
Example #8
    def test_add_many(self):
        a = expr.ones((TEST_SIZE, TEST_SIZE))
        b = expr.ones((TEST_SIZE, TEST_SIZE))

        add_many = (a + b + a + b + a + b + a + b + a + b)
        #print add_many
        #print add_many.dag()
        Assert.all_eq(add_many.glom(), np.ones((TEST_SIZE, TEST_SIZE)) * 10)
Example #9
def test_numexpr_opt(ctx):
  a = expr.ones((10, 10))
  b = expr.ones((10, 10))
  c = expr.ones((10, 10))
  d = expr.ones((10, 10))
  e = expr.ones((10, 10))

  f = a + b + c + d + e
  f.evaluate()
Example #10
    def test_optimization_map_with_location(self):
        FLAGS.opt_parakeet_gen = 1

        def mapper(tile, ex):
            return tile + 10

        a = expr.map_with_location(expr.ones((5, 5)), mapper) + expr.ones(
            (5, 5))
        Assert.isinstance(a.optimized().op, expr.operator.local.ParakeetExpr)
Example #11
  def test_add_many(self):
    a = expr.ones((TEST_SIZE, TEST_SIZE))
    b = expr.ones((TEST_SIZE, TEST_SIZE))

    add_many = (a + b + a + b + a + b + a + b + a + b)
    #print add_many
    #print add_many.dag()
    Assert.all_eq(add_many.glom(),
                  np.ones((TEST_SIZE, TEST_SIZE)) * 10)
Example #12
  def test_optimization_region_map(self):
    def mapper(tile, ex):
      return tile + 10

    ex = array.extent.create((0, 0), (1, 5), (5, 5))
    a = expr.region_map(expr.ones((5, 5)), ex, mapper) + expr.ones((5, 5))*10

    for child in a.optimized().op.deps:
      Assert.true(not isinstance(child, expr.local.LocalInput))
Example #13
    def test_optimization_region_map(self):
        def mapper(tile, ex):
            return tile + 10

        ex = array.extent.create((0, 0), (1, 5), (5, 5))
        a = expr.region_map(expr.ones((5, 5)), ex, mapper) + expr.ones(
            (5, 5)) * 10

        for child in a.optimized().op.deps:
            Assert.true(not isinstance(child, expr.operator.local.LocalInput))
Example #14
def fn2():
  a = expr.ones((N, N))
  b = expr.ones((N, N/2))
  g = expr.dot(a, b) + expr.dot(expr.sum(a, axis=1).reshape((1, N)), b)
  t1 = time.time()
  g_opt = g.optimized()
  #g_opt.force()
  t2 = time.time()
  print t2 - t1
  print g_opt
Example #15
def fn2():
  a = expr.ones((N, N))
  b = expr.ones((N, N/2))
  g = expr.dot(a, b) + expr.dot(expr.sum(a, axis=1).reshape((1, N)), b)
  t1 = time.time()
  g_opt = g.optimized()
  #g_opt.evaluate()
  t2 = time.time()
  print t2 - t1
  print g_opt
Example #16
def test_numexpr_opt(ctx):
  a = expr.ones((10, 10))
  b = expr.ones((10, 10))
  c = expr.ones((10, 10))
  d = expr.ones((10, 10))
  e = expr.ones((10, 10))
  
  f = a + b + c + d + e
  f.force()
  #print f.dag()
  #print f.force()
Example #17
def test_numexpr_opt(ctx):
    a = expr.ones((10, 10))
    b = expr.ones((10, 10))
    c = expr.ones((10, 10))
    d = expr.ones((10, 10))
    e = expr.ones((10, 10))

    f = a + b + c + d + e
    f.force()
    #print f.dag()
    #print f.force()
Example #18
def fn1():
  a = expr.ones((N, N))
  b = expr.ones((N, N))
  x = expr.dot(a, b)
  g = a + b + x
   
  t1 = time.time()
  print g.optimized()
  t2 = time.time()

  print t2 - t1
Example #19
def fn1():
  a = expr.ones((N, N))
  b = expr.ones((N, N))
  x = expr.dot(a, b)
  g = a + b + x

  t1 = time.time()
  print g.optimized()
  t2 = time.time()

  print t2 - t1
Example #20
  def test_broadcast(self):
    a = expr.ones((100, 1, 100, 100)).force()
    b = expr.ones((10, 100, 1)).force()
    a, b = broadcast.broadcast((a, b))
    c = expr.add(a, b).force()
    d = expr.sub(a, b).force()

    n = np.ones((100, 10, 100, 100))
    n1 = n + n
    n2 = n - n
    Assert.all_eq(n1, c.glom())
    Assert.all_eq(n2, d.glom())
Example #21
    def test_broadcast(self):
        a = expr.ones((100, 1, 100, 100)).force()
        b = expr.ones((10, 100, 1)).force()
        a, b = broadcast.broadcast((a, b))
        c = expr.add(a, b).force()
        d = expr.sub(a, b).force()

        n = np.ones((100, 10, 100, 100))
        n1 = n + n
        n2 = n - n
        Assert.all_eq(n1, c.glom())
        Assert.all_eq(n2, d.glom())
Example #22
  def test1(self):
    a = expr.ones(ARRAY_SIZE)
    b = expr.ones(ARRAY_SIZE)
    c = expr.ones(ARRAY_SIZE)
    x = a + b + c
    y = x + x
    z = y + y
    z = expr.checkpoint(z, mode='disk')
    z.evaluate()

    failed_worker_id = 0
    ctx = blob_ctx.get()
    ctx.local_worker.mark_failed_worker(failed_worker_id)

    res = z + z
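    # z evaluated to 12 * ones and was checkpointed to disk, so z + z can be
    # recomputed to 24 * ones even after a worker has been marked failed.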
    Assert.all_eq(res.glom(), np.ones(ARRAY_SIZE)*24)
Example #23
def benchmark_optimization(ctx, timer):
  FLAGS.optimization = 0
  DATA_SIZE = 5 * 1000 * 1000
  current = eager(zeros((DATA_SIZE * ctx.num_workers,),
                        dtype=np.float32, tile_hint = (DATA_SIZE,)))
  strike = eager(ones((DATA_SIZE * ctx.num_workers,),
                      dtype=np.float32, tile_hint=(DATA_SIZE,)))
  maturity = eager(strike * 12)
  rate = eager(strike * 0.05)
  volatility = eager(strike * 0.01)

  timer.time_op('opt-none', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-none', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-none', lambda: bs_step(current, strike, maturity, rate, volatility))

  FLAGS.optimization = 1
  FLAGS.opt_parakeet_gen = 0
  FLAGS.opt_map_fusion = 1
  timer.time_op('opt-fusion', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-fusion', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-fusion', lambda: bs_step(current, strike, maturity, rate, volatility))

  FLAGS.opt_parakeet_gen = 1
  timer.time_op('opt-parakeet', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-parakeet', lambda: bs_step(current, strike, maturity, rate, volatility))
  timer.time_op('opt-parakeet', lambda: bs_step(current, strike, maturity, rate, volatility))
Example #24
 def test1(self):
   a = expr.ones(ARRAY_SIZE)
   b = expr.ones(ARRAY_SIZE)
   c = expr.ones(ARRAY_SIZE)
   x = a + b + c
   y = x + x
   z = y + y
   z = expr.checkpoint(z, mode='disk')
   z.force()
   
   failed_worker_id = 0
   ctx = blob_ctx.get()
   ctx.local_worker.mark_failed_worker(failed_worker_id)
   
   res = z + z
   Assert.all_eq(res.glom(), np.ones(ARRAY_SIZE)*24)
Example #25
def benchmark_optimization(ctx, timer):
    FLAGS.optimization = 0
    DATA_SIZE = 5 * 1000 * 1000
    current = eager(
        zeros((DATA_SIZE * ctx.num_workers, ),
              dtype=np.float32,
              tile_hint=(DATA_SIZE, )))
    strike = eager(
        ones((DATA_SIZE * ctx.num_workers, ),
             dtype=np.float32,
             tile_hint=(DATA_SIZE, )))
    maturity = eager(strike * 12)
    rate = eager(strike * 0.05)
    volatility = eager(strike * 0.01)

    timer.time_op('opt-none',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-none',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-none',
                  lambda: bs_step(current, strike, maturity, rate, volatility))

    FLAGS.optimization = 1
    FLAGS.opt_parakeet_gen = 0
    FLAGS.opt_map_fusion = 1
    timer.time_op('opt-fusion',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-fusion',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-fusion',
                  lambda: bs_step(current, strike, maturity, rate, volatility))

    FLAGS.opt_parakeet_gen = 1
    timer.time_op('opt-parakeet',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-parakeet',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-parakeet',
                  lambda: bs_step(current, strike, maturity, rate, volatility))

    FLAGS.opt_parakeet_gen = 0
    FLAGS.opt_auto_tiling = 0
    timer.time_op('opt-tiling = 0',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-tiling = 0',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-tiling = 0',
                  lambda: bs_step(current, strike, maturity, rate, volatility))

    FLAGS.opt_auto_tiling = 1
    timer.time_op('opt-tiling',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-tiling',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
    timer.time_op('opt-tiling',
                  lambda: bs_step(current, strike, maturity, rate, volatility))
Example #26
    def test_index(self):
        a = expr.arange((TEST_SIZE, TEST_SIZE))
        b = expr.ones((10, ), dtype=np.int)
        z = a[b]
        val = expr.evaluate(z)

        nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
        ny = np.ones((10, ), dtype=np.int)

        Assert.all_eq(val.glom(), nx[ny])
Example #27
  def test_index(self):
    a = expr.arange((TEST_SIZE, TEST_SIZE))
    b = expr.ones((10,), dtype=np.int)
    z = a[b]
    val = expr.evaluate(z)

    nx = np.arange(TEST_SIZE * TEST_SIZE).reshape(TEST_SIZE, TEST_SIZE)
    ny = np.ones((10,), dtype=np.int)

    Assert.all_eq(val.glom(), nx[ny])
Example #28
def benchmark_matmul(ctx, timer):
    N = int(1000 * math.pow(ctx.num_workers, 1.0 / 3.0))
    # N = 4000
    M = util.divup(N, ctx.num_workers)
    T = util.divup(N, math.sqrt(ctx.num_workers))

    util.log_info("Testing with %d workers, N = %d, tile_size=%s", ctx.num_workers, N, T)

    # x = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(N, M)))
    # y = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(N, M)))

    x = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(T, T)))
    y = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(T, T)))

    # print expr.glom(expr.dot(x, y))
    # print expr.dag(expr.dot(x, y))

    def _step():
        expr.evaluate(expr.dot(x, y))

    timer.time_op("matmul", _step)
Example #29
def test_stencil(ctx):
  st = time.time()

  IMG_SIZE = int(8 * math.sqrt(ctx.num_workers))
  FILT_SIZE = 8
  N = 8
  F = 32
  
  tile_size = util.divup(IMG_SIZE, math.sqrt(ctx.num_workers))
  
  images = expr.ones((N, 3, IMG_SIZE, IMG_SIZE), 
                     dtype=np.float, 
                     tile_hint=(N, 3, tile_size, tile_size))
  
  filters = expr.ones((F, 3, FILT_SIZE, FILT_SIZE), 
                      dtype=np.float, 
                      tile_hint=ONE_TILE)
  
  result = stencil.stencil(images, filters, 1)
  ed = time.time()
  print ed - st
Example #30
def test_tilesharing(ctx):
  print "#worker:", ctx.num_workers
  N_EXAMPLES = 5 * ctx.num_workers
  x = expr.ones((N_EXAMPLES, 1), tile_hint=(N_EXAMPLES / ctx.num_workers, 1))
  y = expr.region_map(x, extent.create((0, 0), (3, 1), (N_EXAMPLES, 1)), fn=lambda data, ex, a: data+a, fn_kw={'a': 1})

  npx = np.ones((N_EXAMPLES, 1))
  npy = np.ones((N_EXAMPLES, 1))
  npy[0:3, 0] += 1

  assert np.all(np.equal(x.glom(), npx))
  assert np.all(np.equal(y.glom(), npy))
Example #31
def benchmark_matmul(ctx, timer):
    N = int(1000 * math.pow(ctx.num_workers, 1.0 / 3.0))
    #N = 4000
    M = util.divup(N, ctx.num_workers)
    T = util.divup(N, math.sqrt(ctx.num_workers))

    util.log_info('Testing with %d workers, N = %d, tile_size=%s',
                  ctx.num_workers, N, T)

    #x = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(N, M)))
    #y = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(N, M)))

    x = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(T, T)))
    y = expr.eager(expr.ones((N, N), dtype=np.double, tile_hint=(T, T)))

    #print expr.glom(expr.dot(x, y))
    #print expr.dag(expr.dot(x, y))

    def _step():
        expr.evaluate(expr.dot(x, y))

    timer.time_op('matmul', _step)
Example #32
def center_data(X, y, fit_intercept, normalize=False):
  """
  Centers data to have mean zero along axis 0. This is here because
  nearly all linear models will want their data to be centered.
  """
  if fit_intercept:
    X_mean = X.mean(axis = 0)
    X_mean = expr.reshape(X_mean, (1, X_mean.shape[0]))
    X -= X_mean
    
    if normalize:
      X_std = expr.sqrt(expr.sum(X ** 2, axis=0)).force()
      X_std[X_std == 0] = 1
      X /= X_std
    else:
      X_std = expr.ones(X.shape[1])
    
    y_mean = y.mean(axis=0)
    y -= y_mean
  else:
    X_mean = expr.zeros(X.shape[1])
    X_std = expr.ones(X.shape[1])
    y_mean = 0. if y.ndim == 1 else expr.zeros(y.shape[1], dtype=X.dtype)
  return X, y, X_mean, y_mean, X_std
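
For reference, a small standalone NumPy sketch of the same centering logic, independent of the expr API:

import numpy as np

X = np.arange(12, dtype=float).reshape(4, 3)
y = np.array([1., 2., 3., 4.])

X_mean = X.mean(axis=0)
Xc = X - X_mean              # every column of Xc now has zero mean
yc = y - y.mean(axis=0)

assert np.allclose(Xc.mean(axis=0), 0.0)
assert np.isclose(yc.mean(), 0.0)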
Example #33
def test_convnet(ctx):
  hint = util.divup(64, sqrt(ctx.num_workers))

  images = expr.eager(expr.ones((N_IMGS,) + IMG_SIZE,
                                tile_hint=(N_IMGS, N_COLORS, hint, hint)))

  w1 = expr.eager(expr.ones((N_FILTERS, N_COLORS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))

  conv1 = stencil.stencil(images, w1, 2)
  pool1 = stencil.maxpool(conv1)

  w2 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))

  conv2 = stencil.stencil(pool1, w2, 2)
  pool2 = stencil.maxpool(conv2)

  w3 = expr.eager(expr.ones((N_FILTERS, N_FILTERS) + FILTER_SIZE,
                            tile_hint=ONE_TILE))
  conv3 = stencil.stencil(pool2, w3, 2)
  pool3 = stencil.maxpool(conv3)

  util.log_info(pool3.shape)
Example #34
def benchmark_pagerank(ctx, timer):
  num_pages = PAGES_PER_WORKER * ctx.num_workers
  util.log_info('Total pages: %s', num_pages)

  wts = eager(
    expr.shuffle(
      expr.ndarray(
        (num_pages, num_pages), 
        dtype=np.float32,
        tile_hint=(num_pages, PAGES_PER_WORKER / 8)),
      make_weights,
    ))

  p = eager(expr.ones((num_pages, 1), 
                      tile_hint=(PAGES_PER_WORKER / 8, 1), 
                      dtype=np.float32))

  for i in range(3):
    timer.time_op('pagerank', lambda: expr.dot(wts, p).force())
Example #35
def conj_gradient(A, num_iter=15):
  '''
  NAS Conjugate Gradient benchmark
  
  This function is similar to the NAS CG benchmark described in:
  http://www.nas.nasa.gov/News/Techreports/1994/PDF/RNR-94-007.pdf
  See code on page 19-20 for the pseudo code.
  
  Args:
    A(Expr): matrix to be processed.
    num_iter(int): maximum number of iterations to run.
  '''
  x = expr.ones((A.shape[1],1))
  
  for iter in range(num_iter):
    #util.log_warn('iteration:%d', iter)
    z = cgit(A, x)
    x = z / expr.norm(z)
  return x
Example #36
def conj_gradient(A, num_iter=15):
    '''
  NAS Conjugate Gradient benchmark
  
  This function is similar to the NAS CG benchmark described in:
  http://www.nas.nasa.gov/News/Techreports/1994/PDF/RNR-94-007.pdf
  See code on page 19-20 for the pseudo code.
  
  Args:
    A(Expr): matrix to be processed.
    num_iter(int): maximum number of iterations to run.
  '''
    x = expr.ones((A.shape[1], 1))

    for iter in range(num_iter):
        #util.log_warn('iteration:%d', iter)
        z = cgit(A, x)
        x = z / expr.norm(z)
    return x
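
cgit is not shown in this listing. Following the NAS CG pseudo code cited in the docstring, it approximately solves A z = x with a fixed number of inner conjugate-gradient steps; the NumPy sketch below is one plausible single-node reading, not Spartan's implementation.

import numpy as np

def cgit(A, x, cg_iters=25):
  # Approximately solve A z = x with cg_iters conjugate-gradient steps
  # (the NAS CG benchmark runs 25 inner iterations).
  z = np.zeros_like(x)
  r = x.copy()                  # residual for the initial guess z = 0
  p = r.copy()
  rho = float(r.T @ r)
  for _ in range(cg_iters):
    q = A @ p
    alpha = rho / float(p.T @ q)
    z = z + alpha * p
    r = r - alpha * q
    rho, rho_old = float(r.T @ r), rho
    p = r + (rho / rho_old) * p
  return z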
Example #37
def benchmark_pagerank(ctx, timer):
    num_pages = PAGES_PER_WORKER * ctx.num_workers
    util.log_info('Total pages: %s', num_pages)

    wts = eager(
        expr.shuffle(
            expr.ndarray((num_pages, num_pages),
                         dtype=np.float32,
                         tile_hint=(num_pages, PAGES_PER_WORKER / 8)),
            make_weights,
        ))

    p = eager(
        expr.ones((num_pages, 1),
                  tile_hint=(PAGES_PER_WORKER / 8, 1),
                  dtype=np.float32))

    for i in range(3):
        timer.time_op('pagerank', lambda: expr.dot(wts, p).force())
Example #38
def pagerankDistributed(ctx, numPage, numIters, alpha):
  sGenerate = time.time()
  rank = eager(expr.ones((numPage, 1), tile_hint = (numPage / ctx.num_workers, 1), dtype = np.float32))
  linkMatrix = eager(
              expr.shuffle(
                expr.ndarray(
                  (numPage, numPage),
                  dtype = np.float32,
                  tile_hint = (numPage, numPage / ctx.num_workers)),
              make_weights,
              ))
  eGenerate = time.time()
  util.log_info("**pagerank** rank init finished")
  startCompute = time.time()
  for i in range(numIters):
    #rank = ((1 - alpha) * expr.dot(linkMatrix, rank,tile_hint = (numPage, numPage/10))) + belta
    rank = expr.dot(linkMatrix, rank, tile_hint = (numPage, numPage/10))
  rank.evaluate()
  endCompute = time.time()
  util.log_info("**pagerank** compute finished")
  return (eGenerate - sGenerate, endCompute - startCompute)
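
The damped update is commented out inside the loop above, and belta is not defined in this excerpt. For reference, the standard damped PageRank iteration in single-node NumPy; the definition of the teleport term belta is an assumption:

import numpy as np

def pagerank_numpy(link_matrix, alpha=0.15, num_iters=50):
  n = link_matrix.shape[0]
  rank = np.ones((n, 1), dtype=np.float32) / n
  belta = alpha / n      # assumed teleport term, matching the commented-out line
  for _ in range(num_iters):
    rank = (1 - alpha) * (link_matrix @ rank) + belta
  return rank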
Example #39
  def test_pagerank(self):
    _skip_if_travis()
    OUTLINKS_PER_PAGE = 10
    PAGES_PER_WORKER = 1000000
    num_pages = PAGES_PER_WORKER * self.ctx.num_workers

    wts = expr.shuffle(
        expr.ndarray(
          (num_pages, num_pages),
          dtype=np.float32,
          tile_hint=(num_pages, PAGES_PER_WORKER / 8)),
        make_weights,
      )

    start = time.time()

    p = expr.eager(expr.ones((num_pages, 1), tile_hint=(PAGES_PER_WORKER / 8, 1),
                             dtype=np.float32))

    expr.dot(wts, p, tile_hint=(PAGES_PER_WORKER / 8, 1)).evaluate()

    cost = time.time() - start
    self._verify_cost("pagerank", cost)
Example #40
 def test_add3(self):
   a = expr.ones((TEST_SIZE, TEST_SIZE))
   b = expr.ones((TEST_SIZE, TEST_SIZE))
   c = expr.ones((TEST_SIZE, TEST_SIZE))
   Assert.all_eq((a + b + c).glom(), np.ones((TEST_SIZE, TEST_SIZE)) * 3)
Example #41
 def test_broadcast(self):
   a = expr.ones((2, 1))
   b = expr.ones((2, 5))
   Assert.all_eq((a / b).glom(), np.ones((2, 5)))
   Assert.all_eq((b / a).glom(), np.ones((2, 5)))
Example #42
    def test_map_1(self):
        a = expr.ones((20, 20))
        b = expr.ones((20, 20))
        c = a + b

        Assert.all_eq(2 * np.ones((20, 20)), expr.glom(c))
Example #43
    def test_ln(self):
        a = 1.0 + expr.ones((100, ), dtype=np.float32)
        b = 1.0 + np.ones(100).astype(np.float32)

        Assert.all_close(expr.ln(a).glom(), np.log(b))
Example #44
 def test_add2(self):
     a = expr.ones((TEST_SIZE, TEST_SIZE))
     b = expr.ones((TEST_SIZE, TEST_SIZE))
     Assert.all_eq((a + b).glom(), np.ones((TEST_SIZE, TEST_SIZE)) * 2)
Example #45
 def test_add3(self):
     a = expr.ones((TEST_SIZE, TEST_SIZE))
     b = expr.ones((TEST_SIZE, TEST_SIZE))
     c = expr.ones((TEST_SIZE, TEST_SIZE))
     Assert.all_eq((a + b + c).glom(), np.ones((TEST_SIZE, TEST_SIZE)) * 3)
Example #46
def fn3():
  a = expr.ones((10,))
  g = expr.diag(a)
  g += expr.ones((10,10))
  g = expr.diagonal(g)
  print g.optimized()
Example #47
 def test_broadcast(self):
     a = expr.ones((2, 1))
     b = expr.ones((2, 5))
     Assert.all_eq((a / b).glom(), np.ones((2, 5)))
     Assert.all_eq((b / a).glom(), np.ones((2, 5)))
Example #48
def simulate(ts_all, te_all, lamb_all, num_paths):
    """Range over a number of independent products.

  :param ts_all: DistArray
    Start dates for a series of swaptions.
  :param te_all: DistArray
    End dates for a series of swaptions.
  :param lamb_all: DistArray
    Parameter values for a series of swaptions.
  :param num_paths: Int
    Number of paths used in random walk.

  :rtype: DistArray

  """
    swaptions = []
    i = 0
    for ts_a, te, lamb in zip(ts_all, te_all, lamb_all):
        for ts in ts_a:
            # start = time()
            print i
            time_structure = arange(None, 0, ts + DELTA, DELTA)
            maturity_structure = arange(None, 0, te, DELTA)

            ############# MODEL ###############
            # Variance reduction technique - Antithetic Variates.
            eps_tmp = randn(time_structure.shape[0] - 1, num_paths)
            eps = concatenate(eps_tmp, -eps_tmp, 1)

            # Forward LIBOR rates for the construction of the spot measure.
            f_kk = zeros((time_structure.shape[0], 2 * num_paths))
            f_kk = assign(f_kk, np.s_[0, :], F_0)

            # Plane kxN of simulated LIBOR rates.
            f_kn = ones((maturity_structure.shape[0], 2 * num_paths)) * F_0

            # Simulations of the plane f_kn for each time step.
            for t in xrange(1, time_structure.shape[0]):
                f_kn_new = f_kn[1:, :] * exp(
                    lamb * mu(f_kn, lamb) * DELTA - 0.5 * lamb * lamb * DELTA + lamb * eps[t - 1, :] * sqrt(DELTA)
                )
                f_kk = assign(f_kk, np.s_[t, :], f_kn_new[0])
                f_kn = f_kn_new

            ############## PRODUCT ###############
            # Value of zero coupon bonds.
            zcb = ones((int((te - ts) / DELTA) + 1, 2 * num_paths))
            f_kn_modified = 1 + DELTA * f_kn
            for j in xrange(zcb.shape[0] - 1):
                zcb = assign(zcb, np.s_[j + 1], zcb[j] / f_kn_modified[j])

            # Swaption price at maturity.
            last_row = zcb[zcb.shape[0] - 1, :].reshape((20,))
            swap_ts = maximum(1 - last_row - THETA * DELTA * expr.sum(zcb[1:], 0), 0)

            # Spot measure used for discounting.
            b_ts = ones((2 * num_paths,))
            tmp = 1 + DELTA * f_kk
            for j in xrange(int(ts / DELTA)):
                b_ts *= tmp[j].reshape((20,))

            # Swaption price at time 0.
            swaption = swap_ts / b_ts

            # Save expected value in bps and std.
            me = mean((swaption[0:num_paths] + swaption[num_paths:]) / 2) * 10000
            st = std((swaption[0:num_paths] + swaption[num_paths:]) / 2) / sqrt(num_paths) * 10000

            swaptions.append([me.optimized().force(), st.optimized().force()])
            # print time() - start
            i += 1
    return swaptions
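
The "Antithetic Variates" comment above refers to a standard variance-reduction trick: each normal draw eps is paired with -eps, and the paired estimates are averaged. A standalone NumPy illustration (not part of the simulation code):

import numpy as np

rng = np.random.default_rng(0)
num_paths = 100000
eps = rng.standard_normal(num_paths)

f = lambda x: np.exp(0.2 * x)          # any monotone payoff shows the effect
plain = f(eps)
paired = 0.5 * (f(eps) + f(-eps))      # each antithetic pair shares one draw

# Both estimators are unbiased, but the paired one has a smaller standard error.
print(plain.mean(), plain.std() / np.sqrt(num_paths))
print(paired.mean(), paired.std() / np.sqrt(num_paths))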
Example #49
def gen_array(shape):
    return expr.ones(shape)
Example #50
def fn3():
    a = expr.ones((N, N))
    b = expr.ones((N, N / 2))
    g = expr.dot(a, b) + expr.dot(expr.sum(a, axis=1).reshape((1, N)), b)
    return g
Example #51
def fn3():
  a = expr.ones((N, N))
  b = expr.ones((N, N/2))
  g = expr.dot(a, b) + expr.dot(expr.sum(a, axis=1).reshape((1, N)), b)
  return g
Example #52
  def train_smo_2005(self, data, labels):
    '''
    Train an SVM model using the SMO (2005) algorithm.
   
    Args:
      data(Expr): the data points to train on
      labels(Expr): the correct labels of the training data
    '''
    
    N = data.shape[0] # Number of instances
    D = data.shape[1]  # Number of features

    self.b = 0.0
    alpha = expr.zeros((N,1), dtype=np.float64, tile_hint=[N/self.ctx.num_workers, 1]).force()
    
    # linear kernel
    kernel_results = expr.dot(data, expr.transpose(data), tile_hint=[N/self.ctx.num_workers, N])
    gradient = expr.ones((N, 1), dtype=np.float64, tile_hint=[N/self.ctx.num_workers, 1]) * -1.0
    
    expr_labels = expr.lazify(labels)
    
    util.log_info("Starting SMO")
    pv1 = pv2 = -1
    it = 0
    while it < self.maxiter:
      util.log_info("Iteration:%d", it)
      
      minObj = 1e100
      
      expr_alpha = expr.lazify(alpha)
      G = expr.multiply(labels, gradient) * -1.0

      v1_mask = ((expr_labels > self.tol) * (expr_alpha < self.C) + (expr_labels < -self.tol) * (expr_alpha > self.tol))
      v1 = expr.argmax(G[v1_mask-True]).glom().item()
      maxG = G[v1,0].glom()
      print 'maxv1:', v1, 'maxG:', maxG

      v2_mask = ((expr_labels > self.tol) * (expr_alpha > self.tol) + (expr_labels < -self.tol) * (expr_alpha < self.C))     
      min_v2 = expr.argmin(G[v2_mask-True]).glom().item()
      minG = G[min_v2,0].glom()
      #print 'minv2:', min_v2, 'minG:', minG
      
      set_v2 = v2_mask.glom().nonzero()[0]
      #print 'actives:', set_v2.shape[0]
      v2 = -1
      for v in set_v2:
        b = maxG - G[v,0].glom()
        if b > self.tol:
          na = (kernel_results[v1,v1] + kernel_results[v,v] - 2*kernel_results[v1,v]).glom()[0][0]
          if na < self.tol: na = 1e12
          
          obj = -(b*b)/na
          if obj <= minObj and v1 != pv1 or v != pv2:
            v2 = v
            a = na
            minObj = obj
      
      if v2 == -1: break
      if maxG - minG < self.tol: break
      
      print 'opt v1:', v1, 'v2:', v2

      pv1 = v1
      pv2 = v2
    
      y1 = labels[v1,0]
      y2 = labels[v2,0]    
        
      oldA1 = alpha[v1,0]
      oldA2 = alpha[v2,0]
      
      # Calculate new alpha values, to reduce the objective function...
      b = y2*expr.glom(gradient[v2,0]) - y1*expr.glom(gradient[v1,0])
      if y1 != y2:
        a += 4 * kernel_results[v1,v2].glom()
      
      newA1 = oldA1 + y1*b/a
      newA2 = oldA2 - y2*b/a   

      # Correct for alpha being out of range...
      sum = y1*oldA1 + y2*oldA2
  
      if newA1 < self.tol: newA1 = 0.0
      elif newA1 > self.C: newA1 = self.C
     
      newA2 = y2 * (sum - y1 * newA1) 

      if newA2 < self.tol: newA2 = 0.0
      elif newA2 > self.C: newA2 = self.C
     
      newA1 = y1 * (sum - y2 * newA2)
  
      # Update the gradient...
      dA1 = newA1 - oldA1
      dA2 = newA2 - oldA2
  
      gradient += expr.multiply(labels, kernel_results[:,v1]) * y1 * dA1 + expr.multiply(labels, kernel_results[:,v2]) * y2 * dA2

      alpha[v1,0] = newA1
      alpha[v2,0] = newA2
 
      #print 'alpha:', alpha.glom().T
      
      it += 1
      #print 'gradient:', gradient.glom().T

    self.w = expr.zeros((D, 1), dtype=np.float64).force()
    for i in xrange(D): 
      self.w[i,0] = expr.reduce(alpha, axis=None, dtype_fn=lambda input: input.dtype,
                                local_reduce_fn=margin_mapper,
                                accumulate_fn=np.add, 
                                fn_kw=dict(label=labels, data=expr.force(data[:,i]))).glom()
    
    self.b = 0.0
    E = (labels - self.margins(data)).force()
    
    minB = -1e100
    maxB = 1e100
    actualB = 0.0
    numActualB = 0
    
    for i in xrange(N):
      ai = alpha[i,0]
      yi = labels[i,0]
      Ei = E[i,0]
      
      if ai < 1e-3:
        if yi < self.tol:
          maxB = min((maxB,Ei))
        else:
          minB = max((minB,Ei))
      elif ai > self.C - 1e-3:
        if yi < self.tol:
          minB = max((minB,Ei))
        else:
          maxB = min((maxB,Ei))
      else:
        numActualB += 1
        actualB += (Ei - actualB) / float(numActualB)
    if numActualB > 0:
      self.b = actualB
    else:
      self.b = 0.5*(minB + maxB)

    self.usew_ = True
    print 'iteration finish:', it
    print 'b:', self.b
    print 'w:', self.w.glom()
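
The newA1/newA2 block above clips each alpha into the box [0, C] while preserving the SMO equality constraint y1*alpha1 + y2*alpha2 = const; because the labels are +/-1, 1/y equals y, which is why each alpha can be re-derived by multiplication. A standalone sketch of that step, with names following the code above:

def clip_alpha_pair(newA1, newA2, y1, y2, oldA1, oldA2, C):
  # Invariant preserved by the update: y1*a1 + y2*a2 stays constant.
  s = y1 * oldA1 + y2 * oldA2
  newA1 = min(max(newA1, 0.0), C)    # clamp alpha1 into [0, C]
  newA2 = y2 * (s - y1 * newA1)      # re-derive alpha2 from the invariant (1/y2 == y2)
  newA2 = min(max(newA2, 0.0), C)    # clamp alpha2 as well
  newA1 = y1 * (s - y2 * newA2)      # and recompute alpha1 one last time
  return newA1, newA2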
Example #53
 def test_add2(self):
   a = expr.ones((TEST_SIZE, TEST_SIZE))
   b = expr.ones((TEST_SIZE, TEST_SIZE))
   Assert.all_eq((a + b).glom(), np.ones((TEST_SIZE, TEST_SIZE)) * 2)
Example #54
def gen_array(shape):
  return expr.ones(shape)
Example #55
def simulate(ts_all, te_all, lamb_all, num_paths):
  '''Range over a number of independent products.

  :param ts_all: DistArray
    Start dates for a series of swaptions.
  :param te_all: DistArray
    End dates for a series of swaptions.
  :param lamb_all: DistArray
    Parameter values for a series of swaptions.
  :param num_paths: Int
    Number of paths used in random walk.

  :rtype: DistArray

  '''
  swaptions = []
  i = 0
  for ts_a, te, lamb in zip(ts_all, te_all, lamb_all):
    for ts in ts_a:
      #start = time()
      print i
      time_structure = arange(None, 0, ts + DELTA, DELTA)
      maturity_structure = arange(None, 0, te, DELTA)

      ############# MODEL ###############
      # Variance reduction technique - Antithetic Variates.
      eps_tmp = randn(time_structure.shape[0] - 1, num_paths)
      eps = concatenate(eps_tmp, -eps_tmp, 1)

      # Forward LIBOR rates for the construction of the spot measure.
      f_kk = zeros((time_structure.shape[0], 2*num_paths))
      f_kk = assign(f_kk, np.s_[0, :], F_0)

      # Plane kxN of simulated LIBOR rates.
      f_kn = ones((maturity_structure.shape[0], 2*num_paths))*F_0

      # Simulations of the plane f_kn for each time step.
      for t in xrange(1, time_structure.shape[0]):
        f_kn_new = f_kn[1:, :]*exp(lamb*mu(f_kn, lamb)*DELTA-0.5*lamb*lamb *
            DELTA + lamb*eps[t - 1, :]*sqrt(DELTA))
        f_kk = assign(f_kk, np.s_[t, :], f_kn_new[0])
        f_kn = f_kn_new

      ############## PRODUCT ###############
      # Value of zero coupon bonds.
      zcb = ones((int((te-ts)/DELTA)+1, 2*num_paths))
      f_kn_modified = 1 + DELTA*f_kn
      for j in xrange(zcb.shape[0] - 1):
        zcb = assign(zcb, np.s_[j + 1], zcb[j] / f_kn_modified[j])

      # Swaption price at maturity.
      last_row = zcb[zcb.shape[0] - 1, :].reshape((20, ))
      swap_ts = maximum(1 - last_row - THETA*DELTA*expr.sum(zcb[1:], 0), 0)

      # Spot measure used for discounting.
      b_ts = ones((2*num_paths, ))
      tmp = 1 + DELTA * f_kk
      for j in xrange(int(ts/DELTA)):
        b_ts *= tmp[j].reshape((20, ))

      # Swaption price at time 0.
      swaption = swap_ts/b_ts

      # Save expected value in bps and std.
      me = mean((swaption[0:num_paths] + swaption[num_paths:])/2) * 10000
      st = std((swaption[0:num_paths] + swaption[num_paths:])/2)/sqrt(num_paths)*10000

      swaptions.append([me.optimized().force(), st.optimized().force()])
      #print time() - start
      i += 1
  return swaptions