Example #1
def test_gpu(self):
    # get_timer(cuda.cupy) should bind the CuPy array module as its backend
    timer_ = timer.get_timer(cuda.cupy)
    self.assertIs(timer_.xp, cuda.cupy)
Example #2
update_time = 0.0
print('iteration\tforward\tbackward\tupdate (in seconds)')
for iteration in six.moves.range(start_iteration, args.iteration):
    if args.gpu >= 0:
        cache.clear_cache(args.cache_level)

    # data generation
    data = numpy.random.uniform(-1, 1,
                                (args.batchsize, in_channels, in_flame,
                                 in_height, in_width)).astype(numpy.float32)
    data = chainer.Variable(xp.asarray(data))
    label = numpy.zeros((args.batchsize, ), dtype=numpy.int32)
    label = chainer.Variable(xp.asarray(label))

    # forward
    with timer.get_timer(xp) as t:
        loss = model(data, label)
    forward_time_one = t.total_time()

    # backward
    with timer.get_timer(xp) as t:
        loss.backward()
    backward_time_one = t.total_time()

    # parameter update
    with timer.get_timer(xp) as t:
        optimizer.update()
    update_time_one = t.total_time()

    if iteration < 0:
        # iterations with a negative index are burn-in passes
        print('Burn-in\t{}\t{}\t{}'.format(
            forward_time_one, backward_time_one, update_time_one))
Example #3
def test_cpu(self):
    # get_timer(numpy) should bind NumPy as its backend
    timer_ = timer.get_timer(numpy)
    self.assertIs(timer_.xp, numpy)
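
Taken together, the examples suggest a simple usage pattern: pass an array module (NumPy or CuPy) to timer.get_timer, use the returned object as a context manager, and read the elapsed time with total_time(). Below is a minimal sketch of that pattern, assuming the project-local timer module from the examples above is importable and behaves as Example #2 shows; the matmul_workload function is a hypothetical workload added here only for illustration.

import numpy
import timer  # project-local module used in the examples above (assumption)

# Hypothetical workload to time; any NumPy computation would do.
def matmul_workload(n=512):
    a = numpy.random.uniform(-1, 1, (n, n)).astype(numpy.float32)
    b = numpy.random.uniform(-1, 1, (n, n)).astype(numpy.float32)
    return a @ b

# Time the workload on the CPU (NumPy) backend, mirroring Example #2.
with timer.get_timer(numpy) as t:
    matmul_workload()
print('elapsed (in seconds):', t.total_time())

For a GPU run, cuda.cupy would be passed instead of numpy, as in Example #1, and the data would live on the device.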