def main():
    """Entry point: parse CLI options and run prediction, with optional iDeep.

    iDeep acceleration is enabled only when the backend is available AND the
    user passed ``--ideep``; otherwise a hint about the flag is printed.
    """
    args = parse_arguments()
    ideep_ok = is_ideep_available()
    use_ideep = bool(ideep_ok and args.ideep)
    if ideep_ok and not args.ideep:
        # Backend is present but the user did not opt in — advertise the flag.
        print('>> you can use ideep to accelerate inference speed')
        print('>> with optional argument --ideep')
    predict(args.arch, args.trained, use_ideep)
Example #2
0
 def __init__(self):
     """Snapshot environment info: chainer/numpy versions, platform, CUDA, iDeep.

     Attributes left as ``None`` mean the corresponding backend is unavailable.
     """
     self.chainer_version = chainer.__version__
     self.numpy_version = numpy.__version__
     self.platform_version = platform.platform()
     # Record CUDA runtime details only when CUDA is actually usable.
     self.cuda_info = cuda.cupyx.get_runtime_info() if cuda.available else None
     # Same pattern for the iDeep (intel64) backend version.
     self.ideep_version = (
         intel64.ideep.__version__ if intel64.is_ideep_available() else None)
    def accuracy(self, backend_config):
        """Train the linear classifier under *backend_config*; return its accuracy."""
        net, opt = self.model, self.optimizer
        opt.setup(net)

        # iDeep-only configurations cannot run without the iDeep package.
        if (backend_config.use_ideep == 'always'
                and not intel64.is_ideep_available()):
            # TODO(niboshi): This is temporary workaround.
            # See the comment on Skipped.
            raise Skipped('ideep is required to run this test.')

        net.to_device(backend_config.device)

        with chainer.using_device(backend_config.device):
            return self._train_linear_classifier(net, opt, backend_config)
    def accuracy(self, backend_config, gpu_device=None):
        """Move the model to the configured backend, train, and return accuracy.

        *gpu_device* selects the CUDA device when the config requests CUDA.
        """
        net, opt = self.model, self.optimizer
        opt.setup(net)

        if backend_config.use_cuda:
            net.to_gpu(device=gpu_device)
        elif backend_config.use_ideep == 'always':
            if not intel64.is_ideep_available():
                # TODO(niboshi): This is temporary workaround.
                # See the comment on Skipped.
                raise Skipped('ideep is required to run this test.')
            net.to_intel64()

        with backend_config:
            return self._train_linear_classifier(
                net, opt, backend_config.use_cuda)
    def accuracy(self, backend_config, loss_scaling=False):
        """Train with optional loss scaling applied to the optimizer; return accuracy."""
        net, opt = self.model, self.optimizer
        opt.setup(net)
        _optimizer_loss_scaling(opt, loss_scaling)

        # An 'always'-iDeep config is unrunnable without the iDeep package.
        if (backend_config.use_ideep == 'always'
                and not intel64.is_ideep_available()):
            # TODO(niboshi): This is temporary workaround.
            # See the comment on Skipped.
            raise Skipped('ideep is required to run this test.')

        net.to_device(backend_config.device)

        with chainer.using_device(backend_config.device):
            return self._train_linear_classifier(net, opt, backend_config)
Example #6
0
    def accuracy(self, backend_config):
        """Skip unsupported configurations, then train and return accuracy."""
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        net, opt = self.model, self.optimizer
        opt.setup(net)

        if backend_config.use_ideep == 'always':
            # iDeep was explicitly requested; bail out if it is missing.
            if not intel64.is_ideep_available():
                # TODO(niboshi): This is temporary workaround.
                # See the comment on Skipped.
                raise Skipped('ideep is required to run this test.')

        net.to_device(backend_config.device)

        with chainer.using_device(backend_config.device):
            return self._train_linear_classifier(net, opt, backend_config)
    def accuracy(self, backend_config):
        """Train the classifier on the requested backend and return its accuracy.

        Raises ``unittest.SkipTest`` for the unsupported ChainerX/float16 combo
        and ``Skipped`` when iDeep is demanded but unavailable.
        """
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            # TODO(niboshi): Support it
            raise unittest.SkipTest('ChainerX does not support float16')

        model = self.model
        optimizer = self.optimizer
        optimizer.setup(model)

        if (backend_config.use_ideep == 'always'
                and not intel64.is_ideep_available()):
            # TODO(niboshi): This is temporary workaround.
            # See the comment on Skipped.
            raise Skipped('ideep is required to run this test.')

        model.to_device(backend_config.device)

        with chainer.using_device(backend_config.device):
            return self._train_linear_classifier(
                model, optimizer, backend_config)
Example #8
0
def main():
    """Benchmark VGG inference latency, using iDeep acceleration if available.

    Runs 20 timed forward passes on random 1x3x224x224 float32 input and
    prints the mean and standard deviation of the per-iteration wall time.
    """
    model = VGG()
    mode = "never"
    if is_ideep_available():
        # Convert weights to the iDeep layout and turn the config on.
        model.to_intel64()
        mode = "always"

    timings = []
    for _ in tqdm.tqdm(range(20)):
        data = np.random.randn(1, 3, 224, 224).astype(np.float32)
        start_time = time.time()
        # Pure-inference configuration: no train mode, no backprop graph.
        with chainer.using_config('train', False), \
                chainer.using_config('enable_backprop', False), \
                chainer.using_config('use_ideep', mode):
            ret = F.softmax(model(chainer.Variable(data)))
        print(ret.data.ravel()[0])
        timings.append(time.time() - start_time)
    print(
        '%10s : %f (sd %f)' %
        ('chainer-vgg-16', np.array(timings).mean(), np.array(timings).std()))
        # NOTE(review): the enclosing `def` header is not visible in this chunk
        # (lost in extraction) — presumably the forward pass of the MobileNet
        # instantiated below; confirm against the original file.
        x = self.conv_ds_4(x)
        x = self.conv_ds_5(x)
        x = self.conv_ds_6(x)
        x = self.conv_ds_7(x)
        x = self.conv_ds_8(x)
        x = self.conv_ds_9(x)
        x = self.conv_ds_10(x)
        x = self.conv_ds_11(x)
        x = self.conv_ds_12(x)
        x = self.conv_ds_13(x)
        x = self.conv_ds_14(x)
        # 7x7 average pooling (stride 1) before the final softmax over classes.
        x = F.average_pooling_2d(x, 7, stride=1)
        return F.softmax(x)


# Top-level benchmark script: time MobileNet inference, with iDeep when available.
enable_ideep = is_ideep_available()
model = MobileNet()

# Enable iDeep only when the backend is actually present; converting the
# model to intel64 layout is required before use_ideep='always' can help.
mode = "never"
if enable_ideep:
    model.to_intel64()
    mode = "always"

nb_itr = 20  # number of timed inference iterations
timings = []
for i in tqdm.tqdm(range(nb_itr)):
    # Fresh random 1x3x224x224 float32 input each iteration.
    data = np.random.randn(1, 3, 224, 224).astype(np.float32)
    start_time = time.time()
    # Inference-only configuration; NOTE(review): the body of the innermost
    # `with` continues past the end of this chunk.
    with chainer.using_config('train', False):
        with chainer.using_config('enable_backprop', False):
            with chainer.using_config('use_ideep', mode):