Example #1
import sys

import numpy

import chainer
from chainer.backends import cuda
import chainer.training.updaters.multiprocess_parallel_updater as mpu

# SimpleNetChild is defined elsewhere in the test module
# (a minimal sketch is given after this example).


def test():
    model = SimpleNetChild()
    dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
                numpy.int32(0)) for i in range(100)]

    batch_size = 5
    devices = tuple([chainer.get_device(d) for d in sys.argv[1].split(',')])
    iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
             chainer.datasets.split_dataset_n_random(
                 dataset, len(devices))]
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)

    # Initialize CUDA context.
    cuda.cupy.cuda.runtime.runtimeGetVersion()

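    # Constructing the updater now, with a CUDA context already initialized in
    # this process, should raise a RuntimeError under the 'fork' start method.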
    try:
        mpu.MultiprocessParallelUpdater(iters, optimizer, devices=devices)
    except RuntimeError as e:
        if sys.argv[2] == 'fork':
            assert 'CUDA context' in str(e)
            return

    updater = mpu.MultiprocessParallelUpdater(
        iters, optimizer, devices=devices)
    trainer = chainer.training.Trainer(updater, (1, 'epoch'), '/tmp')
    trainer.run()
    assert sys.argv[2] != 'fork'
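SimpleNetChild itself is not shown in these excerpts; it is defined elsewhere in the test module. As a rough illustration of the kind of model the updater expects (a loss-computing chain called with a batch and its labels), a minimal sketch for the 2x5x5 float inputs used above might look like the following; the layer sizes are assumptions, not the original definition:

import chainer
import chainer.functions as F
import chainer.links as L


class SimpleNetChild(chainer.Chain):
    """Minimal stand-in: small conv + linear head, returns the reported loss."""

    def __init__(self):
        super(SimpleNetChild, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(2, 2, 3)   # 2x5x5 input -> 2x3x3
            self.fc = L.Linear(None, 2)            # input size inferred on first call

    def __call__(self, x, t):
        h = F.relu(self.conv(x))
        y = self.fc(h)
        loss = F.softmax_cross_entropy(y, t)
        chainer.reporter.report({'loss': loss}, self)
        return loss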
Example #2
def test():
    model = chainer.Link()
    dataset = [((numpy.ones(
        (2, 5, 5)) * i).astype(numpy.float32), numpy.int32(0))
               for i in range(100)]

    batch_size = 5
    devices = (0, )
    iters = [
        chainer.iterators.SerialIterator(i, batch_size)
        for i in chainer.datasets.split_dataset_n_random(
            dataset, len(devices))
    ]
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)

    # Initialize CUDA context.
    cuda.cupy.cuda.runtime.runtimeGetVersion()

    try:
        mpu.MultiprocessParallelUpdater(iters, optimizer, devices=devices)
    except RuntimeError as e:
        assert 'CUDA context' in str(e)
        return

    assert False  # unreachable: the constructor above must raise RuntimeError
Example #3
    def test_update_uses_raw_array(self):
        if mpu.MultiprocessParallelUpdater.available():
            model = SimpleNetRawArray(self)
            dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
                        numpy.int32(0)) for i in range(100)]

            batch_size = 5
            devices = (1,)
            iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
                     chainer.datasets.split_dataset_n_random(
                         dataset, len(devices))]
            optimizer = chainer.optimizers.SGD(lr=1.0)
            optimizer.setup(model)
            updater = mpu.MultiprocessParallelUpdater(
                iters, optimizer, devices=devices)
            updater.update()

            self.assertEqual(model.call_called, 1)
Example #4
def test():
    model = SimpleNetRawArray()
    dataset = [((numpy.ones(
        (2, 5, 5)) * i).astype(numpy.float32), numpy.int32(0))
               for i in range(100)]

    batch_size = 5
    devices = (0, )
    iters = [
        chainer.iterators.SerialIterator(i, batch_size)
        for i in chainer.datasets.split_dataset_n_random(
            dataset, len(devices))
    ]
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)

    with testing.assert_warns(UserWarning):
        updater = mpu.MultiprocessParallelUpdater(iters,
                                                  optimizer,
                                                  devices=devices)
    updater.update()

    assert model.call_called == 1
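SimpleNetRawArray is also defined outside these excerpts. The assertions above (model.call_called == 1) imply it counts its invocations, and its name suggests it additionally verifies that the updater hands it raw NumPy/CuPy arrays rather than chainer.Variable objects. A hedged sketch along those lines (the layer sizes and the raw-array checks are assumptions; note that Example #3 constructs it as SimpleNetRawArray(self), passing the test case in, which this sketch omits):

import chainer
import chainer.functions as F
import chainer.links as L


class SimpleNetRawArray(chainer.Chain):
    """Sketch: counts calls and checks that inputs arrive as plain arrays."""

    def __init__(self):
        super(SimpleNetRawArray, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(2, 2, 3)
            self.fc = L.Linear(None, 2)
        self.call_called = 0

    def __call__(self, x, t):
        # The updater is expected to pass concatenated ndarrays, not Variables.
        assert not isinstance(x, chainer.Variable)
        assert not isinstance(t, chainer.Variable)
        self.call_called += 1
        h = F.relu(self.conv(x))
        y = self.fc(h)
        return F.softmax_cross_entropy(y, t)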
Example #5
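        # (apparently the tail of SimpleNetChildReporter.__call__:
        #  report the loss, then return it)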
        self.loss = chainer.functions.softmax_cross_entropy(y, t)
        chainer.reporter.report({'loss': self.loss}, self)

        return self.loss


if __name__ == '__main__':
    model = SimpleNetChildReporter()
    dataset = [(numpy.full((2, 5, 5), i, numpy.float32), numpy.int32(0))
               for i in range(100)]

    batch_size = 5
    devices = tuple([int(x) for x in sys.argv[1].split(',')])
    iters = [
        chainer.iterators.SerialIterator(i, batch_size)
        for i in chainer.datasets.split_dataset_n_random(
            dataset, len(devices))
    ]
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)
    updater = mpu.MultiprocessParallelUpdater(iters,
                                              optimizer,
                                              devices=devices)
    trainer = chainer.training.Trainer(updater, (1, 'iteration'), '/tmp')
    trainer.run()
    assert model.call_called == 1

# This snippet is not test code.
# testing.run_module(__name__, __file__)
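Examples #1 and #5 are written as standalone scripts: sys.argv[1] carries the comma-separated device list, and in Example #1 sys.argv[2] carries the multiprocessing start method being tested. A hypothetical driver (the file name fork_check.py is an assumption, not part of the original code) could launch Example #5 like this:

import subprocess
import sys

# Hypothetical file containing the Example #5 script.
script = 'fork_check.py'

# Run the reporter check on devices 0 and 1; a non-zero exit status
# (e.g. from a failed assert) is raised as CalledProcessError.
subprocess.check_call([sys.executable, script, '0,1'])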