# Shared imports for the setup fixtures below.
from unittest import mock

import numpy as np
import pytest

import chainer
import chainermn
import chainermn.testing
import cupy
from cupy.cuda import nccl  # assumed: the `nccl` module used below is CuPy's NCCL binding


def setUp(self):
    self.optimizer = chainer.GradientMethod()
    self.target = chainer.ChainList(
        SimpleLink(np.arange(3).astype(np.float32),
                   np.arange(3).astype(np.float32)),
        SimpleLink(np.arange(3).astype(np.float32),
                   np.arange(3).astype(np.float32)))
    self.optimizer.create_update_rule = mock.MagicMock

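# SimpleLink is referenced above and below but not defined in this excerpt.
# A minimal sketch, assuming it matches the helper conventionally used in
# Chainer's optimizer tests: a Link holding a single parameter whose data and
# gradient are the two arrays passed in. The attribute name `param` is an
# assumption.
class SimpleLink(chainer.Link):

    def __init__(self, w, g):
        super(SimpleLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
        self.param.grad = g
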
def setup_cpu(self):
    self.comm = chainermn.create_communicator('naive')
    self.target = DynamicExampleModel()
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

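# DynamicExampleModel is likewise not defined in this excerpt. A minimal
# sketch, assuming a plain Chain with two Linear links `a` and `b` (the layer
# sizes are illustrative); the "dynamic" tests typically add further links to
# the model after the optimizer has been set up.
class DynamicExampleModel(chainer.Chain):

    def __init__(self):
        super(DynamicExampleModel, self).__init__()
        with self.init_scope():
            self.a = chainer.links.Linear(2, 3)
            self.b = chainer.links.Linear(3, 4)
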
def setUp(self):
    opt = chainer.GradientMethod()
    opt.init_state_cpu = mock.MagicMock()
    opt.init_state_gpu = mock.MagicMock()
    opt.update_one_cpu = mock.MagicMock()
    opt.update_one_gpu = mock.MagicMock()
    self.optimizer = opt
    self.target = SimpleLink(
        np.arange(3).astype(np.float32),
        np.arange(3).astype(np.float32))

def setup_gpu(self, device=None):
    self.comm = chainermn.create_communicator('hierarchical')
    device = self.comm.intra_rank
    chainer.cuda.get_device_from_id(device).use()
    self.target = DynamicExampleModel()
    self.target.to_gpu()
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

def setup_gpu(self, device=None):
    if nccl.get_build_version() < 2000:
        pytest.skip('This test requires NCCL version >= 2.0')
    self.comm = chainermn.create_communicator('pure_nccl')
    device = self.comm.intra_rank
    chainer.cuda.get_device_from_id(device).use()
    self.target = DynamicExampleModel()
    self.target.to_gpu()
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

def setup_gpu(self, device=None):
    self.comm = chainermn.create_communicator('flat')
    device = self.comm.intra_rank
    chainer.cuda.get_device_from_id(device).use()
    self.target = ExampleModel()
    self.target.to_device(cupy.cuda.Device())
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.c.W.data[:] = self.comm.rank + 2
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.target.c.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

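# ExampleModel (three Linear links `a`, `b`, `c`) is also assumed; a sketch
# in the same spirit as DynamicExampleModel above, with illustrative sizes:
class ExampleModel(chainer.Chain):

    def __init__(self):
        super(ExampleModel, self).__init__()
        with self.init_scope():
            self.a = chainer.links.Linear(2, 3)
            self.b = chainer.links.Linear(3, 4)
            self.c = chainer.links.Linear(4, 5)
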
def setup_gpu(self, use_chx=False):
    self.comm = chainermn.create_communicator('flat')
    self.target = DynamicExampleModel()
    self.device = chainermn.testing.get_device(self.comm.intra_rank, use_chx)
    chainer.cuda.get_device_from_id(self.comm.intra_rank).use()
    self.target.to_device(self.device)
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

def setup(self, batched_copy):
    if nccl.get_build_version() < 2000:
        pytest.skip('This test requires NCCL version >= 2.0')
    self.comm = chainermn.create_communicator(
        'pure_nccl', batched_copy=batched_copy)
    device = self.comm.intra_rank
    chainer.cuda.get_device_from_id(device).use()
    self.target = ExampleModel()
    self.target.to_device(cupy.cuda.Device())
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.c.W.data[:] = self.comm.rank + 2
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.target.c.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock

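# How these fixtures are typically consumed: the bare GradientMethod is
# wrapped with chainermn.create_multi_node_optimizer (the public chainermn
# API), which all-reduces gradients across self.comm before delegating the
# update. The test body below is a hedged sketch, not code from the original
# file.
def test_update_sketch(self):
    optimizer = chainermn.create_multi_node_optimizer(
        self.actual_optimizer, self.comm)
    optimizer.setup(self.target)
    # Every worker starts with rank-dependent parameter data and zeroed
    # gradients, so after the all-reduce all workers see identical averaged
    # gradients before the mocked update rule runs.
    optimizer.update()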