Example #1
    def test_single_gpu(self):
        # train on one explicitly chosen GPU
        device = torch.device("cuda:0")
        net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
        opt = torch.optim.Adam(net.parameters(), 1e-3)
        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [device])
        trainer.run(fake_data_stream(), 2, 2)
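All four examples rely on names defined elsewhere in the test module: create_multigpu_supervised_trainer (MONAI's multi-GPU trainer factory) plus the fake_loss and fake_data_stream test helpers. Below is a minimal sketch of plausible stand-ins, assuming the MONAI import path; the helper bodies are illustrative guesses, not the originals:

import torch
from monai.engines import create_multigpu_supervised_trainer

def fake_loss(y_pred, y):
    # illustrative stand-in: any differentiable scalar drives the optimizer
    return (y_pred + y).sum()

def fake_data_stream():
    # illustrative stand-in: endless stream of random (image, target) batches
    while True:
        yield torch.rand((10, 1, 64, 64)), torch.rand((10, 1, 64, 64))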
Example #2
    def test_distributed(self):
        # each process trains on the GPU matching its distributed rank
        device = torch.device(f"cuda:{dist.get_rank()}")
        net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
        opt = torch.optim.Adam(net.parameters(), 1e-3)

        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [device], distributed=True)
        trainer.run(fake_data_stream(), 2, 2)
        # the trainer output should be the scalar loss value
        self.assertIsInstance(trainer.state.output, float)
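This test only works once a process group exists, since dist.get_rank() raises otherwise. A minimal per-process setup sketch using PyTorch's standard API, assuming the usual environment-variable launcher:

import torch.distributed as dist

# MASTER_ADDR / MASTER_PORT / RANK / WORLD_SIZE are expected to be set by
# the launcher (e.g. torchrun) before this call
dist.init_process_group(backend="nccl", init_method="env://")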
Example #3
    def test_multi_gpu(self):
        net = torch.nn.Conv2d(1, 1, 3, padding=1)
        opt = torch.optim.Adam(net.parameters(), 1e-3)

        with warnings.catch_warnings():
            # ignore warnings about imbalanced GPU memory
            warnings.simplefilter("ignore")

            # devices=None asks the factory to use all visible GPUs
            trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, None)

        trainer.run(fake_data_stream(), 2, 2)
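Passing None as the device list spreads training over every visible GPU, which is where the imbalanced-memory warnings can come from. A rough sketch of the equivalent manual wrapping, assuming the factory follows the standard torch.nn.DataParallel pattern:

import torch

net = torch.nn.Conv2d(1, 1, 3, padding=1).to("cuda:0")
# replicates the module across all visible GPUs; batches are split on dim 0
net = torch.nn.DataParallel(net)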
Example #4
    def test_cpu(self):
        # an empty device list falls back to CPU training
        net = torch.nn.Conv2d(1, 1, 3, padding=1)
        opt = torch.optim.Adam(net.parameters(), 1e-3)
        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [])
        trainer.run(fake_data_stream(), 2, 2)
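The empty device list exercises the CPU fallback. An explicit equivalent, assuming the factory treats [] and [torch.device("cpu")] the same way (that equivalence is what this test relies on):

device = torch.device("cpu")
net = torch.nn.Conv2d(1, 1, 3, padding=1).to(device)
opt = torch.optim.Adam(net.parameters(), 1e-3)
trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [device])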