Exemplo n.º 1
0
    def test_basic(self):
        """Smoke test: a simple add-then-multiply graph trains for several
        iterations under the cudagraphs-backed AOT autograd backend."""

        def fn(a, b):
            return (a + b) * b

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for _ in range(N_ITERS):
                a = torch.randn(3, device="cuda", requires_grad=True)
                b = torch.randn(3, device="cuda")
                fn(a, b).sum().backward()
Exemplo n.º 2
0
    def test_htod(self):
        """Exercise a host-to-device copy inside the graph: one operand lives
        on the CPU while the other is a CUDA tensor requiring grad."""

        def fn(a, b):
            tmp = a + b
            return tmp * 3

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for _ in range(N_ITERS):
                a = torch.randn(3, device="cuda", requires_grad=True)
                # Scalar tensor deliberately placed on the CPU side.
                b = torch.randn((), device="cpu")
                fn(a, b).sum().backward()
Exemplo n.º 3
0
    def test_factory(self):
        """A factory op (torch.zeros) plus an in-place add executed inside
        the compiled region, then combined with a grad-requiring input."""

        def fn(inp):
            buf = torch.zeros(3, device="cuda:0")
            buf.add_(3)
            return buf * inp

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for step in range(N_ITERS):
                with self.subTest(step):
                    inp = torch.randn(3, device="cuda:0", requires_grad=True)
                    fn(inp).sum().backward()
Exemplo n.º 4
0
    def test_mutate_constant(self):
        """Mutating a tensor created from a Python constant inside the graph
        must still yield the correct value (1 + 2 = 3) on every iteration."""

        def fn(a, b):
            const = torch.tensor(1)
            const.add_(2)
            # `a * b * 0` keeps the inputs in the graph while contributing 0.
            return a * b * 0 + const

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for step in range(N_ITERS):
                with self.subTest(step):
                    a = torch.randn(1, device="cuda", requires_grad=True)
                    b = torch.randn(1, device="cuda")
                    out = fn(a, b).sum()
                    self.assertEqual(out, torch.tensor(3.0, device="cuda"))
                    out.backward()
Exemplo n.º 5
0
    def test_mutate_input(self):
        """An in-place mutation of a graph input must be visible to the
        caller after the compiled function returns."""

        def fn(a, b):
            b.add_(3)
            return a * b

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for step in range(N_ITERS):
                with self.subTest(step):
                    a = torch.randn(3, device="cuda", requires_grad=True)
                    b = torch.randn(3, device="cuda")
                    before = b.clone()
                    out = fn(a, b).sum()
                    # The mutation must have escaped the compiled region.
                    self.assertEqual(b, before + 3)
                    out.backward()
Exemplo n.º 6
0
    def test_dead_fill(self):
        """Filling a zero-length view must not clobber the base tensor:
        x gets 2s everywhere, while the empty slice y stays empty."""

        def fn(inp):
            base = inp.clone()
            empty_view = base[0:0]
            base.fill_(2)
            empty_view.fill_(3)
            return base, empty_view

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for step in range(N_ITERS):
                with self.subTest(step):
                    src = torch.empty(20, device="cuda:0")
                    out_base, out_view = fn(src)
                    self.assertEqual(out_base,
                                     torch.full((20, ), 2.0, device="cuda:0"))
                    self.assertEqual(out_view, torch.empty(0, device="cuda:0"))
Exemplo n.º 7
0
    def test_mutated_metadata(self):
        """resize_() inside the compiled region changes tensor metadata
        (0 -> 20 elements); the result must reflect the new size and fill.

        A more tortured variant of this scenario lives at
        https://github.com/pytorch/pytorch/issues/81385
        """

        def fn(inp):
            work = inp.clone()
            work.resize_(20)
            work.fill_(2)
            return work

        with torchdynamo.optimize(aot_autograd_cudagraphs):
            for step in range(N_ITERS):
                with self.subTest(step):
                    src = torch.empty(0, device="cuda:0")
                    result = fn(src)
                    self.assertEqual(result,
                                     torch.full((20, ), 2.0, device="cuda:0"))