def test_basic(self) -> None:
    with capture_logs() as logs:
        x = LoggingTensor(torch.tensor([3.0], requires_grad=True))
        log_input("x", x)
        y = x * x
        saved_x = y.grad_fn._saved_self
        grad_y = LoggingTensor(torch.tensor([1.0]))
        log_input("grad_y", grad_y)
        g, = torch.autograd.grad((y,), (x,), (grad_y,))

    self.assertEqual(g.elem, torch.tensor([6.0]))
    with torch.no_grad():
        self.assertEqual(saved_x, x)
        self.assertEqual(saved_x._version, x._version)
        x.add_(2)
        self.assertEqual(saved_x, x)
        # TODO: figure out why broken
        # self.assertEqual(saved_x._version, x._version)
    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.mul($0, $0)
$2 = input('grad_y')
$3 = torch._ops.aten.mul($2, $0)
$4 = torch._ops.aten.mul($2, $0)
$5 = torch._ops.aten.add($4, $3)''')
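# NOTE [Assumed helpers]: the tests in this section rely on LoggingTensor,
# capture_logs and log_input (in PyTorch these come from
# torch.testing._internal.logging_tensor). The sketch below is only a rough
# illustration of how such a wrapper subclass can record every dispatched op;
# the names (SketchLoggingTensor, sketch_capture_logs) are made up here, and
# the real helpers produce the richer "$N = ..." format checked by the tests.
import contextlib
from typing import Iterator, List

import torch
from torch.utils._pytree import tree_map


class SketchLoggingTensor(torch.Tensor):
    # Wrapper subclass: the "real" tensor lives in .elem, and every op that
    # reaches __torch_dispatch__ appends one line to a shared log list.
    elem: torch.Tensor
    _log: List[str] = []

    @staticmethod
    def __new__(cls, elem, *, requires_grad=False):
        # The outer tensor carries no storage of its own; it only mirrors
        # the metadata of the wrapped tensor.
        r = torch.Tensor._make_wrapper_subclass(
            cls, elem.size(), strides=elem.stride(), dtype=elem.dtype,
            device=elem.device, requires_grad=requires_grad)
        r.elem = elem
        return r

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}

        def unwrap(t):
            return t.elem if isinstance(t, cls) else t

        def wrap(t):
            return cls(t) if isinstance(t, torch.Tensor) else t

        # Run the op on the unwrapped tensors, record it, re-wrap the result.
        out = func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))
        cls._log.append(str(func))
        return tree_map(wrap, out)


@contextlib.contextmanager
def sketch_capture_logs() -> Iterator[List[str]]:
    # Hand the caller a fresh list; ops dispatched inside the block append to it.
    SketchLoggingTensor._log = []
    try:
        yield SketchLoggingTensor._log
    finally:
        SketchLoggingTensor._log = []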
def test_torch_ops(self):
    r = make_tensor((2,), device='cpu', dtype=torch.float)
    self.assertEqual(torch.ops.prims.sin(r), torch.sin(r))

    r = LoggingTensor(r)
    with capture_logs() as logs:
        log_input("input", r)
        prims.sin(r)
    self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.prims.sin.default($0)""")
def get_logs(self, func, inpt):
    input_clone_logging = LoggingTensor(inpt.clone())
    input_functional_logging = torch._to_functional_tensor(input_clone_logging)

    with capture_logs() as logs:
        log_input("input", input_clone_logging)
        torch._enable_functionalization()
        try:
            func(input_functional_logging)
        finally:
            torch._disable_functionalization()
    return logs
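# Hypothetical usage of the get_logs helper above (the mutating function and
# the input here are illustrative only; the real callers and their expected
# logs live in the functionalization tests):
#
#     def f(t):
#         return t.add_(1.0)
#
#     logs = self.get_logs(f, torch.ones(2))
#     self.assertExpectedInline('\n'.join(logs), ...)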
def test_detach_appears_twice_when_called_once(self) -> None:
    with capture_logs() as logs:
        x = LoggingTensor(torch.tensor([3.0]), requires_grad=True)
        log_input("x", x)
        x.detach()
    # FIXME: We actually want this to emit a single detach. However,
    # it currently emits two, for reasons unclear to us. Leaving
    # this test here to make sure we don't regress even further (it
    # would be bad if calling .detach() once emits 3+ detaches).
    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.detach.default($0)
$2 = torch._ops.aten.detach.default($1)''')
def test_nesting_with_same_enable_torch_dispatch_mode(self) -> None:
    # "nested" enable_torch_dispatch_modes are allowed if they're the same mode.
    # It's the equivalent of a noop, so it will only write once to the log.
    with capture_logs() as logs:
        x = LoggingTensor(torch.tensor([3.]))
        log_input("x", x)
        with enable_torch_dispatch_mode(LoggingTensor):
            with enable_torch_dispatch_mode(LoggingTensor):
                x + x

    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = torch._ops.aten.add.Tensor($0, $0)''')
def test_out(self) -> None:
    with capture_logs() as logs:
        x = LoggingTensor(torch.ones(1))
        y = LoggingTensor(torch.zeros(1))
        log_input("x", x)
        log_input("y", y)
        torch.abs(x, out=y)

    self.assertEqual(y.elem, torch.ones(1))
    # TODO: arguably this shouldn't pass and we should complain
    # that out isn't a kwarg
    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = torch._ops.aten.abs($0, out=$1)''')
def test_kwarg_only_and_positional_default(self) -> None:
    with capture_logs() as logs:
        x = LoggingTensor(torch.ones(1))
        y = LoggingTensor(torch.ones(1))
        log_input("x", x)
        log_input("y", y)
        torch.ops.aten.kl_div(x, y)
        torch.ops.aten.kl_div(x, y, 2)
        torch.ops.aten.kl_div(x, y, log_target=True)
        torch.ops.aten.kl_div(x, y, 2, log_target=True)

    # What we are testing here is that we omit reduction
    # if it is defaulted, even if a kwarg is set
    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = torch._ops.aten.kl_div($0, $1)
$3 = torch._ops.aten.kl_div($0, $1, 2)
$4 = torch._ops.aten.kl_div($0, $1, log_target=True)
$5 = torch._ops.aten.kl_div($0, $1, 2, log_target=True)''')
def test_kwarg_only(self) -> None:
    with capture_logs() as logs:
        x = LoggingTensor(torch.ones(1))
        y = LoggingTensor(torch.ones(1, 1))
        z = LoggingTensor(torch.ones(1))
        log_input("x", x)
        log_input("y", y)
        log_input("z", z)
        torch.addmv(x, y, z)
        torch.addmv(x, y, z, beta=1)
        torch.addmv(x, y, z, beta=2)
        torch.addmv(x, y, z, alpha=2)
        torch.addmv(x, y, z, beta=2, alpha=2)

    # The expectation is that beta/alpha don't show up when they're
    # defaulted. This is even if the user explicitly specified it.
    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = input('z')
$3 = torch._ops.aten.addmv($0, $1, $2)
$4 = torch._ops.aten.addmv($0, $1, $2)
$5 = torch._ops.aten.addmv($0, $1, $2, beta=2)
$6 = torch._ops.aten.addmv($0, $1, $2, alpha=2)
$7 = torch._ops.aten.addmv($0, $1, $2, beta=2, alpha=2)''')
def test_custom_autograd(self) -> None:
    escape = [None]

    class Square(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x**2
            ctx.save_for_backward(x)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            assert isinstance(grad_output, LoggingTensor)
            x, = ctx.saved_tensors
            assert isinstance(x, LoggingTensor)
            escape[0] = x
            return grad_output * 2 * x

    with capture_logs() as logs:
        x = LoggingTensor(torch.ones(1), requires_grad=True)
        log_input("x", x)
        x.grad = LoggingTensor(torch.zeros(1))
        log_input("x.grad", x.grad)
        y = Square.apply(x)
        grad_output = LoggingTensor(torch.ones(1))
        log_input("grad_output", grad_output)
        y.backward(grad_output)

    with torch.no_grad():
        self.assertEqual(escape[0], x)
        self.assertEqual(escape[0]._version, x._version)
        # TODO: figure out why x.requires_grad = False doesn't
        # trigger an error for LoggingTensor
        x.add_(2)
        self.assertEqual(escape[0], x)
        # TODO: figure out why this is broken
        # self.assertEqual(escape[0]._version, x._version)

    self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('x.grad')
$2 = torch._ops.aten.pow.Tensor_Scalar($0, 2)
$3 = input('grad_output')
$4 = torch._ops.aten.mul.Tensor($3, 2)
$5 = torch._ops.aten.mul.Tensor($4, $0)
$6 = torch._ops.aten.add_.Tensor($1, $5)''')