Example #1
    def _check_helper(self, device, dtype, op, variant, check):
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(
                f"Skipped! {op.name} does not support dtype {str(dtype)}")

        samples = op.sample_inputs(device, dtype, requires_grad=True)
        for sample in samples:
            partial_fn = partial(variant, **sample.kwargs)
            if check == 'gradcheck':
                self.assertTrue(
                    gradcheck(partial_fn, (sample.input, ) + sample.args,
                              check_grad_dtypes=True))
            elif check == 'gradgradcheck':
                self.assertTrue(
                    gradgradcheck(partial_fn, (sample.input, ) + sample.args,
                                  gen_non_contig_grad_outputs=False,
                                  check_grad_dtypes=True))
                self.assertTrue(
                    gradgradcheck(partial_fn, (sample.input, ) + sample.args,
                                  gen_non_contig_grad_outputs=True,
                                  check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg="Unknown check requested!")
Example #2
    def _check_helper(self, device, dtype, op, variant, check):
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}")

        samples = op.sample_inputs(device, dtype, requires_grad=True)
        for sample in samples:
            if sample.output_process_fn_grad is not None:
                out_fn = sample.output_process_fn_grad

                def variant_out_fn(*args, **kwargs):
                    return out_fn(variant(*args, **kwargs))
            else:
                variant_out_fn = variant

            def fn(*inputs):
                output = variant_out_fn(*inputs, **sample.kwargs)
                return op.output_func(output)

            if check == 'gradcheck':
                self.assertTrue(gradcheck(fn, (*sample.input,) + sample.args,
                                          check_batched_grad=op.check_batched_grad,
                                          check_grad_dtypes=True))
            elif check == 'gradgradcheck':
                self.assertTrue(gradgradcheck(fn, (*sample.input,) + sample.args,
                                              gen_non_contig_grad_outputs=False,
                                              check_batched_grad=op.check_batched_gradgrad,
                                              check_grad_dtypes=True))
                self.assertTrue(gradgradcheck(fn, (*sample.input,) + sample.args,
                                              gen_non_contig_grad_outputs=True,
                                              check_batched_grad=op.check_batched_gradgrad,
                                              check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg="Unknown check requested!")
Example #3
    def _check_helper(self, device, dtype, op, variant, check):
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(
                f"Skipped! {op.name} does not support dtype {str(dtype)}")

        def is_inplace(variant):
            if hasattr(variant, "__wrapped__"):
                return variant.__wrapped__ is op.get_inplace()
            return variant is op.get_inplace()

        samples = op.sample_inputs(device, dtype, requires_grad=True)
        for sample in samples:
            if sample.broadcasts_input and is_inplace(variant):
                continue

            # Note on TensorList inputs
            #
            # gradcheck does not support TensorList inputs so here we pass TensorList
            # inputs of size n as n single Tensor inputs to gradcheck and wrap the op
            # in a function that puts the n Tensor inputs back into a TensorList
            def fn(*inputs):
                # Put tensors back into TensorList since we splat them when passing to gradcheck
                if is_iterable_of_tensors(sample.input):
                    n = len(sample.input)
                    inputs = (inputs[:n], *inputs[n:])
                output = op.gradcheck_wrapper(variant, *inputs,
                                              **sample.kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output

            # Splat TensorList inputs into single Tensor inputs
            gradcheck_args = (sample.input, ) if isinstance(
                sample.input, torch.Tensor) else tuple(sample.input)
            gradcheck_args += sample.args

            if check == 'gradcheck':
                self.assertTrue(
                    gradcheck(fn,
                              gradcheck_args,
                              check_batched_grad=op.check_batched_grad,
                              check_grad_dtypes=True))
            elif check == 'gradgradcheck':
                self.assertTrue(
                    gradgradcheck(fn,
                                  gradcheck_args,
                                  gen_non_contig_grad_outputs=False,
                                  check_batched_grad=op.check_batched_gradgrad,
                                  check_grad_dtypes=True))
                self.assertTrue(
                    gradgradcheck(fn,
                                  gradcheck_args,
                                  gen_non_contig_grad_outputs=True,
                                  check_batched_grad=op.check_batched_gradgrad,
                                  check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg="Unknown check requested!")
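The TensorList note in the example above can be reduced to a small standalone sketch. `stacked_sum` below is a hypothetical op used only to illustrate the splat/repack pattern; it is not part of the PyTorch test suite.

import torch
from torch.autograd import gradcheck

# Hypothetical op that consumes a TensorList (here, a tuple of Tensors).
def stacked_sum(tensors):
    return torch.stack(tensors).sum(dim=0)

tensor_list = tuple(torch.randn(3, dtype=torch.double, requires_grad=True)
                    for _ in range(2))

def fn(*inputs):
    # Pack the splatted Tensors back into a TensorList before calling the op,
    # mirroring the fn wrapper in the example above.
    return stacked_sum(inputs)

# Splat the TensorList into n separate Tensor inputs, since gradcheck does not
# accept a list of Tensors as a single input.
assert gradcheck(fn, tensor_list)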
Example #4
def test_grad_grad_viterbi(operator):
    states, emissions, theta = make_data(10)

    theta = torch.from_numpy(theta)
    theta = theta[:, None, :, :]
    theta.requires_grad_()

    viterbi = Viterbi(operator)
    gradgradcheck(viterbi, (theta, ))
Example #5
    def _check_helper(self, device, dtype, op, variant, check):
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(
                f"Skipped! {op.name} does not support dtype {str(dtype)}")

        samples = op.sample_inputs(device, dtype, requires_grad=True)
        for sample in samples:
            if sample.output_process_fn_grad is not None:
                out_fn = sample.output_process_fn_grad

                def variant_out_fn(*args, **kwargs):
                    return out_fn(variant(*args, **kwargs))
            else:
                variant_out_fn = variant

            def fn(*inputs):
                # Pack input back into TensorList since we splat it when passing to gradcheck
                if is_iterable_of_tensors(sample.input):
                    n = len(sample.input)
                    inputs = (inputs[:n], *inputs[n:])
                output = variant_out_fn(*inputs, **sample.kwargs)
                return op.output_func(output)

            # Gradcheck does not support TensorList so we splat it with the remaining args
            gradcheck_args = (sample.input, ) if isinstance(
                sample.input, torch.Tensor) else tuple(sample.input)
            gradcheck_args += sample.args

            if check == 'gradcheck':
                self.assertTrue(
                    gradcheck(fn,
                              gradcheck_args,
                              check_batched_grad=op.check_batched_grad,
                              check_grad_dtypes=True))
            elif check == 'gradgradcheck':
                self.assertTrue(
                    gradgradcheck(fn,
                                  gradcheck_args,
                                  gen_non_contig_grad_outputs=False,
                                  check_batched_grad=op.check_batched_gradgrad,
                                  check_grad_dtypes=True))
                self.assertTrue(
                    gradgradcheck(fn,
                                  gradcheck_args,
                                  gen_non_contig_grad_outputs=True,
                                  check_batched_grad=op.check_batched_gradgrad,
                                  check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg="Unknown check requested!")
Example #6
def test_grad_hessian_viterbi_two_samples(operator):
    states1, emissions1, theta1 = make_data(10)
    states2, emissions2, theta2 = make_data(5)
    lengths = torch.LongTensor([10, 5])

    theta1 = torch.from_numpy(theta1)
    theta2 = torch.from_numpy(theta2)

    theta1.requires_grad_()
    theta2.requires_grad_()

    viterbi = Viterbi(operator)

    def func(theta1_, theta2_):
        W = pad_sequence([theta1_, theta2_])
        return viterbi(W, lengths)

    gradcheck(func, (theta1, theta2))
    gradgradcheck(func, (theta1, theta2))
Example #7
    def test_autograd_to_mkldnn(self):
        # MKLDNN only supports float32
        root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)

        def func(root):
            return root.to_mkldnn().to_dense()

        # because MKLDNN only supports float32, we need to lessen the precision.
        # these numbers are just empirical results that seem to work.
        self.assertWarnsRegex(lambda: gradcheck(func, [root], atol=4e-2, rtol=1e-2),
                              'double precision floating point')
        self.assertWarnsRegex(lambda: gradgradcheck(func, [root], atol=4e-2, rtol=1e-2),
                              'double precision floating point')
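A minimal sketch of the warning that assertWarnsRegex matches above, assuming only public torch.autograd behaviour: gradcheck warns when a differentiable input is not double precision. Here `t.clone()` is a stand-in for the to_mkldnn()/to_dense() round trip, not the MKLDNN path itself.

import warnings

import torch
from torch.autograd import gradcheck

# Sketch only: capture the "double precision" warning emitted for float32 inputs.
x = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    gradcheck(lambda t: t.clone(), (x,), atol=4e-2, rtol=1e-2, raise_exception=False)
assert any("double precision floating point" in str(w.message) for w in caught)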
Example #8
    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
                                  inp_size, dilation, no_weight, groups=1, use_cuda=False,
                                  use_bias=True, dtype=torch.double):
        device = torch.device("dpcpp:0")
        x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
                        dtype=dtype, requires_grad=True)
        weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
                             dtype=dtype, requires_grad=not no_weight)
        if use_bias:
            bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
        else:
            bias = None

        def func(*inputs):
            if use_bias:
                lx, lweight, lbias = inputs
            else:
                lx, lweight = inputs
                lbias = None
            # We disable cudnn during forward to avoid finite difference imprecision issues
            with cudnn.flags(enabled=False):
                out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
            return out

        if use_bias:
            inputs = x, weight, bias
        else:
            inputs = x, weight

        dummy_out = func(*inputs)
        grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)

        # Issue #15353: test mkldnn double backward, don't run gradgradcheck due
        # to imprecision issues
        if dtype == torch.float:
            g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
            return g.requires_grad

        return gradgradcheck(func, inputs, (grad_y,))
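The float32 branch above does not run gradgradcheck; it relies on the fact that a gradient computed with create_graph=True is itself differentiable. A minimal CPU sketch of that check, independent of the dpcpp device and using an assumed toy function:

import torch

x = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
out = (x ** 2).sum()

# Build a graph for the gradient so it can be differentiated again.
g, = torch.autograd.grad(out, x, create_graph=True)
assert g.requires_grad  # the same property checked in the float32 branch above

# Differentiating the gradient once more: d/dx of sum(2 * x) is 2 everywhere.
g2, = torch.autograd.grad(g.sum(), x)
assert torch.allclose(g2, torch.full_like(x, 2.0))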
Example #9
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
    # call assert function rather than returning a bool since it's nicer
    # if we get whether this failed on the gradcheck or the gradgradcheck.
    test_case.assertTrue(gradcheck(apply_fn, inputs))
    test_case.assertTrue(gradgradcheck(apply_fn, inputs))
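A minimal, hypothetical usage sketch of the helper above. ExampleTest and the sin-based inputs are illustrative only and assume _assertGradAndGradgradChecks is in scope.

import unittest

import torch

class ExampleTest(unittest.TestCase):
    def test_sin_grads(self):
        # Double precision inputs keep gradcheck's finite differences accurate.
        x = torch.randn(4, dtype=torch.double, requires_grad=True)
        _assertGradAndGradgradChecks(self, torch.sin, (x,))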
Example #10
    def test_hessian_needlemanwunsch_function(self):
        needle = NeedlemanWunschDecoder(self.operator)
        inputs = (self.theta, self.A)
        gradgradcheck(needle, inputs, eps=1e-1, atol=1e-1, rtol=1e-1)
Example #11
    def test_hessian_needlemanwunsch_function_Arand(self):
        needle = NeedlemanWunschDecoder(self.operator)
        theta = self.theta.double()
        A = torch.rand_like(theta)
        inputs = (theta, A)
        gradgradcheck(needle, inputs, eps=1e-2)
Example #12
import torch
from torch.autograd import gradcheck, gradgradcheck


# NOTE: the class header is missing from the original snippet; a torch.nn.Module
# subclass is assumed here so that cube(X) dispatches to forward().
class Cube(torch.nn.Module):
    def forward(self, X):
        return X**3


torch.manual_seed(0)

X = torch.Tensor([3.])
X.requires_grad_()
print('x:', X)
cube = Cube()

Y = cube(X)
print('f(x):', Y)
S = torch.sum(Y)
S.backward()
print('<Grad (f)(x), 1>:', X.grad)

X.grad.zero_()
X.requires_grad_()
Y = cube(X)
S = torch.sum(Y)
G, = torch.autograd.grad(S, (X, ), create_graph=True)
S = G.sum()
S.backward()
print('Grad^2 (f) 1:', X.grad)

X.grad.zero_()
gradcheck(cube, (X, ), eps=1e-4, atol=1e-2)
X.grad.zero_()
gradgradcheck(cube, (X, ), eps=1e-4, atol=1e-2)
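For reference (basic calculus, not part of the original snippet): with f(x) = x**3, f'(x) = 3*x**2 and f''(x) = 6*x, so at x = 3 the two gradient prints above should show 27 and 18. A standalone double-precision check of the same function:

import torch
from torch.autograd import gradcheck, gradgradcheck

x = torch.tensor([3.0], dtype=torch.double, requires_grad=True)
cube_fn = lambda t: t ** 3  # same function as Cube.forward above

assert gradcheck(cube_fn, (x,))       # checks f'(x) = 3 * x**2 (= 27 at x = 3)
assert gradgradcheck(cube_fn, (x,))   # checks f''(x) = 6 * x (= 18 at x = 3)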