Example #1
    def test_noncontig(self, test_case, module, input):
        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
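        # Reference run: compute output and gradients on the contiguous input under a frozen RNG state.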
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output
            if isinstance(grad_output, Variable):
                grad_output = grad_output.data.clone()
            else:
                grad_output = grad_output.clone()
                output = output.clone()
            grad_output.normal_()
            d_input = deepcopy(
                test_case._backward(module, input, output, grad_output))
            d_param = deepcopy(test_case._get_parameters(module)[1])

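        # Re-run with every (non-)contiguous combination of input and grad_output and compare against the reference.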
        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            go = grad_output if contig_g else nc_grad_output
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                try:
                    out = test_case._forward(module, i)
                except Exception:
                    # Some modules will fail because of non-contiguous inputs, and we're OK with that
                    continue
                grad = test_case._backward(module, i, out, go)

                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(
                    test_case._get_parameters(module)[1], d_param)
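All of these examples run their forward and backward passes inside freeze_rng_state, which saves the global RNG state on entry and restores it on exit, so repeated runs see identical random draws. The helper itself is not shown in the snippets; the following is a minimal sketch of how such a context manager can be written with the public torch.get_rng_state/torch.set_rng_state API (an illustration, not necessarily the library's exact implementation):

    import contextlib

    import torch

    @contextlib.contextmanager
    def freeze_rng_state():
        # Save the global RNG state on entry and restore it on exit, so any
        # random operations inside the block leave later code unaffected.
        rng_state = torch.get_rng_state()
        if torch.cuda.is_available():
            cuda_rng_state = torch.cuda.get_rng_state()
        try:
            yield
        finally:
            if torch.cuda.is_available():
                torch.cuda.set_rng_state(cuda_rng_state)
            torch.set_rng_state(rng_state)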
Example #2
    def _do_test(self, test_case, module, input):
        # TODO: check update parameters
        # TODO: test IO
        module.training()
        test_case.check_jacobian(module, input, self.jacobian_input)
        module.evaluate()
        test_case.check_jacobian(module, input, self.jacobian_input)

        # Test .type()
        module.float().double().forward(input)

        # Test .clearState()
        module.clearState()

        # Test that the module can be printed
        module.__repr__()

        if self.check_inplace:
            input2 = deepcopy(input)
            module_ip = self.constructor(*self.constructor_args, inplace=True)
            with freeze_rng_state():
                output = module.forward(input)
            test_case.assertEqual(input, input2)
            with freeze_rng_state():
                output2 = module_ip.forward(input2)
            test_case.assertNotEqual(input, input2)
            test_case.assertEqual(output, input2)
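The check_inplace branch verifies that an inplace=True variant of the module writes its result into the input while computing the same values as the out-of-place version; freeze_rng_state guarantees both forward calls draw the same randomness when the module is stochastic. A standalone illustration of that contract, using nn.ReLU purely as a stand-in for self.constructor:

    import torch
    import torch.nn as nn

    x = torch.tensor([-1.0, 2.0, -3.0, 4.0])
    x2 = x.clone()
    out = nn.ReLU()(x)                  # out-of-place: x stays intact
    assert torch.equal(x, x2)
    out_ip = nn.ReLU(inplace=True)(x2)  # in-place: x2 is overwritten with the result
    assert not torch.equal(x, x2)
    assert torch.equal(out, x2)         # the input now holds the output
    assert torch.equal(out, out_ip)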
Example #3
    def test_noncontig(self, test_case, module, input):
        # Skip scalar inputs: 0-dim tensors can't be made non-contiguous
        if isinstance(input, torch.Tensor) and input.dim() == 0:
            return
        if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):
            return

        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output.new(output.shape).normal_()
            output = output.clone()
            d_input = deepcopy(
                test_case._backward(module, input, output, grad_output))
            d_param = deepcopy(test_case._get_parameters(module)[1])

        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            go = grad_output if contig_g else nc_grad_output
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                out = test_case._forward(module, i)
                grad = test_case._backward(module, i, out, go)

                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(
                    test_case._get_parameters(module)[1], d_param)
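self.noncontiguize is defined outside these snippets. A standard trick for producing a tensor with the same values but non-contiguous memory is to interleave it with a scratch tensor along a new trailing dimension and then select the original back out; a hypothetical minimal sketch (ignoring the list/tuple inputs and autograd bookkeeping a real helper would also handle):

    import torch

    def noncontiguize(tensor):
        # Stack with a same-shaped scratch tensor along a new trailing dim,
        # then select the original back out. The selected view has doubled
        # strides on the last axis, so it is non-contiguous whenever it
        # holds more than one element.
        ndim = tensor.dim()
        noncontig = torch.stack([torch.empty_like(tensor), tensor], ndim).select(ndim, 1)
        assert noncontig.numel() <= 1 or not noncontig.is_contiguous()
        return noncontig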
Example #4
    def test_noncontig(self, test_case, module, input):
        # Skip scalar inputs: 0-dim tensors can't be made non-contiguous
        if isinstance(input, torch.Tensor) and input.dim() == 0:
            return
        if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):
            return

        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output.new(output.shape).normal_()
            output = output.clone()
            d_input = deepcopy(test_case._backward(module, input, output, grad_output))
            d_param = deepcopy(test_case._get_parameters(module)[1])

        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            go = grad_output if contig_g else nc_grad_output
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                try:
                    out = test_case._forward(module, i)
                except Exception:
                    # Some modules will fail because of non-contiguous inputs, and we're OK with that
                    continue
                grad = test_case._backward(module, i, out, go)

                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(test_case._get_parameters(module)[1], d_param)
Example #5
    def test_noncontig(self, test_case, module, input):
        # Skip scalar inputs: 0-dim tensors can't be made non-contiguous
        if isinstance(input, Variable) and input.dim() == 0:
            return
        if any(i.dim() == 0 for i in input if isinstance(i, Variable)):
            return

        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output.new(output.shape).normal_()
            output = output.clone()
            d_input = deepcopy(test_case._backward(module, input, output, grad_output))
            d_param = deepcopy(test_case._get_parameters(module)[1])

        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            go = grad_output if contig_g else nc_grad_output
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                try:
                    out = test_case._forward(module, i)
                except Exception:
                    # Some modules will fail because of non-contiguous inputs, and we're OK with that
                    continue
                grad = test_case._backward(module, i, out, go)

                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(test_case._get_parameters(module)[1], d_param)
Example #6
    def test_manual_seed(self):
        with freeze_rng_state():
            x = torch.zeros(4, 4).float().cuda()
            torch.cuda.manual_seed(2)
            self.assertEqual(torch.cuda.initial_seed(), 2)
            x.uniform_()
            torch.cuda.manual_seed(2)
            y = x.clone().uniform_()
            self.assertEqual(x, y)
            self.assertEqual(torch.cuda.initial_seed(), 2)
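The same seeding check can be written against the CPU generator with torch.manual_seed; a minimal sketch, reusing the freeze_rng_state sketch from above so the test leaves the global RNG state untouched:

    import torch

    def test_manual_seed_cpu():
        with freeze_rng_state():
            torch.manual_seed(2)
            assert torch.initial_seed() == 2
            x = torch.zeros(4, 4).uniform_()
            torch.manual_seed(2)
            y = torch.zeros(4, 4).uniform_()
            assert torch.equal(x, y)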
Example #7
    def _forward(self, module, input):
        with freeze_rng_state():
            return module.forward(input)
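Wrapping _forward in freeze_rng_state makes stochastic modules deterministic across repeated calls: every call starts from the same saved RNG state, so e.g. dropout samples the same mask each time. A small usage sketch, again assuming the freeze_rng_state sketch above:

    import torch
    import torch.nn as nn

    drop = nn.Dropout(p=0.5)
    x = torch.randn(3, 3)
    with freeze_rng_state():
        a = drop(x)
    with freeze_rng_state():
        b = drop(x)
    assert torch.equal(a, b)  # identical dropout mask on both calls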