Example 1
    def _test_add(self, is_cuda=False, is_double=False):
        if is_double:
            self.a = self.a.double()
            self.b = self.b.double()
            self.gt = self.gt.double()
        if is_cuda:
            print("Testing Halide PyTorch CUDA operator...")
            self.a = self.a.cuda()
            self.b = self.b.cuda()
            self.gt = self.gt.cuda()
        else:
            print("Testing Halide PyTorch CPU operator...")

        output = modules.Add()(self.a, self.b)

        if is_double:
            print("  Double-precision mode")
        else:
            print("  Single-precision mode")

        diff = (output - self.gt).sum().item()
        assert diff == 0.0, "Test failed: difference should be 0, got %f" % diff

        # Test the gradient is correct
        self.a.requires_grad = True
        self.b.requires_grad = True
        res = th.autograd.gradcheck(modules.Add(), [self.a, self.b], eps=1e-2)
        assert res, "Test failed: gradient check did not pass"

        print("  Test ran successfully: difference is", diff)
Example 2
    def _test_add(self, is_cuda=False, is_double=False):
        if is_double:
            self.a = self.a.double()
            self.b = self.b.double()
            self.gt = self.gt.double()
        if is_cuda:
            print("Testing Halide PyTorch CUDA operator...")
            self.a = self.a.cuda()
            self.b = self.b.cuda()
            self.gt = self.gt.cuda()
        else:
            print("Testing Halide PyTorch CPU operator...")

        output = modules.Add()(self.a, self.b)

        if is_double:
            print("  Double-precision mode")
        else:
            print("  Single-precision mode")

        diff = (output - self.gt).sum().item()
        assert diff == 0.0, "Test failed: difference should be 0, got %f" % diff

        # Test the gradient is correct
        self.a.requires_grad = True
        self.b.requires_grad = True

        with warnings.catch_warnings():
            # Inputs are float; the gradient checker wants double inputs and
            # will issue a warning.
            warnings.filterwarnings(
                "ignore",
                message="At least one of the inputs that requires "
                "gradient is not of double precision")
            res = th.autograd.gradcheck(modules.Add(), [self.a, self.b],
                                        eps=1e-2)
        assert res, "Test failed: gradient check did not pass"

        print("  Test ran successfully: difference is", diff)
Example 3
    def _test_add(self, is_cuda=False, is_double=False):
        if is_double:
            self.a = self.a.double()
            self.b = self.b.double()
            self.gt = self.gt.double()
        if is_cuda:
            print("Testing Halide PyTorch CUDA operator...")
            self.a = self.a.cuda()
            self.b = self.b.cuda()
            self.gt = self.gt.cuda()
        else:
            print("Testing Halide PyTorch CPU operator...")

        for backward_op in ["add_grad", "add_halidegrad"]:
            add = modules.Add(backward_op)
            output = add(self.a, self.b)

            if is_double:
                print("  Double-precision mode, backward_op:", backward_op)
            else:
                print("  Single-precision mode, backward_op:", backward_op)

            diff = (output - self.gt).sum().item()
            assert diff == 0.0, \
                "Test failed: difference should be 0, got %f" % diff

            self.a.requires_grad = True
            self.b.requires_grad = True

            # Exercise the backward op repeatedly as a smoke test; gradients
            # accumulate in .grad, which does not affect the checks below.
            for _ in range(100):
                output = add(self.a, self.b).sum()
                output.backward()

            # Inputs are float; the gradient checker wants double inputs and
            # will issue a warning.
            warnings.filterwarnings("ignore", module=r".*gradcheck.*")

            # Test the gradient is correct
            res = th.autograd.gradcheck(add, [self.a, self.b], eps=1e-2)
            assert res, "Test failed: gradient check did not pass"

            print("     Test ran successfully: difference is", diff)