def max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode):
    # Route to the DNNL-backed MaxPoolingFunction when the input lives on the
    # 'dpcpp' device and auto-DNNL is enabled; if that path raises a
    # RuntimeError, fall back to the stock torch max_pool3d below.
    try:
        if input.device.type == 'dpcpp' and core.get_auto_dnnl():
            return MaxPoolingFunction.apply(input, kernel_size, stride, padding, dilation, ceil_mode)
    except RuntimeError:
        pass
    return torch_max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode)
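A minimal usage sketch of this dispatch pattern (an illustration, not the library's documented API): it assumes the extension is installed and importable, that it registers the 'dpcpp' device as in the tests below, and that max_pool3d above is in scope; the package import name varies across ipex versions.

import torch
# Assumption: older ipex releases that expose the 'dpcpp' device import as
# intel_pytorch_extension; adjust the name to your installed version.
import intel_pytorch_extension as ipex

ipex.enable_auto_dnnl()                          # turn the DNNL path on
x = torch.randn(1, 4, 8, 8, 8, device='dpcpp')   # NCDHW input on 'dpcpp'
# With a 'dpcpp' input and auto-DNNL enabled this routes through
# MaxPoolingFunction; otherwise it falls back to torch's max_pool3d.
y = max_pool3d(x, kernel_size=2, stride=2, padding=0,
               dilation=1, ceil_mode=False)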
Example #2
def adaptive_avg_pool2d(input, output_size):
    try:
        if input.device.type == 'dpcpp' and core.get_auto_dnnl():
            return AdaptiveAvgPool2dFunction.apply(input, output_size)
    except RuntimeError:
        pass
    return torch_adaptive_avg_pool2d(input, output_size)
Example #3
def reshape(input, size):
    if input.device.type == 'dpcpp' and core.get_auto_dnnl():
        return ReshapeFunction.apply(input, size)
    return torch_reshape(input, size)
Example #4
    def test_auto_dnnl(self):
        self.assertTrue(ipex.get_auto_dnnl())
        ipex.disable_auto_dnnl()
        self.assertFalse(ipex.get_auto_dnnl())
        ipex.enable_auto_dnnl()
        self.assertTrue(ipex.get_auto_dnnl())
Example #5
    def test_view(self):
        tensor = torch.rand(15, device=device)
        template = torch.rand(3, 5, device=device)
        target = template.size()
        self.assertEqual(tensor.view_as(template).size(), target)
        self.assertEqual(tensor.view(3, 5).size(), target)
        self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
        self.assertEqual(tensor.view(-1, 5).size(), target)
        self.assertEqual(tensor.view(3, -1).size(), target)

        tensor_view = tensor.view(5, 3)
        tensor_view.fill_(random.uniform(0, 1))
        empty = torch.empty(0, device=device)
        self.assertEqual(empty.view_as(empty), empty)
        self.assertEqual(empty.view(0), empty)
        self.assertEqual(
            empty.view(0, 3, 0, 1).size(), torch.Size([0, 3, 0, 1]))
        self.assertEqual(empty.view(0, 3, 0, 1).view(0), empty)

        # test size inference with empty tensors
        self.assertEqual(empty.view(-1).size(), torch.Size([0]))
        self.assertEqual(empty.view(10, 3, -1).size(), torch.Size([10, 3, 0]))

        with self.assertRaisesRegex(
                RuntimeError,
                r"because the unspecified dimension size -1 can be any value"):
            empty.view(-1, 0)

        with self.assertRaisesRegex(
                RuntimeError,
                r"because the unspecified dimension size -1 can be any value"):
            empty.view(3, 0, -1, 0)

        self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
        self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
        self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))

        # TODO(Eikan): The DNNL op does not support tensors with more than 6 dims, so we disable it temporarily; re-enable this once that is fixed.
        old_dnnl_conf = ipex.get_auto_dnnl()
        ipex.disable_auto_dnnl()
        # test view when tensor is not contiguous in every dimension, but only
        # contiguous dimensions are touched.
        tensor = torch.rand(4, 2, 5, 1, 6, 2, 9, 3,
                            device=device).transpose(-1, 2).transpose(-2, 3)
        # size:                      [   4,    2,    3,    9,    6,    2,    1,    5]
        # stride:                    [3840, 1620,    1,    3,   54,   27,  324,  324]
        # contiguous dim chunks:     [__________, ____, ____, __________, ____, ____]
        # merging 1 to chunk after:  [__________, ____, ____, __________, __________]
        contig_tensor = tensor.clone()
        # [4, 2] => [8, 1]
        # [3] => [3]
        # [9] => [3, 3]
        # [6, 2] => [4, 1, 3]
        # [1, 5] => [5]
        view_size = [8, 1, 3, 3, 3, 4, 1, 3, 5]
        self.assertEqual(tensor.view(*view_size),
                         contig_tensor.view(*view_size))
        # [4, 2] => [2, 4]
        # [3] => [3]
        # [9] => [1, 9]
        # [6, 2] => [2, 2, 3]
        # [1, 5] => [5, 1]
        view_size = [2, 4, 3, 1, 9, 2, 2, 3, 5, 1]
        self.assertEqual(tensor.view(*view_size),
                         contig_tensor.view(*view_size))
        # adding size 1 dims
        view_size = [1, 1, 2, 1, 4, 3, 1, 1, 9, 1, 2, 1, 2, 3, 1, 5, 1, 1]
        self.assertEqual(tensor.view(*view_size),
                         contig_tensor.view(*view_size))
        if old_dnnl_conf:
            ipex.enable_auto_dnnl()
        else:
            ipex.disable_auto_dnnl()

        # invalid views
        self.assertRaises(RuntimeError, lambda: tensor.view(-1))
        # crossing [4, 2], [3]
        self.assertRaises(RuntimeError, lambda: tensor.view(24, 9, 6, 2, 1, 5))
        # crossing [6, 2], [1, 5]
        self.assertRaises(RuntimeError, lambda: tensor.view(8, 3, 9, 6, 10))
        # crossing [9], [6, 2]
        self.assertRaises(RuntimeError, lambda: tensor.view(8, 3, 54, 2, 1, 5))

        # view with stride 0 dims
        tensor = torch.empty(1, 1, device=device).expand(
            3, 4)  # all dims are contiguous
        contig_tensor = tensor.clone()
        self.assertEqual(tensor.view(-1), contig_tensor.view(-1))
        self.assertEqual(tensor.view(1, -1, 1), contig_tensor.view(1, -1, 1))
        self.assertEqual(tensor.view(-1, 1), contig_tensor.view(-1, 1))
        self.assertEqual(tensor.view(6, 2, 1), contig_tensor.view(6, 2, 1))
        self.assertEqual(tensor.view(1, 6, 2, 1),
                         contig_tensor.view(1, 6, 2, 1))

        inputs_cpu = torch.randn(0, 6, requires_grad=True)
        inputs_dpcpp = inputs_cpu.detach().to(
            device=device).requires_grad_(True)
        out_dpcpp = inputs_dpcpp.view(1, 0, 6, 1, 1)
        out_cpu = inputs_cpu.view(1, 0, 6, 1, 1)
        self.assertEqual(out_dpcpp.to('cpu'), out_cpu, prec=0.0)

        out_dpcpp.sum().backward()
        out_cpu.sum().backward()
        self.assertEqual(inputs_dpcpp.grad.to('cpu'),
                         inputs_cpu.grad,
                         prec=0.0)
Example #6
def linear(input, weight, bias=None):
    if input.device.type == 'dpcpp' and core.get_auto_dnnl():
        return LinearFunction.apply(input, weight, bias)
    return F_linear(input, weight, bias)
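Likewise, a hedged sketch of calling the linear wrapper, under the same assumptions as the pooling example above (extension installed, 'dpcpp' device registered, linear in scope); the shapes here are illustrative.

import torch

x = torch.randn(4, 16, device='dpcpp')    # batch of 4, 16 input features
w = torch.randn(32, 16, device='dpcpp')   # 32 output features
b = torch.randn(32, device='dpcpp')
# Routes to the DNNL-backed LinearFunction when auto-DNNL is enabled,
# otherwise to torch.nn.functional.linear (bound here as F_linear).
y = linear(x, w, b)                        # y.shape == torch.Size([4, 32])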