Example #1
 def forward(self, x):
     return torch.mkldnn_convolution(
         x,
         self.weight,
         self.bias,
         self.padding,
         self.stride,
         self.dilation,
         self.groups)
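For context, the module that owns this forward is not shown above; a minimal sketch of such a wrapper (the class name, parameter shapes, and defaults below are illustrative assumptions, and torch.mkldnn_convolution is only exposed in PyTorch builds with MKL-DNN/oneDNN support) might look like:

import torch

class MkldnnConv2dSketch(torch.nn.Module):
    # Hypothetical wrapper around torch.mkldnn_convolution; the argument
    # order is (input, weight, bias, padding, stride, dilation, groups).
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1):
        super().__init__()
        self.weight = torch.nn.Parameter(
            torch.randn(out_channels, in_channels // groups,
                        kernel_size, kernel_size) * 0.01)
        self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        self.padding = [padding, padding]
        self.stride = [stride, stride]
        self.dilation = [dilation, dilation]
        self.groups = groups

    def forward(self, x):
        return torch.mkldnn_convolution(
            x, self.weight, self.bias,
            self.padding, self.stride, self.dilation, self.groups)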
Example #2
    def test_conv2d_ext(self, device, dtype):
        # Collect the cases where dh or dw == 0; they are invalid, so we
        # skip them here and warn about them at the end.
        self.Fail = list()

        total_cases = self._collect_cases()
        for case in total_cases:
            case_name = case['case_name']
            bs = case['mb']
            group = case['g']
            ic, ih, iw = case['ic'], case['ih'], case['iw']
            oc = case['oc']
            kh, kw = case['kh'], case['kw']
            sh, sw = case['sh'], case['sw']
            ph, pw = case['ph'], case['pw']
            dh, dw = case['dh'], case['dw']
            has_bias = case['bias']
            if dh == 0 or dw == 0:
                self.Fail.append(case_name)
                continue

            ic_g = ic // group
            torch.manual_seed(1)
            input = torch.randn((bs, ic, ih, iw),
                                device=device,
                                dtype=dtype,
                                requires_grad=True)
            weight = torch.randn(
                (oc, ic_g, kh, kw), device=device, dtype=dtype) * 0.01
            bias = None if has_bias == 'False' else torch.randn(
                (oc), device=device, dtype=dtype)

            k = [kh, kw]
            s = [sh, sw]
            p = [ph, pw]
            d = [dh, dw]
            thnn_output = self._thnn_conv_group(input, weight, k, bias, s, p,
                                                d, group)
            if self.device_type == 'cpu' and torch.backends.mkldnn.is_available():
                output = torch.mkldnn_convolution(input, weight, bias, p, s, d,
                                                  group)
            elif self.device_type == 'cuda' and torch.backends.cudnn.is_available():
                output = torch.cudnn_convolution(input, weight, bias, p, s, d,
                                                 group, True, True)
            else:
                output = torch.conv2d(input, weight, bias, s, p, d, group)

            msg = 'device:{}, dtype:{}, group:{}, batchsize:{}, ' \
                  'input channel:{}, output channel:{}, ' \
                  'bias:{}, padding:{}, dilation:{}, stride:{}, ' \
                  'kernel:{}'
            msg = msg.format(device, dtype, group, bs, ic, oc, has_bias, p, d,
                             s, k)

            if self.device_type == 'cuda' and torch.backends.cudnn.is_available():
                self.assertEqual(output,
                                 thnn_output,
                                 msg=msg,
                                 atol=1e-2,
                                 rtol=1e-2)
            else:
                self.assertEqual(output, thnn_output, msg=msg)

        if self.Fail:
            warnings.warn('invalid cases, dilation height or width is 0: ' +
                          ",".join(self.Fail))