Example #1
    def init(self, N, IC, OC, L, G, kernel, stride, pad):
        super(QConv1dBenchmark, self).init(N, IC, OC, (L, ), G, (kernel, ), stride, pad)
        self.qconv1d = nnq.Conv1d(IC, OC, kernel, stride=stride, padding=pad, groups=G)
        self.qconv1d.set_weight_bias(self.qW, None)
        self.qconv1d.scale = torch.tensor([self.scale], dtype=torch.double)
        self.qconv1d.zero_point = torch.tensor([self.zero_point], dtype=torch.int)
        self.set_module_name("QConv1d")
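This snippet assumes self.qW, self.scale, and self.zero_point were already prepared by the base class init (not shown here). A minimal sketch of how such a per-tensor quantized weight can be built for nnq.Conv1d.set_weight_bias, assuming import torch and illustrative shapes and scale values:

    W = torch.randn(OC, IC // G, kernel, dtype=torch.float32)
    # Per-tensor affine quantization of the float weight to qint8, the dtype
    # expected by set_weight_bias on quantized convolution modules.
    qW = torch.quantize_per_tensor(W, scale=1.0 / 255, zero_point=0, dtype=torch.qint8)

Example #2 below shows the same benchmark setup with the weight and input quantization spelled out in full.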
Example #2
    def init(self, IC, OC, kernel, stride, N, L, device):
        G = 1
        pad = 0
        self.scale = 1.0 / 255
        self.zero_point = 0
        X = torch.randn(N, IC, L, dtype=torch.float32)
        qX = torch.quantize_per_tensor(X,
                                       scale=self.scale,
                                       zero_point=self.zero_point,
                                       dtype=torch.quint8)
        # Create the weight tensor and quantize it per tensor to qint8
        W = torch.randn(OC, IC // G, kernel, dtype=torch.float32)
        self.qW = torch.quantize_per_tensor(W,
                                            scale=self.scale,
                                            zero_point=0,
                                            dtype=torch.qint8)

        self.input = qX

        self.qconv1d = nnq.Conv1d(IC,
                                  OC,
                                  kernel,
                                  stride=stride,
                                  padding=pad,
                                  groups=G)
        self.qconv1d.set_weight_bias(self.qW, None)
        self.qconv1d.scale = torch.tensor([self.scale], dtype=torch.double)
        self.qconv1d.zero_point = torch.tensor([self.zero_point],
                                               dtype=torch.int)
        self.set_module_name("QConv1d")
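Once init has built the quantized module and its quantized input, the benchmark body only has to run the convolution. A minimal sketch of that step, assuming the operator_benchmark framework calls it with the tensor stored in self.input (method name and signature illustrative):

    def forward(self, input):
        # Both the input and the returned tensor are quint8 quantized tensors;
        # the output uses the scale/zero_point assigned to self.qconv1d above.
        return self.qconv1d(input)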
Example #3
    def test_conv1d_api(self, batch_size, in_channels_per_group, length,
                        out_channels_per_group, groups, kernel, stride, pad,
                        dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                        Y_scale, Y_zero_point, use_bias, use_fused,
                        use_channelwise):
        # Tests the correctness of the quantized conv1d module.
        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (length, )
        kernel_size = (kernel, )
        stride = (stride, )
        pad = (pad, )
        dilation = (dilation, )
        if torch.backends.quantized.engine == 'qnnpack':
            use_channelwise = False
        if use_fused:
            module_name = "QuantizedConvReLU1d"
            qconv_module = nnq_fused.ConvReLU1d(in_channels,
                                                out_channels,
                                                kernel,
                                                stride,
                                                pad,
                                                dilation,
                                                groups,
                                                use_bias,
                                                padding_mode="zeros")
        else:
            module_name = "QuantizedConv1d"
            qconv_module = nnq.Conv1d(in_channels,
                                      out_channels,
                                      kernel,
                                      stride,
                                      pad,
                                      dilation,
                                      groups,
                                      use_bias,
                                      padding_mode="zeros")

        conv_module = nn.Conv1d(in_channels,
                                out_channels,
                                kernel,
                                stride,
                                pad,
                                dilation,
                                groups,
                                use_bias,
                                padding_mode="zeros")
        if use_fused:
            relu_module = nn.ReLU()
            conv_module = nni.ConvReLU1d(conv_module, relu_module)
        conv_module = conv_module.float()

        self._test_conv_api_impl(module_name, qconv_module, conv_module,
                                 batch_size, in_channels_per_group,
                                 input_feature_map_size,
                                 out_channels_per_group, groups, kernel_size,
                                 stride, pad, dilation, X_scale, X_zero_point,
                                 W_scale, W_zero_point, Y_scale, Y_zero_point,
                                 use_bias, use_fused, use_channelwise)
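The actual checking is delegated to self._test_conv_api_impl, which is not shown here. As a rough illustration only, not the helper's real body, here is a minimal sketch of the kind of float-vs-quantized comparison such a test performs, restricted to the non-fused, per-tensor, bias-free case and using hypothetical local names:

    # Share one quantized weight between the float reference and the quantized module.
    W = conv_module.weight.detach()
    qW = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zero_point, dtype=torch.qint8)
    qconv_module.set_weight_bias(qW, None)
    conv_module.weight = torch.nn.Parameter(qW.dequantize())

    # Quantize a random input with the requested input parameters.
    X = torch.randn(batch_size, in_channels, length, dtype=torch.float32)
    qX = torch.quantize_per_tensor(X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)

    # Reference path: float conv on the dequantized input, requantized with the output parameters.
    Y_ref = torch.quantize_per_tensor(conv_module(qX.dequantize()),
                                      scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)

    # Quantized path: run the quantized module directly on the quantized input.
    qconv_module.scale = Y_scale
    qconv_module.zero_point = Y_zero_point
    Y_q = qconv_module(qX)

    # The two results should agree up to one quantization step.
    torch.testing.assert_close(Y_q.int_repr().to(torch.float32),
                               Y_ref.int_repr().to(torch.float32),
                               atol=1.0, rtol=0)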