def test_conv3d(self):
    """Smoke-test a quantized Conv3d module via the shared ``_test_op`` helper.

    Builds a small 3-channel Conv3d (3x3x3 kernel, unit stride, no padding)
    and runs it through ``self._test_op`` on a [1, 3, 6, 6, 6] input.
    """
    conv = nnq.Conv3d(
        3,
        3,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
    )
    # generate=False: compare against existing expected data rather than regenerate it
    # (assumed from the flag name -- confirm against _test_op's definition).
    self._test_op(conv, input_size=[1, 3, 6, 6, 6], generate=False)
def test_conv3d_api(
    self,
    batch_size,
    in_channels_per_group,
    D,
    H,
    W,
    out_channels_per_group,
    groups,
    kernel_d,
    kernel_h,
    kernel_w,
    stride_d,
    stride_h,
    stride_w,
    pad_d,
    pad_h,
    pad_w,
    dilation,
    X_scale,
    X_zero_point,
    W_scale,
    W_zero_point,
    Y_scale,
    Y_zero_point,
    use_bias,
    use_channelwise,
    use_fused,
    qengine,
):
    """Check the quantized Conv3d / ConvReLU3d module against its float reference.

    Builds the quantized module (fused ConvReLU3d when ``use_fused``) and the
    matching ``nn.Conv3d`` float module with identical geometry, then delegates
    the actual numerical comparison to ``self._test_conv_api_impl``.
    """
    # Guard: skip configurations whose backend is not compiled into this build.
    if qengine not in torch.backends.quantized.supported_engines:
        return

    in_channels = groups * in_channels_per_group
    out_channels = groups * out_channels_per_group
    input_feature_map_size = (D, H, W)
    kernel_size = (kernel_d, kernel_h, kernel_w)
    stride = (stride_d, stride_h, stride_w)
    padding = (pad_d, pad_h, pad_w)
    # The single scalar dilation is applied uniformly along D, H and W.
    dilation = (dilation, dilation, dilation)
    # Both the quantized and the float modules share this constructor signature.
    conv_args = (
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        groups,
        use_bias,
    )

    with override_quantized_engine(qengine):
        if use_fused:
            module_name = "QuantizedConvReLU3d"
            qconv_module = nnq_fused.ConvReLU3d(*conv_args, padding_mode="zeros")
        else:
            module_name = "QuantizedConv3d"
            qconv_module = nnq.Conv3d(*conv_args, padding_mode="zeros")

        conv_module = nn.Conv3d(*conv_args, padding_mode="zeros")
        if use_fused:
            # Mirror the fused quantized module with an intrinsic float Conv+ReLU pair.
            conv_module = nni.ConvReLU3d(conv_module, nn.ReLU())
        conv_module = conv_module.float()

        self._test_conv_api_impl(
            module_name,
            qconv_module,
            conv_module,
            batch_size,
            in_channels_per_group,
            input_feature_map_size,
            out_channels_per_group,
            groups,
            kernel_size,
            stride,
            padding,
            dilation,
            X_scale,
            X_zero_point,
            W_scale,
            W_zero_point,
            Y_scale,
            Y_zero_point,
            use_bias,
            use_fused,
            use_channelwise,
        )
def test_conv3d(self):
    """Smoke-test a quantized Conv3d under the fbgemm engine.

    Does nothing when fbgemm is not among the supported quantized engines
    for this build.
    """
    # Guard clause instead of wrapping the whole body in an `if`.
    if 'fbgemm' not in supported_qengines:
        return
    with override_quantized_engine('fbgemm'):
        conv = nnq.Conv3d(
            3,
            3,
            kernel_size=3,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            padding_mode="zeros",
        )
        # generate=False: compare against existing expected data rather than
        # regenerate it (assumed from the flag name -- confirm in _test_op).
        self._test_op(conv, input_size=[1, 3, 6, 6, 6], generate=False)