Example #1
 def forward(self, input):
     '''Apply the transposed 3D convolution, passing the weight through
     ``preHookFx`` first when a pre-hook is registered.
     '''
     if self.preHookFx is None:
         return F.conv_transpose3d(
             input,
             self.weight,
             self.bias,
             self.stride,
             self.padding,
             self.output_padding,
             self.groups,
             self.dilation,
         )
     else:
         return F.conv_transpose3d(
             input,
             self.preHookFx(self.weight),
             self.bias,
             self.stride,
             self.padding,
             self.output_padding,
             self.groups,
             self.dilation,
         )
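
A hedged usage sketch of the preHookFx pattern above: the hook lets a transform (for example clamping or quantization) run on the weight just before the convolution. The layer name Deconv3d and the clamp hook are illustrative assumptions, not part of the snippet.

import torch

# Hypothetical usage; Deconv3d stands in for the module defining forward() above.
layer = Deconv3d(in_channels=4, out_channels=2, kernel_size=3)
layer.preHookFx = lambda w: w.clamp(-1.0, 1.0)  # runs on the weight each forward
out = layer(torch.randn(1, 4, 8, 8, 8))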
Example #2
    def forward(self, x, w0, w1, b1, y):
        x = F.conv_transpose3d(x,
                               w0,
                               None,
                               stride=(2, 2, 2),
                               padding=(1, 0, 1),
                               output_padding=(1, 1, 0))
        x = F.conv_transpose3d(x,
                               w1,
                               b1,
                               stride=(1, 1, 2),
                               padding=(2, 2, 1),
                               dilation=(2, 2, 1),
                               groups=2)

        y = F.conv_transpose3d(y,
                               self.w2,
                               self.b2,
                               stride=(2, 2, 2),
                               padding=(1, 0, 1),
                               output_padding=(1, 1, 0))
        y = F.conv_transpose3d(y,
                               self.w3,
                               None,
                               stride=(1, 1, 2),
                               padding=(2, 2, 1),
                               dilation=(2, 2, 1),
                               groups=3)
        return x, y
Example #3
    def forward(self, input):
        '''Fold the channel dimension into the depth dimension, apply the
        transposed convolution, and restore the channel dimension afterwards.
        '''
        # device = input.device
        # dtype  = input.dtype
        # # Add necessary padding for odd spatial dimensions.
        # This is not needed: unpooling multiplies the spatial dimension, so it always divides evenly.
        # if input.shape[2]%self.weight.shape[2] != 0:
        #     input = torch.cat(
        #         (
        #             input,
        #             torch.zeros(
        #                 (input.shape[0], input.shape[1], input.shape[2]%self.weight.shape[2], input.shape[3], input.shape[4]),
        #                 dtype=dtype
        #             ).to(device)
        #         ),
        #         dim=2,
        #     )
        # if input.shape[3]%self.weight.shape[3] != 0:
        #     input = torch.cat(
        #         (
        #             input,
        #             torch.zeros(
        #                 (input.shape[0], input.shape[1], input.shape[2], input.shape[3]%self.weight.shape[3], input.shape[4]),
        #                 dtype=dtype
        #             ),
        #             dim=3,
        #         )
        #     )

        dataShape = input.shape

        if self.preHookFx is None:
            result = F.conv_transpose3d(
                input.reshape(
                    (dataShape[0], 1, -1, dataShape[3], dataShape[4])),
                self.weight,
                self.bias,
                self.stride,
                self.padding,
                self.output_padding,
                self.groups,
                self.dilation,
            )
        else:
            result = F.conv_transpose3d(
                input.reshape(
                    (dataShape[0], 1, -1, dataShape[3], dataShape[4])),
                self.preHookFx(self.weight),
                self.bias,
                self.stride,
                self.padding,
                self.output_padding,
                self.groups,
                self.dilation,
            )

        return result.reshape((result.shape[0], dataShape[1], -1,
                               result.shape[3], result.shape[4]))
Example #4
def pr_deconv3d(self, input):
    offset = input.min().detach()
    input = PreHook.apply(input, offset)
    # Keyword arguments keep `dilation` out of the positional
    # `output_padding` slot of F.conv_transpose3d.
    resp = F.conv_transpose3d(input, self.weight, self.bias,
                              stride=self.stride, padding=self.padding,
                              dilation=self.dilation,
                              groups=self.groups).detach()
    pos_weight = F.relu(self.weight).detach()
    norm_factor = F.conv_transpose3d(input - offset, pos_weight, None,
                                     stride=self.stride, padding=self.padding,
                                     dilation=self.dilation, groups=self.groups)
    output = PostHook.apply(resp, norm_factor)
    return output
Example #5
 def forward(self, x):
     if x.dim() == 5:
         # `chunk` needs an explicit count; per-sample chunking is assumed here.
         x = torch.cat([*x.chunk(x.size(0))], 2).squeeze()
     return Q(
         F.conv_transpose3d(x, self.weight, self.bias, self.stride,
                            self.padding, self.output_padding, self.groups,
                            self.dilation))
Example #6
    def test_fake_quant_per_channel_other_prec(self):
        kernel_size = 3

        quant_desc_input = QuantDescriptor(num_bits=4)
        quant_desc_weight = QuantDescriptor(num_bits=3, axis=(1,))  # `(1)` is an int, not a tuple

        quant_conv_object = quant_conv.QuantConvTranspose3d(
            _NUM_IN_CHANNELS,
            _NUM_OUT_CHANNELS,
            kernel_size,
            bias=False,
            quant_desc_input=quant_desc_input,
            quant_desc_weight=quant_desc_weight)
        test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16, 16)

        test_input_quantizer = TensorQuantizer(quant_desc_input)
        weight_quantizer = TensorQuantizer(quant_desc_weight)

        quant_input = test_input_quantizer(test_input)

        weight_copy = quant_conv_object.weight.clone()
        quant_weight = weight_quantizer(weight_copy)

        out1 = F.conv_transpose3d(quant_input, quant_weight)
        out2 = quant_conv_object(test_input)
        np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
Example #7
    def forward(self,
                x: torch.Tensor,
                output_size: Optional[List[int]] = None) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.convTranspose3d ---
        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant --  *F.convTranspose3d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        """

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size,
            self.dilation)  # type: ignore[arg-type]

        weight_quant_dequant = self.get_weight()
        result = F.conv_transpose3d(x, weight_quant_dequant, self.bias,
                                    self.stride, self.padding, output_padding,
                                    self.groups, self.dilation)
        return result
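
A self-contained sketch of the quant-dequant pattern the docstring describes, using plain per-tensor affine quantization; the tensor shapes and the scale/zero-point values are illustrative assumptions.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 4, 4, 4)
w = torch.randn(3, 2, 3, 3, 3)  # conv_transpose3d weight: (in, out, kD, kH, kW)

# w(float) -- quant - dequant, mirroring the diagram above
w_q = torch.quantize_per_tensor(w, scale=0.05, zero_point=0, dtype=torch.qint8)
w_dq = w_q.dequantize()

out = F.conv_transpose3d(x, w_dq, padding=1)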
Example #8
    def test_fake_quant_per_channel_bias(self):
        kernel_size = 3

        quant_conv_object = quant_conv.QuantConvTranspose3d(
            _NUM_IN_CHANNELS,
            _NUM_OUT_CHANNELS,
            kernel_size,
            bias=True,
            quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL)
        test_input = torch.randn(2, _NUM_IN_CHANNELS, 2, 2, 2)

        quant_input = tensor_quant.fake_tensor_quant(
            test_input, torch.max(torch.abs(test_input)))

        weight_copy = quant_conv_object.weight.clone()
        amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2, 3, 4))
        quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)

        out1 = F.conv_transpose3d(quant_input,
                                  quant_weight,
                                  bias=quant_conv_object.bias)
        out2 = quant_conv_object(test_input)
        np.testing.assert_array_equal(out1.detach().cpu().numpy(),
                                      out2.detach().cpu().numpy())
Example #9
    def forward(self, inputs: 'Tensor') -> 'Tensor':
        """ Forward pass method for transposed convolution layer.

        Parameters
        ----------
        inputs : torch Tensor
            input tensor for transposed convolution layer.

        Returns
        -------
        torch Tensor
            result of the transposed convolution applied to the input tensor.
        """
        conv_args = (inputs, self.weight, self.bias, self.stride, 0, 0,
                     self.groups, self.dilation)

        if self.ndims == 2:
            x = F.conv_transpose1d(*conv_args)
        elif self.ndims == 3:
            x = F.conv_transpose2d(*conv_args)
        elif self.ndims == 4:
            x = F.conv_transpose3d(*conv_args)
        else:
            raise ValueError('ndims must be 2, 3 or 4, got {}'.format(self.ndims))

        if self.crop:
            x = crop(x, self.crop_sizes)
        return x
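
The source keys its dispatch on self.ndims (spatial rank plus one); a standalone variant keyed directly on the input rank might look like the sketch below. The helper name conv_transpose_nd is an assumption for illustration.

import torch.nn.functional as F

def conv_transpose_nd(inputs, weight, bias, stride, groups, dilation):
    # Pick the transposed convolution matching the rank of the batched input.
    fn = {3: F.conv_transpose1d, 4: F.conv_transpose2d, 5: F.conv_transpose3d}[inputs.dim()]
    return fn(inputs, weight, bias, stride, 0, 0, groups, dilation)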
Example #10
 def forward_pass(m, in_tensor, weight):
     return F.conv_transpose3d(in_tensor,
                               weight,
                               stride=m.stride,
                               padding=m.padding,
                               output_padding=m.output_padding,
                               dilation=m.dilation,
                               groups=m.groups).detach()
Example #11
 def backward_pass(layer, in_tensor, weight):
     return F.conv_transpose3d(in_tensor,
                               weight,
                               stride=layer.stride,
                               padding=layer.padding,
                               output_padding=layer.output_padding,
                               groups=layer.groups,
                               dilation=layer.dilation).detach()
Example #12
    def forward(self, x):
        y = self.K(x)
        y = self.norm(y)
        y = self.act(y)
        y = -F.conv_transpose3d(
            y, self.K.weight, bias=self.K.bias, padding=self.kernel_size // 2)

        return y
Example #13
def snip_forward_deconv3d(self, x):
    return F.conv_transpose3d(
        x,
        self.weight * self.weight_mask,
        self.bias,
        self.stride,
        self.padding,
        self.output_padding,
    )
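
A sketch of how a SNIP-style forward like this is typically attached to an existing layer; the mask setup and the types.MethodType patching are assumptions based on common SNIP implementations, not part of the snippet.

import types
import torch
import torch.nn as nn

layer = nn.ConvTranspose3d(4, 2, kernel_size=3)
# Learnable mask over the weights; SNIP scores sensitivity via its gradient.
layer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))
layer.weight.requires_grad = False
layer.forward = types.MethodType(snip_forward_deconv3d, layer)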
Example #14
    def forward(self, foreground, mask, background="same"):
        # Assume the masked area has value 1.
        bz, nc, w, h, d = foreground.size()
        if background == "same":
            background = foreground.clone()
        background = background * (1 - mask)
        background = F.pad(background, [
            self.patch_size // 2, self.patch_size // 2, self.patch_size // 2,
            self.patch_size // 2, self.patch_size // 2, self.patch_size // 2
        ])
        conv_kernels_all = background.unfold(2, self.patch_size, self.stride) \
            .unfold(3, self.patch_size, self.stride) \
            .unfold(4, self.patch_size, self.stride) \
            .contiguous().view(bz, nc, -1, self.patch_size, self.patch_size, self.patch_size)
        conv_kernels_all = conv_kernels_all.transpose(2, 1)
        output_tensor = []
        for i in range(bz):
            feature_map = foreground[i:i + 1]

            # form convolutional kernels
            conv_kernels = conv_kernels_all[i] + 1e-7  # avoid division by zero in the norm below
            norm_factor = torch.sum(conv_kernels**2, [1, 2, 3, 4],
                                    keepdim=True)**0.5
            conv_kernels = conv_kernels / norm_factor

            conv_result = F.conv3d(feature_map,
                                   conv_kernels,
                                   padding=self.patch_size // 2)
            if self.propagate_size != 1:
                if self.prop_kernels is None:
                    self.prop_kernels = torch.ones([
                        conv_result.size(1), 1, self.propagate_size,
                        self.propagate_size, self.propagate_size
                    ])
                    self.prop_kernels.requires_grad = False
                    self.prop_kernels = self.prop_kernels.cuda()
                conv_result = F.conv3d(conv_result,
                                       self.prop_kernels,
                                       stride=1,
                                       padding=1,
                                       groups=conv_result.size(1))
            attention_scores = F.softmax(conv_result, dim=1)
            # Propagate the scores.
            recovered_foreground = F.conv_transpose3d(
                attention_scores,
                conv_kernels,
                stride=1,
                padding=self.patch_size // 2)
            # average the recovered value, at the same time make non-masked area 0
            recovered_foreground = (recovered_foreground *
                                    mask[i]) / (self.patch_size**3)
            # recover the image
            final_output = recovered_foreground + feature_map * (1 - mask[i])
            output_tensor.append(final_output)
        return torch.cat(output_tensor, dim=0)
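
A hedged usage sketch for the contextual-attention forward above; the module name ContextualAttention3d and its constructor arguments are assumptions, chosen to match the attributes the method reads.

import torch

attn = ContextualAttention3d(patch_size=3, stride=1, propagate_size=3).cuda()
foreground = torch.randn(2, 8, 16, 16, 16).cuda()
mask = torch.zeros(2, 1, 16, 16, 16).cuda()
mask[:, :, 4:12, 4:12, 4:12] = 1  # masked region is marked with 1
out = attn(foreground, mask)      # same shape as foreground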
Example #15
    def backward(self, x, dy):

        # Recompute forward
        y1 = self.K(x)
        y2 = self.norm(y1)
        y_act = self.act(y2)
        dyact = self.dact(y2)
        y = -F.conv_transpose3d(y_act,
                                self.K.weight,
                                bias=self.K.bias,
                                padding=self.kernel_size // 2)

        # dx = -K'diag(sigma'(x, K))*K*dy
        # dK = d/dK1(y'*K1'*sigma(K2*x)) + d/dK2(y'*K1'*sigma(K2*x))
        #    = d/dK1(sigma(K2*x)*K1*y) +  y'*K1'*dsigma(K2*x)  * d/dK2 (K2*x)
        #    = sigma(K2*x) d/dK1 (K1*y) + y'*K1'*dsigma(K2*x) * d/dK2(K2*x)
        # (put back K1 = -K, K2 = K)
        #    = - sigma(K*x) d/dK (K*y) - (K*y)'*dsigma(K*x) * d/dK (K*x)

        dx1 = self.K(dy)
        dx2 = dyact * dx1
        dx2 = self.dnorm(y1, dx2)
        dx = -F.conv_transpose3d(dx2,
                                 self.K.weight,
                                 bias=self.K.bias,
                                 padding=self.kernel_size // 2)

        if dy.device.type == 'cpu' or x.device.type == 'cpu':
            dK1 = -F.grad.conv3d_weight(
                dy, self.K.weight.shape, y_act, padding=self.kernel_size // 2)
            dK2 = -F.grad.conv3d_weight(
                x, self.K.weight.shape, dx2, padding=self.kernel_size // 2)
        else:
            dK1 = -conv3d_weight(
                dy, self.K.weight.shape, y_act, padding=self.kernel_size // 2)
            dK2 = -conv3d_weight(
                x, self.K.weight.shape, dx2, padding=self.kernel_size // 2)
        dK = dK1 + dK2

        self.K.weight.grad = dK

        return y, dx, dK
Example #16
    def _compute_flow_3d(self):

        # compute dense displacement
        displacement = F.conv_transpose3d(self.trans_parameters, self._kernel,
                                          padding=self._padding, stride=self._stride, groups=3)

        # crop displacement and move the channel axis to the end
        displacement = displacement[
            :, :,
            self._stride[0] + self._crop_start[0]:-self._stride[0] - self._crop_end[0],
            self._stride[1] + self._crop_start[1]:-self._stride[1] - self._crop_end[1],
            self._stride[2] + self._crop_start[2]:-self._stride[2] - self._crop_end[2]]
        return th.squeeze(displacement.permute(0, 2, 3, 4, 1))
Example #17
  def _conv_forward(self, input, weight, bias=None, output_size=None):
    if self.padding_mode != 'zeros':
      raise ValueError(
          'Only `zeros` padding mode is supported for QuantizedConvTransposeBatchNorm3d'
      )

    output_padding = self._output_padding(input, output_size, self.stride,
                                          self.padding, self.kernel_size)

    return F.conv_transpose3d(input, weight, bias, self.stride, self.padding,
                              output_padding, self.groups, self.dilation)
Example #18
    def forward(self, x, output_size=None):
        weight = self.weight

        if self.weight_standardization:
            weight = weight - weight.mean(dim=(1, 2, 3, 4), keepdim=True)
            weight = weight / (weight.std(dim=(1, 2, 3, 4), keepdim=True) + 1e-5)

        weight = F.conv3d(weight, self.kernel, padding=1, groups=self.in_channels)
        x = F.conv_transpose3d(x, weight, **self.kwargs)

        return x
Example #19
def sfb1d(lo, hi, g0, g1, mode='zero', dim=-1):
    """ 1D synthesis filter bank of an image tensor
    """
    C = lo.shape[1]
    d = dim % 5
    # Assume g0 and g1 are tensors in the right order; reshape them below if needed.
    L = g0.numel()
    shape = [1, 1, 1, 1, 1]
    shape[d] = L
    N = 2*lo.shape[d]
    # If g aren't in the right shape, make them so
    if g0.shape != tuple(shape):
        g0 = g0.reshape(*shape)
    if g1.shape != tuple(shape):
        g1 = g1.reshape(*shape)

    s = [1, 1, 1]
    s[d-2] = 2
    g0 = torch.cat([g0]*C, dim=0)
    g1 = torch.cat([g1]*C, dim=0)

    if mode == 'zero':
        pad = [0, 0, 0]
        pad[d-2] = (L-2)//2
        y = F.conv_transpose3d(lo, g0, stride=s, padding=pad, groups=C) + \
            F.conv_transpose3d(hi, g1, stride=s, padding=pad, groups=C)
    elif mode == 'reflect':
        pad = [0, 0, 0]
        pad[d-2] = (L-2)//2
        lo = padding(lo, pad, mode)
        hi = padding(hi, pad, mode)
        y = F.conv_transpose3d(lo, g0, stride=s, groups=C) + \
            F.conv_transpose3d(hi, g1, stride=s, groups=C)

    else:
        raise ValueError("Unkown pad type: {}".format(mode))

    return y
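
A runnable check of sfb1d in 'zero' mode with an orthonormal Haar pair; the stride-2 analysis step below is an assumption added for the demo, not part of the source.

import torch
import torch.nn.functional as F

x = torch.randn(1, 2, 4, 4, 8)
C = x.shape[1]
s2 = 2 ** -0.5
h0 = torch.tensor([s2, s2]).reshape(1, 1, 1, 1, 2)
h1 = torch.tensor([s2, -s2]).reshape(1, 1, 1, 1, 2)
# Analysis along the last dimension (grouped stride-2 convolutions).
lo = F.conv3d(x, torch.cat([h0] * C, 0), stride=(1, 1, 2), groups=C)
hi = F.conv3d(x, torch.cat([h1] * C, 0), stride=(1, 1, 2), groups=C)
# Synthesis recovers x exactly, since the Haar pair is orthonormal.
y = sfb1d(lo, hi, torch.tensor([s2, s2]), torch.tensor([s2, -s2]), mode='zero', dim=-1)
assert torch.allclose(x, y, atol=1e-5)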
Example #20
    def inverse(self, x):

        N, C, D, H, W = x.shape
        C = C // 8

        filters = torch.cat([
            self.weight,
        ] * C, dim=0)

        Y = F.conv_transpose3d(x, filters, groups=C, stride=2)

        # The factor 8 most likely undoes the (1/2)**3 = 1/8 scaling an analysis/
        # synthesis round trip picks up with 1/2-normalized Haar filters
        # (orthonormal Haar would use 1/sqrt(2) entries and need no factor).
        return Y * 8
Example #21
    def forwardBackward(self, dY):

        N, C, D, H, W = dY.shape
        C = C // 8

        filters = torch.cat([
            self.weight,
        ] * C, dim=0)

        dYnm1 = F.conv_transpose3d(dY, filters, groups=C, stride=2)

        return dYnm1
Example #22
    def backward(self, hidden):
        weight_flip = (0, 1, 3, 2, 4)

        # reverse the padding
        shrunk = hidden[:, :, 1:-1, 1:-1, 1:-1]

        l3 = F.conv_transpose3d(shrunk,
                                self.conv3.weight.flip(*weight_flip),
                                stride=1)
        F.relu(self.bn3_back(l3), inplace=True)

        l2 = F.conv_transpose3d(l3,
                                self.conv2.weight.flip(*weight_flip),
                                stride=1)
        F.relu(self.bn2_back(l2), inplace=True)

        l1 = F.conv_transpose3d(l2,
                                self.conv1.weight.flip(*weight_flip),
                                stride=1)

        return l1 + hidden
Example #23
    def forward(self, input, output_size=None):
        if self.train_scale:
            weight_scale = self.weight_scale
        else:
            weight_scale = Variable(self.weight_scale)

        # normalize weight matrix and linear projection [in x out x h x w x z]
        # for each output dimension, normalize through (in, h, w, z)  = (0, 2, 3, 4) dims
        if self.in_channels == self.out_channels:
            norm_weight = self.weight * (
                weight_scale[None, :, None, None, None] / torch.sqrt(
                    (self.weight**2).sum(4).sum(3).sum(2).sum(0) +
                    1e-6).reshape([-1, 1, 1, 1, 1])).expand_as(self.weight)
        else:
            norm_weight = self.weight * (
                weight_scale[None, :, None, None, None] /
                torch.sqrt((self.weight**2).sum(4).sum(3).sum(2).sum(0) +
                           1e-6)).expand_as(self.weight)
        output_padding = self._output_padding(input, output_size, self.stride,
                                              self.padding, self.kernel_size)
        activation = F.conv_transpose3d(input,
                                        norm_weight,
                                        bias=None,
                                        stride=self.stride,
                                        padding=self.padding,
                                        output_padding=output_padding,
                                        groups=self.groups)

        if self.init_mode:
            mean_act = activation.mean(4).mean(3).mean(2).mean(0).squeeze()
            activation = activation - mean_act[None, :, None, None,
                                               None].expand_as(activation)

            inv_stdv = self.init_stdv / torch.sqrt(
                (activation**2).mean(4).mean(3).mean(2).mean(0) +
                1e-6).squeeze()
            activation = activation * inv_stdv[None, :, None, None,
                                               None].expand_as(activation)

            if self.train_scale:
                self.weight_scale.data = self.weight_scale.data * inv_stdv.data
            else:
                self.weight_scale = self.weight_scale * inv_stdv.data
            self.bias.data = -mean_act.data * inv_stdv.data

        else:
            if self.bias is not None:
                activation = activation + self.bias[None, :, None, None,
                                                    None].expand_as(activation)

        return activation
Example #24
    def forward(self, input, output_size=None):
        if self.padding_mode != 'zeros':
            raise ValueError(
                'Only `zeros` padding mode is supported for QuantConvTranspose3d'
            )

        output_padding = self._output_padding(input, output_size, self.stride,
                                              self.padding, self.kernel_size)

        quant_input, quant_weight = self._quant(input)
        output = F.conv_transpose3d(quant_input, quant_weight, self.bias,
                                    self.stride, self.padding, output_padding,
                                    self.groups, self.dilation)

        return output
Example #25
    def compute_displacement(self, params):
        # compute dense displacement
        displacement = F.conv_transpose3d(params, self.kernel,
                                          padding=self.padding, stride=self.stride, groups=3)

        # crop displacement
        displacement = displacement[
            :, :,
            self.control_point_spacing[0] + self.crop_start[0]:
                -self.control_point_spacing[0] - self.crop_end[0],
            self.control_point_spacing[1] + self.crop_start[1]:
                -self.control_point_spacing[1] - self.crop_end[1],
            self.control_point_spacing[2] + self.crop_start[2]:
                -self.control_point_spacing[2] - self.crop_end[2]]

        return displacement.permute(0, 2, 3, 4, 1)
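
A shape sketch for compute_displacement; the control-point grid, the stand-in kernel, and the spacing values are illustrative assumptions (a real implementation would use a B-spline kernel).

import torch
import torch.nn.functional as F

spacing = (4, 4, 4)                          # control-point spacing = stride
kernel = torch.ones(3, 1, 9, 9, 9) / 9 ** 3  # (3, 1, kD, kH, kW): one filter per axis
params = torch.randn(1, 3, 10, 10, 10)       # (N, 3, cpD, cpH, cpW) control points
dense = F.conv_transpose3d(params, kernel, padding=4, stride=spacing, groups=3)
# dense: (1, 3, D, H, W) displacement field, one channel per spatial axis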
Example #26
  def forward(self, input, output_size=None):
    if self.padding_mode != 'zeros':
      raise ValueError(
          'Only `zeros` padding mode is supported for ConvTranspose3d')

    output_padding = self._output_padding(input, output_size, self.stride,
                                          self.padding, self.kernel_size)

    quantized_weight = self.weight_quantizer(self.weight)
    quantized_bias = self.bias_quantizer(
        self.bias) if self.bias is not None else None

    return F.conv_transpose3d(input, quantized_weight, quantized_bias,
                              self.stride, self.padding, output_padding,
                              self.groups, self.dilation)
Example #27
    def test_no_quant(self):
        kernel_size = 3

        quant_conv_object = quant_conv.QuantConvTranspose3d(
            _NUM_IN_CHANNELS,
            _NUM_OUT_CHANNELS,
            kernel_size,
            bias=False)
        quant_conv_object.input_quantizer.disable()
        quant_conv_object.weight_quantizer.disable()
        test_input = torch.randn(16, _NUM_IN_CHANNELS, 32, 32, 32)

        weight_copy = quant_conv_object.weight.clone()
        quant_weight = weight_copy

        out1 = F.conv_transpose3d(test_input, quant_weight)
        out2 = quant_conv_object(test_input)
        np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
Example #28
def conv_transpose(x: Tensor, kernel: Tensor, stride: List[int],
                   oshape: List[int]) -> Tensor:
    """ND transposed convolution (padding = 'same')
    x : (B, Ci, *inspatial) tensor
    kernel : (Ci, Co, *kernel_size) tensor
    stride : List{dim}[int]
    oshape : List{dim}[int]
    returns : (B, Co, *outspatial) tensor
    """
    dim = x.dim() - 2
    ishape = x.shape[-dim:]
    kernel_size = kernel.shape[-dim:]
    out_channels = kernel.shape[-dim - 1]
    inp_channels = kernel.shape[-dim - 2]
    if inp_channels == out_channels == 1:
        groups = x.shape[-dim - 1]
        kernel = kernel.expand(torch.Size([groups, 1]) + kernel.shape[2:])
    else:
        groups = 1
    ipad = conv_input_padding(kernel_size)
    opad = convt_output_padding(oshape, ishape, kernel_size, stride)
    if dim == 1:
        x = F.conv_transpose1d(x,
                               kernel,
                               stride=stride,
                               output_padding=opad,
                               padding=ipad,
                               groups=groups)
    elif dim == 2:
        x = F.conv_transpose2d(x,
                               kernel,
                               stride=stride,
                               output_padding=opad,
                               padding=ipad,
                               groups=groups)
    else:
        x = F.conv_transpose3d(x,
                               kernel,
                               stride=stride,
                               output_padding=opad,
                               padding=ipad,
                               groups=groups)
    return x
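
The helpers conv_input_padding and convt_output_padding are not shown in the source; one plausible implementation, derived from the transposed-convolution output-size formula out = (in - 1) * stride - 2 * pad + (kernel - 1) + output_padding + 1 (dilation assumed 1), is:

from typing import List

def conv_input_padding(kernel_size: List[int]) -> List[int]:
    # 'same'-style padding: half the kernel per dimension.
    return [(k - 1) // 2 for k in kernel_size]

def convt_output_padding(oshape: List[int], ishape: List[int],
                         kernel_size: List[int], stride: List[int]) -> List[int]:
    # Solve the output-size formula for output_padding.
    pad = conv_input_padding(kernel_size)
    return [o - ((i - 1) * s - 2 * p + k)
            for o, i, s, p, k in zip(oshape, ishape, stride, pad, kernel_size)]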
Example #29
 def forward(self, x):
     # self.cnt += 1
     # if self.cnt==1000:
     #import ipdb as pdb; pdb.set_trace()
     # if self.config["quantization"].lower() == "bnn":
     #     Wb = quant.bnn_sign(self.weight/self.H)*self.H
     # elif self.config["quantization"].lower() == "int":
     #     Wb = quant.int_nn(self.weight, self.config["weight_i_width"])
     # elif self.config["quantization"].lower() == "fixed":
     #     Wb = quant.fixed_nn(self.weight, self.config["weight_i_width"], self.config["weight_f_width"])
     # elif self.config["quantization"].lower() == "ternary":
     #     Wb = quant.ternary_q(self.weight)
     # else:
     #     Wb = self.weight
    if self.config["quantization"].lower() == "fixed":
         Wb = quant.fixed_nn(self.weight, self.config["weight_i_width"], self.config["weight_f_width"])
    else:
         Wb = self.weight
    return F.conv_transpose3d(x, Wb, self.bias, self.stride, self.padding)
Example #30
 def test_conv_transpose3d(self):
     # Data and weight tensors
     conv_transpose3d_tensor = torch.randn(20,
                                           16,
                                           50,
                                           10,
                                           20,
                                           device='cuda',
                                           dtype=self.dtype)
     conv_transpose3d_filter = torch.randn(16,
                                           33,
                                           3,
                                           3,
                                           3,
                                           device='cuda',
                                           dtype=self.dtype)
     conv_transpose3d_bias = torch.randn(33,
                                         device='cuda',
                                         dtype=self.dtype)
     # Conv transpose runs
     conv_transpose3d_out = F.conv_transpose3d(conv_transpose3d_tensor,
                                               conv_transpose3d_filter)
     conv_transpose3d_out_biased = F.conv_transpose3d(
         conv_transpose3d_tensor,
         conv_transpose3d_filter,
         bias=conv_transpose3d_bias)
     conv_transpose3d_out_strided = F.conv_transpose3d(
         conv_transpose3d_tensor, conv_transpose3d_filter, stride=2)
     conv_transpose3d_out_padded = F.conv_transpose3d(
         conv_transpose3d_tensor, conv_transpose3d_filter, padding=3)
     conv_transpose3d_out2_padded = F.conv_transpose3d(
         conv_transpose3d_tensor,
         conv_transpose3d_filter,
         output_padding=2,
         dilation=3)
     conv_transpose3d_out_grouped = F.conv_transpose3d(
         conv_transpose3d_tensor, conv_transpose3d_filter, groups=2)
     conv_transpose3d_out_dilated = F.conv_transpose3d(
         conv_transpose3d_tensor, conv_transpose3d_filter, dilation=2)
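
For reference, each spatial output size of F.conv_transpose3d follows (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + output_padding + 1; a quick standalone check (CPU tensors, shapes borrowed from the test above):

import torch
import torch.nn.functional as F

x = torch.randn(1, 16, 50, 10, 20)
w = torch.randn(16, 33, 3, 3, 3)
out = F.conv_transpose3d(x, w, stride=2, padding=3, output_padding=1, dilation=2)
# Depth: (50 - 1) * 2 - 2 * 3 + 2 * (3 - 1) + 1 + 1 = 98
assert out.shape == (1, 33, 98, 18, 38)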