Example 1
def check_zero_offset():
    """Sanity check: with zero offsets, an identity-initialized DCNv2 must
    reproduce its input.

    Both predictor convs are zero-initialized, so the sampling offsets are
    all zero and the mask is sigmoid(0) == 0.5 everywhere.  With an identity
    kernel in the DCNv2, the raw output is therefore 0.5 * input; doubling
    it should recover the input up to float noise.
    """
    # Predicts 2 offset values per kernel position per deformable group.
    conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
                            kernel_size=(kH, kW),
                            stride=(1, 1),
                            padding=(1, 1),
                            bias=True).cuda()

    # Predicts 1 modulation scalar per kernel position per deformable group.
    conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
                          kernel_size=(kH, kW),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=True).cuda()

    dcn_v2 = DCNv2(inC, outC, (kH, kW),
                   stride=1, padding=1, dilation=1,
                   deformable_groups=deformable_groups).cuda()

    # Zero both predictors so offset == 0 and mask == sigmoid(0) == 0.5.
    for predictor in (conv_offset, conv_mask):
        predictor.weight.data.zero_()
        predictor.bias.data.zero_()
    conv_identify(dcn_v2.weight, dcn_v2.bias)

    x = torch.randn(N, inC, inH, inW).cuda()
    out = dcn_v2(x, conv_offset(x), torch.sigmoid(conv_mask(x)))
    out *= 2  # undo the 0.5 modulation coming from sigmoid(0)
    max_err = (x - out).abs().max()
    if max_err < 1e-10:
        print('Zero offset passed')
    else:
        print('Zero offset failed')
Example 2
    def __init__(self, filters_in, anchor_num, fo_class, temp=False):
        """Detection head with deformable-conv localization and class branches.

        :param filters_in: number of input feature channels
        :param anchor_num: anchors predicted per spatial location
        :param fo_class: classes predicted per anchor
        :param temp: flag stored as-is; not used inside this constructor
        """
        super(MTR_Head1, self).__init__()
        self.fo_class = fo_class
        self.anchor_num = anchor_num
        self.temp = temp

        # All prediction heads below are plain 1x1 convolutions.
        pointwise = dict(kernel_size=1, stride=1, padding=0)

        # Confidence head: one scalar per anchor.
        self.__conv_conf = nn.Conv2d(in_channels=filters_in,
                                     out_channels=self.anchor_num * 1,
                                     **pointwise)

        # Offset/mask predictor for a 3x3 deformable kernel:
        # (2 offsets + 1 mask) * 9 positions = 3 * 9 output channels.
        self.__conv_offset_mask = nn.Conv2d(in_channels=filters_in,
                                            out_channels=3 * 9,
                                            bias=True,
                                            **pointwise)

        # Localization branch: DCNv2 -> BN -> LeakyReLU -> 1x1 box head.
        self.__dconv_loc = DCNv2(filters_in, filters_in,
                                 kernel_size=3, stride=1, padding=1)
        self.__bnloc = nn.BatchNorm2d(filters_in)
        self.__reluloc = nn.LeakyReLU(inplace=True)
        self.__dconv_locx = nn.Conv2d(filters_in, self.anchor_num * 4,
                                      **pointwise)

        # Classification branch: DCNv2 -> BN -> LeakyReLU -> 1x1 class head.
        self.__dconv_cla = DCNv2(filters_in, filters_in,
                                 kernel_size=3, stride=1, padding=1)
        self.__bncla = nn.BatchNorm2d(filters_in)
        self.__relucla = nn.LeakyReLU(inplace=True)
        self.__dconv_clax = nn.Conv2d(filters_in,
                                      self.anchor_num * self.fo_class,
                                      **pointwise)

        # Zero-initializes the offset/mask predictor (defined on the class).
        self.init_offset()
Example 3
    def __init__(self,
                 inplanes,
                 planes,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bn=False,
                 deformable_groups=1,
                 only_dcn=False,
                 use_depth=False):
        """Deformable-conv block: offset/mask predictor + DCNv2 (+ BN/ReLU).

        :param inplanes: input channels of the deformable convolution
        :param planes: output channels of the deformable convolution
        :param bn: if True, insert BatchNorm2d before the ReLU
        :param only_dcn: if True, the block is the bare DCNv2 (no BN/ReLU)
        :param use_depth: if True, the offset/mask predictor takes a
            single-channel input (presumably a depth map -- confirm at the
            call site) instead of the full feature map
        """
        super(DeFormConvModule, self).__init__()

        # (2 offsets + 1 mask) per kernel position per deformable group.
        offset_mask_channels = deformable_groups * 3 * kernel_size * kernel_size

        self.use_depth = use_depth
        offset_in = 1 if self.use_depth else inplanes
        self.conv_offset_mask = nn.Conv2d(offset_in,
                                          offset_mask_channels,
                                          kernel_size=kernel_size,
                                          stride=stride,
                                          padding=padding,
                                          bias=True)
        # Zero init: training starts from zero offsets and zero mask logits.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

        dcn = DCNv2(inplanes,
                    planes, (kernel_size, kernel_size),
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    deformable_groups=deformable_groups)

        # Optional post-processing appended after the deformable conv.
        tail = []
        if not only_dcn:
            if bn:
                tail.append(nn.BatchNorm2d(planes))
            tail.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(dcn, *tail)
Example 4
    def __init__(self, num_feat=64, deformable_groups=8):
        """Pyramid, cascading, deformable (PCD) alignment module.

        :param num_feat: channel count of every intermediate feature map
        :param deformable_groups: deformable groups used by each DCNv2
        """
        super(PCDAlignment, self).__init__()

        def conv3x3(in_ch, out_ch):
            # Every plain conv in this module shares the 3x3/stride-1/pad-1
            # shape; only the channel counts vary.
            return nn.Conv2d(in_channels=in_ch,
                             out_channels=out_ch,
                             kernel_size=(3, 3),
                             stride=(1, 1),
                             padding=(1, 1))

        # Pyramid has three levels:
        #   l3: 1/4 spatial size, l2: 1/2 spatial size, l1: original size.
        self.offset_conv1 = nn.ModuleDict()
        self.offset_conv2 = nn.ModuleDict()
        self.offset_conv3 = nn.ModuleDict()
        self.dcn_pack = nn.ModuleDict()
        self.feat_conv = nn.ModuleDict()

        # Build from the coarsest level (l3) down to the finest (l1).
        for level_idx in range(3, 0, -1):
            level = f'l{level_idx}'
            # First offset conv takes a 2*num_feat input.
            self.offset_conv1[level] = conv3x3(num_feat * 2, num_feat)
            if level_idx == 3:
                self.offset_conv2[level] = conv3x3(num_feat, num_feat)
            else:
                # Levels below l3 take a wider (2*num_feat) input here and
                # get an extra refinement conv.
                self.offset_conv2[level] = conv3x3(num_feat * 2, num_feat)
                self.offset_conv3[level] = conv3x3(num_feat, num_feat)
            # FIXME: DCNv2Pack() takes no arguments
            self.dcn_pack[level] = DCNv2(num_feat,
                                         num_feat,
                                         3,
                                         stride=1,
                                         padding=1,
                                         deformable_groups=deformable_groups)

            if level_idx < 3:
                self.feat_conv[level] = conv3x3(num_feat * 2, num_feat)

        # Cascading refinement stage after the pyramid.
        self.cas_offset_conv1 = conv3x3(num_feat * 2, num_feat)
        self.cas_offset_conv2 = conv3x3(num_feat, num_feat)
        self.cas_dcnpack = DCNv2(num_feat,
                                 num_feat,
                                 3,
                                 stride=1,
                                 padding=1,
                                 dilation=1,
                                 deformable_groups=deformable_groups)

        self.upsample = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=False)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
Example 5
# Sets up identical random inputs for a PyTorch DCNv2 forward pass; the MXNet
# import suggests this feeds a cross-framework comparison further down.
# NOTE(review): `torch`, `Variable` and `DCNv2` must be imported elsewhere in
# this file -- they are not imported in this chunk.
import numpy as np

import sys
# Make the custom MXNet build that contains the DCNv2 operator importable
# ahead of any system-installed mxnet.
sys.path.insert(0, '/data1/home/v-dachen/external/mxnet/mxnet_v1.1.0_dcnv2')
import mxnet as mx
from mxnet import autograd

# Test geometry: batch N, input channels/height/width.
N, inC, inH, inW = 2, 16, 64, 64
# N, inC, inH, inW = 2, 3, 1, 1
outC, outH, outW = 4, 64, 64
# outC = 3
kH, kW = 3, 3
# kH, kW = 1, 1
num_deformable_groups = 1

# NOTE(review): arguments are positional -- presumably (in_channels,
# out_channels, kernel, stride, padding, dilation, deformable_groups);
# confirm against the DCNv2 signature.
dcn_th = DCNv2(inC, outC, kH, 1, 1, 1, num_deformable_groups, no_bias=True)

# Random weight of shape (outC, inC, kH, kW) and matching random inputs.
weight = np.random.normal(0, 1, (4, 16, 3, 3)).astype(np.float32)
inputs = np.random.normal(0, 1, (N, inC, inH, inW)).astype(np.float32)
# One modulation scalar per kernel position per deformable group.
mask = np.random.uniform(
    0, 1, (N, num_deformable_groups * kH * kW, outH, outW)).astype(np.float32)
# Two offset values per kernel position per deformable group.
offset = np.random.normal(
    0, 1,
    (N, num_deformable_groups * 2 * kH * kW, outH, outW)).astype(np.float32)

# Inject the shared weight and wrap the arrays with the legacy autograd
# Variable API (gradients enabled for a later backward comparison).
dcn_th.weight.data = torch.from_numpy(weight).cuda()
inputs_var = Variable(torch.from_numpy(inputs).cuda(), requires_grad=True)
offset_var = Variable(torch.from_numpy(offset).cuda(), requires_grad=True)
mask_var = Variable(torch.from_numpy(mask).cuda(), requires_grad=True)

output_th = dcn_th(inputs_var, offset_var, mask_var)