Example #1
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, dcn=False, padding=-1, dilation=1):
    # Default to "same" padding for odd kernel sizes.
    if padding == -1:
        padding = (kernel_size - 1) // 2
    if batchNorm:
        if dcn:
            return nn.Sequential(
                DCN(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
                nn.BatchNorm2d(out_planes),
                nn.LeakyReLU(0.1, inplace=True)
            )
        else:
            return nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False),
                nn.BatchNorm2d(out_planes),
                nn.LeakyReLU(0.1, inplace=True)
            )
    else:
        if dcn:
            return nn.Sequential(
                DCN(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
                nn.LeakyReLU(0.1, inplace=True)
            )
        else:
            return nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True),
                nn.LeakyReLU(0.1, inplace=True)
            )
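
For reference, a minimal usage sketch of the helper above (assuming torch is imported and nn is torch.nn; the dcn=False path keeps it runnable without the CUDA-only DCN op):

import torch
import torch.nn as nn

block = conv(batchNorm=True, in_planes=64, out_planes=128, kernel_size=3, stride=2)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)  # torch.Size([1, 128, 28, 28]); "same" padding, spatial size halved by the stride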
Example #2
def export_dconv_onnx():
    input = torch.randn(1, 64, 128, 128).cuda()
    # wrap all things (offset and mask) in DCN
    dcn = DCN(64,
              64,
              kernel_size=(3, 3),
              stride=1,
              padding=1,
              deformable_groups=2).cuda()
    dcn.eval()
    dynamic = False  # set to True to export with a dynamic batch dimension
    torch.onnx.export(dcn,
                      input,
                      "dcn.onnx",
                      input_names=["input"],
                      output_names=["output"],
                      dynamic_axes={
                          "input": {
                              0: 'batch'
                          },
                          "output": {
                              0: 'batch'
                          }
                      } if dynamic else None,
                      opset_version=11,
                      do_constant_folding=True)
    print("export dcn onnx successfully!")
Example #3
def __init__(self, a, b, c):
    super(DUB, self).__init__()
    self.conv3 = nn.Sequential(
        nn.BatchNorm2d(a), nn.ReLU(inplace=True),
        DCN(a, b, kernel_size=3, stride=1, padding=1))
    self.conv1 = nn.Sequential(
        nn.BatchNorm2d(a + b), nn.ReLU(inplace=True),
        DCN(a + b, c, kernel_size=1, stride=1, padding=0))
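
The forward pass of DUB is not included in this snippet; given the channel arithmetic (conv1 consumes a + b channels), one plausible, purely hypothetical sketch is a dense-style concatenation (DCN itself requires CUDA):

def forward(self, x):
    y = self.conv3(x)                             # a -> b channels
    return self.conv1(torch.cat([x, y], dim=1))   # (a + b) -> c channels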
Example #4
def example_dconv_half():
    input = torch.randn(2, 64, 128, 128).half().cuda()
    # wrap all things (offset and mask) in DCN
    dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
              padding=1, deformable_groups=2)
    dcn = dcn.half().cuda()

    print(dcn)

    # print(dcn.weight.shape, input.shape)
    output = dcn(input)
    # target = output.new(*output.size())
    # target.data.uniform_(-0.01, 0.01)
    # error = (target - output).mean()
    # error.backward()
    print(output.shape)
Example #5
def __init__(self, c1, c2, k=4, s=2):
    super(DeConvDCN, self).__init__()
    self.layers = []
    dcn = DCN(c1,
              c2,
              kernel_size=(3, 3),
              stride=1,
              padding=1,
              dilation=1,
              deformable_groups=1)
    deconv = nn.ConvTranspose2d(in_channels=c2,
                                out_channels=c2,
                                kernel_size=k,
                                stride=s,
                                padding=1,
                                output_padding=0,
                                bias=False)
    fill_up_weights(deconv)
    self.layers.append(dcn)
    self.layers.append(nn.BatchNorm2d(c2))
    self.layers.append(nn.SiLU())
    self.layers.append(deconv)
    self.layers.append(nn.BatchNorm2d(c2))
    self.layers.append(nn.SiLU())
    self.layers = nn.Sequential(*self.layers)
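
A forward for this module is presumably just the stacked Sequential; note that with the defaults (k=4, s=2, padding=1, output_padding=0) the transposed convolution exactly doubles the spatial size, since (H - 1) * 2 - 2 * 1 + 4 + 0 = 2H. A sketch, assuming no extra logic:

def forward(self, x):
    # DCN -> BN -> SiLU -> ConvTranspose2d -> BN -> SiLU
    return self.layers(x)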
Example #6
    def _make_deconv_layer(self, num_filters, num_kernels):

        layers = []

        kernel, padding, output_padding = \
            self._get_deconv_cfg(num_kernels)

        planes = num_filters
        fc = DCN(self.inplanes, planes,
                 kernel_size=(3, 3), stride=1,
                 padding=1, dilation=1, deformable_groups=1)
        # fc = nn.Conv2d(self.inplanes, planes,
        #         kernel_size=3, stride=1,
        #         padding=1, dilation=1, bias=False)
        # fill_fc_weights(fc)
        up = nn.ConvTranspose2d(
            in_channels=planes,
            out_channels=planes,
            kernel_size=kernel,
            stride=2,
            padding=padding,
            output_padding=output_padding,
            bias=self.deconv_with_bias)
        fill_up_weights(up)

        layers.append(fc)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        self.inplanes = planes

        return nn.Sequential(*layers)
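
_get_deconv_cfg is not shown here; below is a hypothetical reference for the mapping commonly used in CenterNet-style code, where every option yields an exact 2x upsample at stride 2 because ConvTranspose2d output size is (H - 1) * stride - 2 * padding + kernel + output_padding:

def _get_deconv_cfg(deconv_kernel):
    # returns (kernel, padding, output_padding); hypothetical, for illustration
    if deconv_kernel == 4:
        return 4, 1, 0
    elif deconv_kernel == 3:
        return 3, 1, 1
    elif deconv_kernel == 2:
        return 2, 0, 0
    raise ValueError('unsupported deconv kernel: %d' % deconv_kernel)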
Example #7
def __init__(self, chi, cho):
    super(DeformConv, self).__init__()
    self.actf = nn.Sequential(
        nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=True)
    )
    self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1)
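
The matching forward is not shown; a minimal sketch, assuming the DCN output simply feeds the BN + ReLU stack stored in actf:

def forward(self, x):
    x = self.conv(x)   # deformable 3x3, chi -> cho channels
    x = self.actf(x)   # BatchNorm2d + ReLU
    return x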
Example #8
def __init__(self,
             inplanes,
             planes,
             stride=1,
             downsample=None,
             norm_layer=nn.BatchNorm2d,
             dilation=1,
             use_dcn=False):
    super(BasicBlock, self).__init__()
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = norm_layer(planes)
    self.relu = nn.ReLU(inplace=True)
    if use_dcn:
        self.conv2 = DCN(planes,
                         planes,
                         kernel_size=3,
                         stride=stride,
                         padding=dilation,
                         dilation=dilation,
                         deformable_groups=1)
        self.conv2.bias.data.zero_()
        self.conv2.conv_offset_mask.weight.data.zero_()
        self.conv2.conv_offset_mask.bias.data.zero_()
    else:
        self.conv2 = conv3x3(planes, planes)
    self.bn2 = norm_layer(planes)
    self.downsample = downsample
    self.stride = stride
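
The forward is not part of this snippet; a sketch following the standard ResNet BasicBlock pattern that these attribute names suggest:

def forward(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    if self.downsample is not None:
        identity = self.downsample(x)
    return self.relu(out + identity)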
Example #9
def __init__(self, chi, cho):
    super(DeformConv, self).__init__()
    self.actf = nn.Sequential(nn.BatchNorm2d(cho, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
    self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1)
    for name, m in self.actf.named_modules():
        if isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
Example #10
def i_conv(batchNorm,
           in_planes,
           out_planes,
           kernel_size=3,
           stride=1,
           bias=True,
           dcn=False,
           modulation=False):  # note: modulation (and bias, in the DCN branch) is unused here
    if batchNorm:
        if dcn:
            return nn.Sequential(
                DCN(in_planes,
                    out_planes,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=(kernel_size - 1) // 2),
                nn.BatchNorm2d(out_planes),
            )
        else:
            return nn.Sequential(
                nn.Conv2d(in_planes,
                          out_planes,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=(kernel_size - 1) // 2,
                          bias=bias),
                nn.BatchNorm2d(out_planes),
            )

    else:
        if dcn:
            return nn.Sequential(
                DCN(in_planes,
                    out_planes,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=(kernel_size - 1) // 2))
        else:
            return nn.Sequential(
                nn.Conv2d(in_planes,
                          out_planes,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=(kernel_size - 1) // 2,
                          bias=bias))
Example #11
def predict_flow(in_planes, dcn=False, modulation=False):
    if dcn:
        return DCN(in_planes, 2, kernel_size=3, stride=1, padding=1)
    else:
        return nn.Conv2d(in_planes,
                         2,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         bias=True)
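
Usage sketch (assuming torch and torch.nn as nn are imported): the head maps features to a 2-channel flow field at the same resolution.

flow_head = predict_flow(in_planes=128)  # plain-conv path, runs on CPU
feat = torch.randn(1, 128, 32, 32)
print(flow_head(feat).shape)  # torch.Size([1, 2, 32, 32]): (dx, dy) per pixel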
Example #12
def example_dconv():
    from dcn_v2 import DCN
    input = torch.randn(2, 64, 128, 128).cuda()
    # wrap all things (offset and mask) in DCN
    dcn = DCN(64, 64, kernel_size=(3,3), stride=1, padding=1, deformable_groups=2).cuda()
    output = dcn(input)
    target = output.new(*output.size())
    target.data.uniform_(-0.01, 0.01)
    error = (target - output).mean()
    error.backward()
    print(output.shape)
Example #13
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 norm_layer=nn.BatchNorm2d,
                 dilation=1,
                 use_dcn=False):
        super(Bottleneck, self).__init__()

        ##______________________________________    1   _____________________________________________________##
        self.conv1 = nn.Conv2d(inplanes,
                               planes,
                               kernel_size=1,
                               bias=False,
                               dilation=dilation)
        self.bn1 = norm_layer(planes)

        ##______________________________________    2   _____________________________________________________##
        if use_dcn:
            self.conv2 = DCN(planes,
                             planes,
                             kernel_size=3,
                             stride=stride,
                             padding=dilation,
                             dilation=dilation,
                             deformable_groups=1)
            self.conv2.bias.data.zero_()
            self.conv2.conv_offset_mask.weight.data.zero_()
            self.conv2.conv_offset_mask.bias.data.zero_()
        else:
            self.conv2 = nn.Conv2d(planes,
                                   planes,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=dilation,
                                   bias=False,
                                   dilation=dilation)
        self.bn2 = norm_layer(planes)

        ##______________________________________    3   _____________________________________________________##
        self.conv3 = nn.Conv2d(planes,
                               planes * 4,
                               kernel_size=1,
                               bias=False,
                               dilation=dilation)
        self.bn3 = norm_layer(planes * 4)

        ##___________________________________________________________________________________________________##
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride  #  ->  where is this stride used ??
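
On the trailing question: in the standard Bottleneck pattern self.stride is only stored for introspection; the actual striding happens inside conv2, with the externally supplied downsample matching it on the identity path. A forward sketch along those lines (an assumption, since this snippet omits it):

def forward(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    if self.downsample is not None:
        identity = self.downsample(x)
    return self.relu(out + identity)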
Example #14
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """
        :param num_layers:
        :param num_filters:
        :param num_kernels:
        :return:
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            fc = DCN(self.inplanes,
                     planes,
                     kernel_size=(3, 3),
                     stride=1,
                     padding=1,
                     dilation=1,
                     deformable_groups=1)
            # fc = nn.Conv2d(self.inplanes, planes,
            #         kernel_size=3, stride=1,
            #         padding=1, dilation=1, bias=False)
            # fill_fc_weights(fc)
            up = nn.ConvTranspose2d(in_channels=planes,
                                    out_channels=planes,
                                    kernel_size=kernel,
                                    stride=2,
                                    padding=padding,
                                    output_padding=output_padding,
                                    bias=self.deconv_with_bias)
            fill_up_weights(up)

            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)
Example #15
def __init__(self,
             inplanes,
             planes,
             stride=1,
             downsample=None,
             norm_layer=nn.BatchNorm2d,
             dilation=1,
             use_dcn=False,
             activation_func=nn.ReLU(inplace=True)):
    super(Bottleneck, self).__init__()
    self.conv1 = nn.Conv2d(inplanes,
                           planes,
                           kernel_size=1,
                           bias=False,
                           dilation=dilation)
    self.bn1 = norm_layer(planes)
    if use_dcn:
        self.conv2 = DCN(planes,
                         planes,
                         kernel_size=3,
                         stride=stride,
                         padding=dilation,
                         dilation=dilation,
                         deformable_groups=1)
        self.conv2.bias.data.zero_()
        self.conv2.conv_offset_mask.weight.data.zero_()
        self.conv2.conv_offset_mask.bias.data.zero_()
    else:
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=dilation,
                               bias=False,
                               dilation=dilation)
    self.bn2 = norm_layer(planes)
    self.conv3 = nn.Conv2d(planes,
                           planes * 4,
                           kernel_size=1,
                           bias=False,
                           dilation=dilation)
    self.bn3 = norm_layer(planes * 4)
    self.acf = activation_func
    self.downsample = downsample
    self.stride = stride
Example #16
def __init__(self,
             inplanes,
             planes,
             stride=1,
             downsample=None,
             norm_layer=nn.BatchNorm,
             dilation=1,
             use_dcn=False):
    super(Bottleneck, self).__init__()
    self.conv1 = nn.Conv(inplanes,
                         planes,
                         kernel_size=1,
                         bias=False,
                         dilation=dilation)
    self.bn1 = norm_layer(planes)
    if use_dcn:
        self.conv2 = DCN(planes,
                         planes,
                         kernel_size=3,
                         stride=stride,
                         padding=dilation,
                         dilation=dilation,
                         deformable_groups=1)
        init.constant_(self.conv2.bias, 0.0)
        init.constant_(self.conv2.conv_offset_mask.weight, 0.0)
        init.constant_(self.conv2.conv_offset_mask.bias, 0.0)
    else:
        self.conv2 = nn.Conv(planes,
                             planes,
                             kernel_size=3,
                             stride=stride,
                             padding=dilation,
                             bias=False,
                             dilation=dilation)
    self.bn2 = norm_layer(planes)
    self.conv3 = nn.Conv(planes,
                         planes * 4,
                         kernel_size=1,
                         bias=False,
                         dilation=dilation)
    self.bn3 = norm_layer(planes * 4)
    self.relu = nn.ReLU()
    self.downsample = downsample
    self.stride = stride
Example #17
    def __init__(self, pretrained=True):
        super(Net, self).__init__()

        if pretrained:
            print("=> loading checkpoint from pretrained dpn92-5k-1k")
            dpn92 = pretrainedmodels.__dict__['dpn92'](
                num_classes=1000, pretrained='imagenet+5k').features
        else:
            dpn92 = pretrainedmodels.__dict__['dpn92'](
                num_classes=1000, pretrained=False).features

        #   dx: downsample to factor x
        #   ux: upsample to factor x

        #   Haze Density Map Generate sub-Net
        self.d64u1 = UNet(input_nc=3, output_nc=3, nf=8)

        #   Encoder Decoder sub-Net
        self.d8 = dpn92[:5]    # out: 608 channels
        self.d16 = dpn92[5:9]  # out: 1096 channels
        self.d32 = dpn92[9:29]

        self.u16 = DUB(2432, 512, 256)
        self.u8 = DUB(1352, 256, 128)
        self.u4 = DUB(736, 128, 256)
        self.u2 = DUB(256, 64, 128)
        self.u1 = DUB(128, 32, 16)

        self.in16 = nn.InstanceNorm2d(1096, affine=False)
        self.in8 = nn.InstanceNorm2d(608, affine=False)

        # Details Refinement sub-Net
        self.d4u1 = nn.Sequential(nn.Conv2d(3, 16, 3, 1, 1, bias=True),
                                  nn.BatchNorm2d(16), invPixelShuffle(4),
                                  nn.Conv2d(256, 16, 3, 1, 1, bias=True),
                                  nn.BatchNorm2d(16),
                                  nn.Sequential(*[WAB(16) for _ in range(3)]),
                                  nn.Conv2d(16, 256, 3, 1, 1, bias=True),
                                  nn.PixelShuffle(4), nn.BatchNorm2d(16),
                                  nn.Conv2d(16, 13, 3, 1, 1, bias=True))

        self.tail = nn.Sequential(nn.BatchNorm2d(32), nn.ReLU(inplace=True),
                                  DCN(32, 3, 3, 1, 1))
Example #18
def __init__(self,
             filters_in,
             filters_out,
             kernel_size,
             stride,
             pad,
             groups=1,
             norm=None,
             activate=None):
    super(Deformable_Convolutional, self).__init__()
    self.norm = norm
    self.activate = activate
    self.__dcn = DCN(filters_in,
                     filters_out,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=pad,
                     deformable_groups=groups).cuda()
    if norm:
        assert norm in norm_name.keys()
        if norm == "bn":
            self.__norm = norm_name[norm](num_features=filters_out)
    if activate:
        assert activate in activate_name.keys()
        if activate == "leaky":
            self.__activate = activate_name[activate](negative_slope=0.1,
                                                      inplace=True)
        elif activate == "relu":
            self.__activate = activate_name[activate](inplace=True)
        elif activate == "relu6":
            self.__activate = activate_name[activate](inplace=True)
        elif activate == "Mish":
            self.__activate = Mish()
        elif activate == "Swish":
            self.__activate = Swish()
        elif activate == "MEMish":
            self.__activate = MemoryEfficientMish()
        elif activate == "MESwish":
            self.__activate = MemoryEfficientSwish()
        elif activate == "FReLu":
            self.__activate = FReLU()
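
A plausible forward for this class (a sketch; written as a method of the same class, so the name-mangled attributes resolve):

def forward(self, x):
    x = self.__dcn(x)
    if self.norm:
        x = self.__norm(x)
    if self.activate:
        x = self.__activate(x)
    return x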
Example #19
    def __init__(self,
                 input_dim,
                 filters,
                 filter_size,
                 stride=1,
                 bias_attr=False,
                 bn=0,
                 gn=0,
                 af=0,
                 groups=32,
                 act=None,
                 freeze_norm=False,
                 is_test=False,
                 norm_decay=0.,
                 use_dcn=False):
        super(Conv2dUnit, self).__init__()
        self.groups = groups
        self.freeze_norm = freeze_norm
        self.is_test = is_test
        self.norm_decay = norm_decay
        self.use_dcn = use_dcn

        # conv
        if use_dcn:
            self.conv = DCN(input_dim,
                            filters,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            dilation=1,
                            deformable_groups=1)
            self.conv.bias.data.zero_()
            self.conv.conv_offset_mask.weight.data.zero_()
            self.conv.conv_offset_mask.bias.data.zero_()
        else:
            self.conv = torch.nn.Conv2d(input_dim,
                                        filters,
                                        kernel_size=filter_size,
                                        stride=stride,
                                        padding=(filter_size - 1) // 2,
                                        bias=bias_attr)

        # norm
        self.bn = None
        self.gn = None
        self.af = None
        if bn:
            self.bn = torch.nn.BatchNorm2d(filters)
        if gn:
            self.gn = torch.nn.GroupNorm(num_groups=groups,
                                         num_channels=filters)
        if af:
            self.af = AffineChannel(filters)

        # act
        self.act = None
        if act == 'relu':
            self.act = torch.nn.ReLU(inplace=True)
        elif act == 'leaky':
            self.act = torch.nn.LeakyReLU(0.1)
        elif act == 'mish':
            self.act = Mish()
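
A forward consistent with these attributes would apply the conv, then whichever norm was enabled, then the activation; a sketch, assuming bn/gn/af are meant to be used one at a time:

def forward(self, x):
    x = self.conv(x)
    if self.bn is not None:
        x = self.bn(x)
    if self.gn is not None:
        x = self.gn(x)
    if self.af is not None:
        x = self.af(x)
    if self.act is not None:
        x = self.act(x)
    return x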
Example #20
from dcn_v2 import DCN
import torch

x = torch.randn(5, 3, 40, 40).cuda()
net = DCN(x.shape[1], 6, (3, 3), 1, 1).cuda()
y = net(x)
print("y.shape:", y.shape)
torch.onnx.export(net, x, 'dcn.onnx', verbose=False, opset_version=9)

# Dump the input tensor as one comma-separated line.
x_str = ",".join(str(v.item()) for v in x.view(-1)) + "\n"
with open("x_data.txt", "w") as fout:
    fout.write(x_str)

# Dump the output tensor the same way.
y_str = ",".join(str(v.item()) for v in y.view(-1)) + "\n"
with open("y_data.txt", "w") as fout:
    fout.write(y_str)
Example #21
import torch
from dcn_v2 import DCN
input = torch.randn(2, 64, 128, 128).cuda()
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3,3), stride=1, padding=1, deformable_groups=2).cuda()
output = dcn(input)
print(output.shape)
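
As a sanity check (a sketch): a plain 3x3 convolution with the same configuration yields the same output geometry; deformable convolution changes where the kernel samples, not the output shape.

plain = torch.nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1, padding=1).cuda()
print(plain(input).shape)  # torch.Size([2, 64, 128, 128]), same as output.shape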