def __init__(self):
    """Build the TDAN video super-resolution network: a shared feature
    trunk, a cascade of deformable-conv alignment stages, and a
    reconstruction + x4 upscaling tail.
    """
    super(TDAN_VSR, self).__init__()
    self.name = 'TDAN'
    # Per-frame feature extraction: 3 -> 64 channels, then 5 residual blocks.
    self.conv_first = nn.Conv2d(3, 64, 3, padding=1, bias=True)
    self.residual_layer = self.make_layer(Res_Block, 5)
    self.relu = nn.ReLU(inplace=True)
    # deformable alignment stages.
    # cr fuses concatenated reference+neighbor features (128 -> 64).
    # Each off2d_* predicts sampling offsets for the following deformable
    # conv: 18 * 8 = (2 * 3 * 3) * 8, i.e. x/y per kernel tap for each of
    # the 8 deformable groups.
    self.cr = nn.Conv2d(128, 64, 3, padding=1, bias=True)
    self.off2d_1 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.dconv_1 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d_2 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    # NOTE(review): attribute naming is inconsistent (dconv_1 vs
    # deconv_2/deconv_3 vs dconv); kept as-is since forward() presumably
    # references these exact names — verify before renaming.
    self.deconv_2 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d_3 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.deconv_3 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.dconv = ConvOffset2d(64, 64, (3, 3), padding=(1, 1), num_deformable_groups=8)
    # Project aligned features back to a 3-channel LR image.
    self.recon_lr = nn.Conv2d(64, 3, 3, padding=1, bias=True)
    # Fuse the 5 aligned LR frames (5 * 3 channels) into a 64-channel map.
    fea_ex = [nn.Conv2d(5 * 3, 64, 3, padding=1, bias=True),
              nn.ReLU()]
    self.fea_ex = nn.Sequential(*fea_ex)
    self.recon_layer = self.make_layer(Res_Block, 10)
    # x4 upscaling followed by projection to RGB.
    upscaling = [
        Upsampler(default_conv, 4, 64, act=False),
        nn.Conv2d(64, 3, 3, padding=1, bias=False)]
    self.up = nn.Sequential(*upscaling)
    # He (Kaiming) normal initialization for all plain Conv2d layers:
    # std = sqrt(2 / fan_out). (The original comment called this "xavier",
    # but sqrt(2/n) is He initialization.)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
def __init__(self):
    """Small deformable-conv CNN (two deformable conv stages + linear
    classifier) for 28x28 single-channel inputs, built directly on the GPU.

    NOTE(review): hard-coding .cuda() inside __init__ is fragile; letting
    the caller move the model with .cuda()/.to(device) would be cleaner,
    but the inline placement is kept for backward compatibility.
    """
    super(Net_Deform_CUDA, self).__init__()
    self.relu = nn.ReLU()
    self.pool = nn.MaxPool2d(2)
    # Offset predictor for conv1: 1 group * 2 (x, y) * 5 * 5 taps = 50 ch.
    self.conv_off1 = nn.Conv2d(1, 1 * 2 * 5 * 5, 3, 1, 1, bias=False).cuda()
    self.conv1 = ConvOffset2d(1, 16, (5, 5), stride=1, padding=2,
                              num_deformable_groups=1).cuda()
    # Offset predictor for conv2: 4 groups * 2 * 5 * 5 = 200 channels.
    self.conv_off2 = nn.Conv2d(16, 4 * 2 * 5 * 5, 3, 1, 1, bias=False).cuda()
    self.conv2 = ConvOffset2d(16, 32, (5, 5), stride=1, padding=2,
                              num_deformable_groups=4).cuda()
    # Bug fix: every other layer is created on the GPU, but the classifier
    # head was left on the CPU, which raises a device-mismatch error at
    # forward time unless the caller happened to re-move the whole model.
    self.out = nn.Linear(32 * 7 * 7, 10).cuda()
def __init__(self):
    """Build the alignment sub-network: per-frame feature extraction
    followed by a cascade of offset-prediction / deformable-conv pairs,
    ending in a 3-channel LR reconstruction.
    """
    super(align_net_w_feat, self).__init__()
    # Per-frame feature extraction: 3 -> 64 channels, then 5 residual blocks.
    self.conv_first = nn.Conv2d(3, 64, 3, padding=1, bias=True)
    self.residual_layer = self.make_layer(Res_Block, 5)
    self.relu = nn.ReLU(inplace=True)
    # deformable alignment stages.
    # cr fuses concatenated reference+neighbor features (128 -> 64).
    # 18 * 8 = (2 * 3 * 3) * 8 offset channels: x/y per kernel tap for
    # each of the 8 deformable groups.
    self.cr = nn.Conv2d(128, 64, 3, padding=1, bias=True)
    self.off2d_1 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.dconv_1 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d_2 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    # NOTE(review): attribute naming is inconsistent (dconv_1 vs
    # deconv_2/deconv_3 vs dconv); kept as-is since forward() presumably
    # references these exact names — verify before renaming.
    self.deconv_2 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d_3 = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.deconv_3 = ConvOffset2d(64, 64, 3, padding=1, num_deformable_groups=8)
    self.off2d = nn.Conv2d(64, 18 * 8, 3, padding=1, bias=True)
    self.dconv = ConvOffset2d(64, 64, (3, 3), padding=(1, 1), num_deformable_groups=8)
    # Project aligned features back to a 3-channel LR image.
    self.recon_lr = nn.Conv2d(64, 3, 3, padding=1, bias=True)
    # He (Kaiming) normal initialization for all plain Conv2d layers:
    # std = sqrt(2 / fan_out). (The original comment called this "xavier",
    # but sqrt(2/n) is He initialization.)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
def __init__(self, in_channels, growth_rate): super(DenseLayer, self).__init__() # self.add_module('norm', nn.BatchNorm2d(num_features=in_channels)) self.norm = nn.BatchNorm2d(num_features=in_channels) # self.add_module('prelu', nn.PReLU()) self.prelu = nn.PReLU() # author's impl - lasange 'same' pads with half # filter size (rounded down) on "both" sides self.conv_offset = nn.Conv2d(in_channels=in_channels, out_channels=num_deformable_groups * 2 * kH * kW, kernel_size=(kH, kW), stride=(1, 1), padding=(1, 1), bias=True) self.deform_conv = ConvOffset2d( in_channels, growth_rate, (kH, kW), stride=1, padding=1, num_deformable_groups=num_deformable_groups) self.drop = nn.Dropout2d(0.2)
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
             dilation=1, groups=1, bias=False):
    """Deformable-conv wrapper: a plain Conv2d that predicts sampling
    offsets plus the deformable convolution that consumes them.

    NOTE(review): the attribute names are misleading — self.conv is the
    OFFSET predictor (groups * 2 * k * k output channels) and
    self.conv_offset2d is the actual deformable conv. Kept as-is because
    forward() elsewhere presumably uses these exact names.
    NOTE(review): `dilation` is accepted but never used in this body.
    """
    super(Deform_Conv, self).__init__()
    self.out_channels = out_planes
    # Offset predictor: 2 (x, y) channels per kernel tap per group.
    self.conv = nn.Conv2d(in_planes,
                          groups * 2 * kernel_size * kernel_size,
                          kernel_size=(kernel_size, kernel_size),
                          stride=(stride, stride),
                          padding=(padding, padding),
                          bias=bias)
    self.conv_offset2d = ConvOffset2d(in_planes,
                                      out_planes,
                                      (kernel_size, kernel_size),
                                      stride=stride,
                                      padding=padding,
                                      num_deformable_groups=groups)
else: from dcn import ConvOffset2d num_deformable_groups = 2 N, inC, inH, inW = 1, 6, 512, 512 outC, outH, outW = 4, 512, 512 kH, kW = 3, 3 conv = nn.Conv2d(inC, num_deformable_groups * 2 * kH * kW, kernel_size=(kH, kW), stride=(1, 1), padding=(1, 1), bias=False).cuda() conv_offset2d = ConvOffset2d( inC, outC, (kH, kW), stride=1, padding=1, num_deformable_groups=num_deformable_groups).cuda() pdb.set_trace() inputs = Variable(torch.randn(N, inC, inH, inW).cuda()) offset = conv(inputs) output = conv_offset2d(inputs, offset) output.backward(output.data) print(output.size())
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd import gradcheck
from torch.nn.modules.utils import _single, _pair

from modules import ConvOffset2d

# Smoke test: one forward and backward pass of a deformable conv on random
# GPU input, printing the output size.
num_deformable_group = 1

N, inC, inH, inW = 1, 3, 512, 512
outC, outH, outW = 4, 512, 512
kH, kW = 3, 3

# Offset predictor: 2 (x, y) channels per kernel tap per deformable group.
conv = nn.Conv2d(
    inC,
    num_deformable_group * 2 * kH * kW,
    kernel_size=(kH, kW),
    stride=(1, 1),
    padding=(1, 1),
    bias=False).cuda()

# Fix: pass the group count explicitly so the deformable conv always agrees
# with the number of offset channels produced above — previously the
# constructor silently fell back to its default instead of using
# num_deformable_group.
conv_offset2d = ConvOffset2d(
    inC, outC, (kH, kW), stride=1, padding=1,
    num_deformable_groups=num_deformable_group).cuda()

inputs = Variable(torch.randn(N, inC, inH, inW).cuda())
offset = conv(inputs)
output = conv_offset2d(inputs, offset)
output.backward(output.data)
print(output.size())