def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None,
             act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv'):
    """RRDB-based super-resolution generator (ESRGAN-style).

    Args:
        in_nc: number of input channels.
        out_nc: number of output channels.
        nf: number of intermediate feature channels.
        nb: number of RRDB blocks in the trunk.
        gc: growth channels inside each RRDB.
        upscale: overall upscaling factor (a power of two, or 3).
        norm_type / act_type / mode: forwarded to the building blocks.
        res_scale: accepted for interface compatibility; not used here.
        upsample_mode: 'upconv' or 'pixelshuffle'.

    Raises:
        NotImplementedError: if `upsample_mode` is not recognised.
    """
    super(RRDB_Net, self).__init__()
    # Each upsampling step doubles resolution; upscale == 3 uses one x3 step.
    n_upscale = int(math.log(upscale, 2))
    if upscale == 3:
        n_upscale = 1

    fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
    # BUGFIX: `gc` was previously ignored (hard-coded 32); pass the parameter through.
    # NOTE(review): the RRDB blocks are fixed to mode='CNA' while `mode` is only
    # forwarded to LR_conv — confirm this asymmetry is intentional.
    rb_blocks = [B.RRDB(nf, kernel_size=3, gc=gc, stride=1, bias=True, pad_type='zero',
                        norm_type=norm_type, act_type=act_type, mode='CNA')
                 for _ in range(nb)]
    LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type,
                           act_type=None, mode=mode)

    if upsample_mode == 'upconv':
        upsample_block = B.upconv_blcok  # (sic) spelling matches the blocks module
    elif upsample_mode == 'pixelshuffle':
        upsample_block = B.pixelshuffle_block
    else:
        raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode)
    if upscale == 3:
        upsampler = upsample_block(nf, nf, 3, act_type=act_type)
    else:
        upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]

    HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
    HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)

    # Trunk is wrapped in a long (shortcut) residual connection.
    self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),
                              *upsampler, HR_conv0, HR_conv1)
def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4, norm_type=None,
             act_type='relu', mode='CNA', res_scale=1, upsample_mode='pixelshuffle'):
    """SRResNet generator: conv stem, residual trunk with a long skip,
    upsampler, and a two-conv reconstruction tail.

    Raises:
        NotImplementedError: if `upsample_mode` is not recognised.
    """
    super(SRResNet, self).__init__()
    # One x3 step for upscale == 3, otherwise log2(upscale) doubling steps.
    steps = 1 if upscale == 3 else int(math.log(upscale, 2))

    stem = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
    trunk = [B.ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,
                           mode=mode, res_scale=res_scale) for _ in range(nb)]
    trunk_tail = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type,
                              act_type=None, mode=mode)

    # Dispatch the upsampling builder by name.
    try:
        upsample_block = {'upconv': B.upconv_blcok,
                          'pixelshuffle': B.pixelshuffle_block}[upsample_mode]
    except KeyError:
        raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
    if upscale == 3:
        upsampler = upsample_block(nf, nf, 3, act_type=act_type)
    else:
        upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(steps)]

    tail0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
    tail1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)

    self.model = B.sequential(stem, B.ShortcutBlock(B.sequential(*trunk, trunk_tail)),
                              *upsampler, tail0, tail1)
def __init__(self, in_nc=1, out_nc=1, nf=16, nb=16, gc=16, norm_type=None,
             act_type='leakyrelu', mode='CNA'):
    """Denoising network: RRDB trunk with a long skip connection and a
    sigmoid-bounded output.

    The RRDB blocks use dilations (1, 2, 4) to enlarge the receptive field
    without downsampling.
    """
    super(DenoseNet, self).__init__()
    stem = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
    # NOTE(review): `mode` is only forwarded to the trunk-tail conv; the RRDB
    # blocks are fixed to 'CNA' — confirm this asymmetry is intentional.
    trunk = [B.RRDB(nf, kernel_size=3, gc=gc, stride=1, bias=True, pad_type='zero',
                    norm_type=norm_type, act_type=act_type, mode='CNA',
                    dilation=(1, 2, 4), groups=1) for _ in range(nb)]
    trunk_tail = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type,
                              act_type=None, mode=mode)
    tail0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
    tail1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
    self.model = B.sequential(stem, B.ShortcutBlock(B.sequential(*trunk, trunk_tail)),
                              tail0, tail1, nn.Sigmoid())
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None,
             act_type='leakyrelu', res_scale=1):
    """RRDB-based super-resolution generator (upconv upsampling only).

    Args:
        in_nc: number of input channels.
        out_nc: number of output channels.
        nf: number of intermediate feature channels.
        nb: number of RRDB blocks in the trunk.
        gc: growth channels inside each RRDB.
        upscale: upscaling factor; must be a power of two — each upconv step
            doubles the resolution (upscale == 3 is NOT handled by this variant).
        norm_type / act_type: forwarded to the building blocks.
        res_scale: accepted for interface compatibility; not used here.
    """
    super(RRDB_Net, self).__init__()
    n_upscale = int(math.log(upscale, 2))

    fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
    # BUGFIX: `gc` was previously ignored (hard-coded 32); pass the parameter through.
    rb_blocks = [B.RRDB(nf, kernel_size=3, gc=gc, stride=1, bias=True,
                        norm_type=norm_type, act_type=act_type) for _ in range(nb)]
    LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None)

    upsample_block = B.upconv_blcok  # (sic) spelling matches the blocks module
    upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]

    HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
    HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)

    # Trunk is wrapped in a long (shortcut) residual connection.
    self.model = B.sequential(
        fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),
        *upsampler, HR_conv0, HR_conv1)
def __init__(self, lm_nc, pan_nc=1, downscale=4, norm_type=None,
             act_type='leakyrelu', mode='CNA'):
    """Build LM and PAN reconstruction branches.

    The LM branch applies a depthwise 3x3 conv followed by bilinear
    downsampling by `downscale`; the PAN branch is a single bias-free 1x1
    conv projecting `lm_nc` channels to `pan_nc`.
    """
    super().__init__()
    assert downscale in (2, 4), 'downscale should be 2 or 4'
    self.downscale = downscale

    # LM reconstruction: per-channel (groups=lm_nc) smoothing, then downscale.
    smooth = B.conv_block(lm_nc, lm_nc, kernel_size=3, stride=1, groups=lm_nc,
                          bias=False, norm_type=None, act_type=None, mode=mode)
    shrink = nn.Upsample(scale_factor=1 / downscale, mode='bilinear')
    self.LM = B.sequential(smooth, shrink)

    # PAN reconstruction: 1x1 channel projection (lm_nc -> pan_nc).
    project = B.conv_block(lm_nc, pan_nc, kernel_size=1, stride=1, bias=False,
                           norm_type=None, act_type=None, mode=mode)
    self.PAN = B.sequential(project)
def __init__(self, in_nc=5, out_nc=4, nf=64, nb=None, upscale=4, norm_type=None,
             act_type='relu', mode='CNA', upsample_mode='upconv'):
    """Three-layer SRCNN-style network (9-5-5 kernels, no normalisation).

    Note: `nb`, `norm_type`, `mode` and `upsample_mode` are accepted for
    interface compatibility with the other architectures but are not used.
    """
    super().__init__()
    self.upscale = upscale
    head = B.conv_block(in_nc, nf, kernel_size=9, norm_type=None, act_type=act_type)
    body = B.conv_block(nf, nf // 2, kernel_size=5, norm_type=None, act_type=act_type)
    tail = B.conv_block(nf // 2, out_nc, kernel_size=5, norm_type=None, act_type=None)
    self.model = B.sequential(head, body, tail)
def __init__(self):
    """ResNet-101-style segmentation network for outdoor scenes (8 classes)."""
    super(OutdoorSceneSeg, self).__init__()
    body = []
    # Stem: three 3x3 convs (first one strided) plus a max-pool -> overall /4.
    body.append(B.conv_block(3, 64, 3, 2, 1, 1, False, 'zero', 'batch'))   # /2
    body.append(B.conv_block(64, 64, 3, 1, 1, 1, False, 'zero', 'batch'))
    body.append(B.conv_block(64, 128, 3, 1, 1, 1, False, 'zero', 'batch'))
    body.append(nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True))      # /2
    # conv2_x: 3 bottleneck blocks.
    body.append(Res131(128, 64, 256))
    body.extend(Res131(256, 64, 256) for _ in range(2))
    # conv3_x: 4 bottleneck blocks, first one strided (/2).
    body.append(Res131(256, 128, 512, 1, 2))
    body.extend(Res131(512, 128, 512) for _ in range(3))
    # conv4_x: 23 bottleneck blocks with dilation 2.
    body.append(Res131(512, 256, 1024, 2))
    body.extend(Res131(1024, 256, 1024, 2) for _ in range(22))
    # conv5_x: 3 bottleneck blocks with dilation 4.
    body.extend(Res131(2048 if i else 1024, 512, 2048, 4) for i in range(3))
    body.append(B.conv_block(2048, 512, 3, 1, 1, 1, False, 'zero', 'batch'))
    body.append(nn.Dropout(0.1))
    # conv6: 1x1 classifier conv -> 8 class maps.
    body.append(nn.Conv2d(512, 8, 1, 1))
    self.feature = B.sequential(*body)
    # Grouped transpose conv: learned x8 upsampling back to input resolution.
    self.deconv = nn.ConvTranspose2d(8, 8, 16, 8, 4, 0, 8, False, 1)
    # Per-pixel class probabilities over channel dim 1.
    self.softmax = nn.Softmax(1)
def __init__(self, in_nc, mid_nc, out_nc, dilation=1, stride=1):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet style).

    The final 1x1 conv carries no activation; a 1x1 projection shortcut is
    created only when the input and output channel counts differ.
    """
    super(Res131, self).__init__()
    reduce_conv = B.conv_block(in_nc, mid_nc, 1, 1, 1, 1, False, 'zero', 'batch')
    spatial_conv = B.conv_block(mid_nc, mid_nc, 3, stride, dilation, 1, False,
                                'zero', 'batch')
    expand_conv = B.conv_block(mid_nc, out_nc, 1, 1, 1, 1, False, 'zero',
                               'batch', None)  # no ReLU on the last conv
    self.res = B.sequential(reduce_conv, spatial_conv, expand_conv)
    self.has_proj = in_nc != out_nc
    if self.has_proj:
        # Channel-matching (and possibly strided) shortcut projection.
        self.proj = B.conv_block(in_nc, out_nc, 1, stride, 1, 1, False, 'zero',
                                 'batch', None)
def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):
    """VGG-style discriminator for 192x192 inputs.

    A stem plus five (3x3 stride-1, 4x4 stride-2) conv pairs reduce the
    input to a 3x3 feature map (512 channels for base_nf = 64), followed
    by a two-layer MLP scorer.
    """
    super(Discriminator_VGG_192, self).__init__()
    nf = base_nf
    layers = [
        # Stem at full 192x192 resolution; no normalisation on the first conv.
        B.conv_block(in_nc, nf, kernel_size=3, norm_type=None,
                     act_type=act_type, mode=mode),
        B.conv_block(nf, nf, kernel_size=4, stride=2, norm_type=norm_type,
                     act_type=act_type, mode=mode),  # -> 96
    ]
    # (in, out) channel pairs for the five remaining downsampling stages:
    # 96->48, 48->24, 24->12, 12->6, 6->3.
    for cin, cout in ((nf, nf * 2), (nf * 2, nf * 4), (nf * 4, nf * 8),
                      (nf * 8, nf * 8), (nf * 8, nf * 8)):
        layers.append(B.conv_block(cin, cout, kernel_size=3, stride=1,
                                   norm_type=norm_type, act_type=act_type, mode=mode))
        layers.append(B.conv_block(cout, cout, kernel_size=4, stride=2,
                                   norm_type=norm_type, act_type=act_type, mode=mode))
    self.features = B.sequential(*layers)
    self.classifier = nn.Sequential(nn.Linear(512 * 3 * 3, 100),
                                    nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu'):
    """VGG-style discriminator for 128x128 inputs.

    A stem plus four (3x3 stride-1, 4x4 stride-2) conv pairs reduce the
    input to a 4x4 feature map (512 channels for base_nf = 64), followed
    by a two-layer MLP scorer.
    """
    super(Discriminator_VGG_128, self).__init__()
    nf = base_nf
    layers = [
        # Stem at full 128x128 resolution; no normalisation on the first conv.
        B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=act_type),
        B.conv_block(nf, nf, kernel_size=4, stride=2, norm_type=norm_type,
                     act_type=act_type),  # -> 64
    ]
    # (in, out) channel pairs for the four remaining downsampling stages:
    # 64->32, 32->16, 16->8, 8->4.
    for cin, cout in ((nf, nf * 2), (nf * 2, nf * 4),
                      (nf * 4, nf * 8), (nf * 8, nf * 8)):
        layers.append(B.conv_block(cin, cout, kernel_size=3, stride=1,
                                   norm_type=norm_type, act_type=act_type))
        layers.append(B.conv_block(cout, cout, kernel_size=4, stride=2,
                                   norm_type=norm_type, act_type=act_type))
    self.features = B.sequential(*layers)
    self.classifier = nn.Sequential(nn.Linear(512 * 4 * 4, 100),
                                    nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def __init__(self, lm_nc=4, base_nf=64, norm_type='batch',
             act_type='leakyrelu', mode='CNA'):
    """VGG-style discriminator split into shallow and deep feature stages.

    The shallow stage reduces a 256x256 `lm_nc`-channel input to 32x32;
    the deep stage continues down to a 4x4 map (512 channels for
    base_nf = 64) feeding the linear classifier head.
    """
    super().__init__()

    def stage(cin, cout):
        # One 3x3 stride-1 conv followed by a 4x4 stride-2 (halving) conv.
        return [B.conv_block(cin, cout, kernel_size=3, stride=1,
                             norm_type=norm_type, act_type=act_type, mode=mode),
                B.conv_block(cout, cout, kernel_size=4, stride=2,
                             norm_type=norm_type, act_type=act_type, mode=mode)]

    nf = base_nf
    shallow = [
        # Stem; no normalisation on the very first conv.
        B.conv_block(lm_nc, nf, kernel_size=3, norm_type=None,
                     act_type=act_type, mode=mode),
        B.conv_block(nf, nf, kernel_size=4, stride=2, norm_type=norm_type,
                     act_type=act_type, mode=mode),  # 256 -> 128
    ]
    shallow += stage(nf, nf * 2)      # -> 64
    shallow += stage(nf * 2, nf * 4)  # -> 32
    self.feature_shallow = B.sequential(*shallow)

    deep = stage(nf * 4, nf * 8)      # -> 16
    deep += stage(nf * 8, nf * 8)     # -> 8
    deep += stage(nf * 8, nf * 8)     # -> 4
    self.feature_deep = B.sequential(*deep)

    self.classifier = nn.Sequential(nn.Linear(512 * 4 * 4, 100),
                                    nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def __init__(self, lm_nc, pan_nc=1, base_nf=64, downscale=4, norm_type='batch',
             act_type='leakyrelu', mode='CNA'):
    """Two-stream (LM + PAN) discriminator.

    The LM stream keeps its 64x64 resolution; the PAN stream downsamples
    its input to 64x64.  The concatenated features are fused and reduced
    to a 4x4x512 map for the linear classifier head.
    """
    super().__init__()
    assert downscale in (2, 4), 'downscale should be 2 or 4'

    half_nf = base_nf // 2
    quarter_nf = base_nf // 4

    # LM stream: (bs, lm_nc, 64, 64) -> (bs, base_nf//2, 64, 64).
    lm0 = B.conv_block(lm_nc, half_nf, kernel_size=3, norm_type=None,
                       act_type=act_type, mode=mode)
    lm1 = B.conv_block(half_nf, half_nf, kernel_size=3, stride=1,
                       norm_type=norm_type, act_type=act_type, mode=mode)
    self.LM = B.sequential(lm0, lm1)

    # PAN stream: e.g. (bs, 1, 256, 256) -> (bs, base_nf//2, 64, 64).
    # The two middle convs exist only for downscale == 4 (two halvings needed).
    # NOTE(review): for downscale == 2 the final conv expects base_nf//2 input
    # channels but receives base_nf//4 from the first conv — confirm whether
    # the 2x path is actually used.
    pan0 = B.conv_block(pan_nc, quarter_nf, kernel_size=3, norm_type=None,
                        act_type=act_type, mode=mode)
    pan1 = pan2 = None
    if downscale == 4:
        pan1 = B.conv_block(quarter_nf, quarter_nf, kernel_size=4, stride=2,
                            norm_type=norm_type, act_type=act_type, mode=mode)  # -> 128
        pan2 = B.conv_block(quarter_nf, half_nf, kernel_size=3, stride=1,
                            norm_type=norm_type, act_type=act_type, mode=mode)
    pan3 = B.conv_block(half_nf, half_nf, kernel_size=4, stride=2,
                        norm_type=norm_type, act_type=act_type, mode=mode)  # -> 64
    self.PAN = B.sequential(pan0, pan1, pan2, pan3)

    # Fusion of the concatenated LM/PAN features: (bs, base_nf, 64, 64) -> same.
    self.Fus = B.sequential(B.conv_block(base_nf, base_nf, kernel_size=3,
                                         norm_type=None, act_type=act_type, mode=mode))

    # Head: repeatedly halve the spatial size and double the channels until
    # 512, then one final halving conv: (bs, base_nf, 64, 64) -> (bs, 512, 4, 4).
    convs = []
    width = base_nf
    while width < 512:
        convs.append(B.conv_block(width, width, kernel_size=4, stride=2,
                                  norm_type=norm_type, act_type=act_type, mode=mode))
        convs.append(B.conv_block(width, width * 2, kernel_size=3, stride=1,
                                  norm_type=norm_type, act_type=act_type, mode=mode))
        width *= 2
    convs.append(B.conv_block(512, 512, kernel_size=4, stride=2,
                              norm_type=norm_type, act_type=act_type, mode=mode))
    self.head = B.sequential(*convs)

    self.classifier = nn.Sequential(nn.Linear(512 * 4 * 4, 100),
                                    nn.LeakyReLU(0.2, True), nn.Linear(100, 1))