def __init__(self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d):
    super(SingleStage, self).__init__()
    self.down = ResnetBody(block, init_channel, layers, channels, norm)
    channel = block.expansion * channels[-1]
    self.up1 = M.Sequential(M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv1 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-2]
    self.up2 = M.Sequential(M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv2 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-3]
    self.up3 = M.Sequential(M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv3 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-4]
    self.up4 = M.Sequential(M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
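# Hedged note (not part of the original module): each deconv above uses
# kernel_size=4, stride=2, padding=1, which is an exact 2x spatial upsample
# ((in - 1) * 2 - 2 * 1 + 4 = 2 * in), so the upsampled map matches the size of
# the next lateral 1x1 projection (up2/up3/up4) in a top-down aggregation.
assert (14 - 1) * 2 - 2 * 1 + 4 == 28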
def __init__(self, mode):
    super().__init__()
    self.mode = mode
    self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
    self.normal_conv = M.Conv2d(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1))
    self.group_conv = M.Conv2d(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3)
    self.valid_pad_conv = M.Conv2d(3, 30, 4, padding=(1, 1))
    self.valid_pad_1_conv = M.Conv2d(3, 30, 3, stride=2, padding=(1, 1))
    self.same_pad_conv = M.Conv2d(3, 30, 3, padding=(1, 1))
    self.same_pad_1_conv = M.Conv2d(3, 30, 4, stride=2, padding=(1, 1))
    self.same_pad_2_conv = M.Conv2d(3, 30, 2, dilation=3, stride=2, padding=(1, 1))
    self.normal_conv.bias = mge.Parameter(
        np.random.random(self.normal_conv.bias.shape).astype(np.float32))
    self.group_conv.bias = mge.Parameter(
        np.random.random(self.group_conv.bias.shape).astype(np.float32))
    self.transpose_conv = M.Sequential(
        M.ConvTranspose2d(3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1),
        M.ConvTranspose2d(5, 3, (3, 3)),
    )
    self.transpose_conv[0].bias = mge.Parameter(
        np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32))
    self.transpose_conv[1].bias = mge.Parameter(
        np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32))
    self.tflite_transpose_conv = M.Sequential(
        M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
        M.ConvTranspose2d(5, 3, (3, 3)),
    )
    self.tflite_transpose_conv[0].bias = mge.Parameter(
        np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32))
    self.tflite_transpose_conv[1].bias = mge.Parameter(
        np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32))
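# Hedged sketch (deconv_out_size is a hypothetical helper, not part of the
# test module above): the spatial size produced by ConvTranspose2d follows
#   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1,
# shown here for the first layer of transpose_conv above
# (kernel=(3, 4), stride=(3, 2), padding=(2, 3), dilation=(2, 2)) applied to the
# 224x224 input held in self.data.
def deconv_out_size(size, kernel, stride, padding, dilation=1):
    return (size - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1

print(deconv_out_size(224, kernel=3, stride=3, padding=2, dilation=2))  # 670 (height)
print(deconv_out_size(224, kernel=4, stride=2, padding=3, dilation=2))  # 447 (width)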
def __init__(self, ch=128, nframes=7, input_nc=3, output_nc=3, upscale_factor=4, use_cost_volume=False):
    super(MUCAN, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # build three feature scales for each LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(input_nc, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=4))
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    if use_cost_volume:
        self.AU0 = AU_CV(K=6, d=5, ch=ch)
        self.AU1 = AU_CV(K=5, d=3, ch=ch)
        self.AU2 = AU_CV(K=4, d=3, ch=ch)
    else:
        self.AU0 = AU(ch=ch)
        self.AU1 = AU(ch=ch)
        self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.aggre = M.Conv2d(ch * self.nframes, ch, kernel_size=3, stride=1, padding=1)
    self.main_conv = M.Sequential(
        CARBBlocks(channel_num=ch, block_num=10),
        M.ConvTranspose2d(ch, ch // 2, kernel_size=4, stride=2, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 2, ch // 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.ConvTranspose2d(ch // 2, ch // 4, kernel_size=4, stride=2, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 4, output_nc, kernel_size=3, stride=1, padding=1))
def __init__(self, ch=128, nframes=7, input_nc=3, output_nc=3, upscale_factor=4,
             blocknums1=5, blocknums2=15, non_local=True):
    super(MUCANV2, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # build three feature scales for each LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(input_nc, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=blocknums1)
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    self.AU0 = AU(ch=ch)
    self.AU1 = AU(ch=ch)
    self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    if non_local:
        self.non_local = Separate_non_local(ch, nframes)
    else:
        self.non_local = Identi()
    self.aggre = M.Conv2d(ch * self.nframes, ch, kernel_size=3, stride=1, padding=1)
    self.carbs = M.Sequential(
        CARBBlocks(channel_num=ch, block_num=blocknums2),
    )
    self.main_conv = M.Sequential(
        M.Conv2d(ch, ch * 4, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # 128
        M.Conv2d(ch, ch * 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # 64
        M.Conv2d(ch // 2, ch // 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 2, 3, kernel_size=3, stride=1, padding=1)
    )
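# Hedged channel bookkeeping for main_conv above (assuming PixelShuffle(scale=2)
# trades 4x channels for a 2x larger spatial size, i.e. standard sub-pixel
# upsampling): ch -> ch*4 -> shuffle -> ch at 2x -> ch*2 -> shuffle -> ch//2 at 4x
# -> ch//2 -> 3. With the default ch=128 this is 128 -> 512 -> 128 -> 256 -> 64
# -> 64 -> 3, matching the inline "# 128" and "# 64" comments.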
def __init__(self, cm, ch):
    super(MISR_Block, self).__init__()
    self.model = M.Sequential(
        ResBlocks(channel_num=cm, resblock_num=5, kernel_size=3),
        M.ConvTranspose2d(cm, ch, kernel_size=8, stride=4, padding=2),
        M.PReLU(),
    )
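# Hedged note (illustration only): kernel_size=8 with stride=4 and padding=2 is
# an exact 4x upsample ((in - 1) * 4 - 2 * 2 + 8 = 4 * in) in which neighbouring
# kernel windows overlap, unlike a kernel_size == stride deconv.
assert (16 - 1) * 4 - 2 * 2 + 8 == 64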
def test_convtranspose():
    deconv = M.ConvTranspose2d(32, 32, 3)

    @trace(symbolic=True, capture_as_const=True)
    def fwd(data):
        return deconv(data)

    data = Tensor(np.random.random((1, 32, 32, 32)))
    result = fwd(data)
    # cu111 has 1e-7 diff
    check_pygraph_dump(fwd, [data], [result], 5)
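# Hedged shape check (assumes a working MegEngine install; not part of the test
# itself): with kernel_size=3, stride=1, padding=0 the deconv grows each spatial
# dim by kernel_size - 1, so the traced 32x32 input comes out as 34x34.
import numpy as np
import megengine as mge
import megengine.module as M

deconv = M.ConvTranspose2d(32, 32, 3)
out = deconv(mge.Tensor(np.random.random((1, 32, 32, 32)).astype(np.float32)))
print(out.shape)  # (1, 32, 34, 34)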
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
    super(DeconvLayers, self).__init__()
    _body = []
    for i in range(num_layers):
        kernel = kernels[i]
        padding = (
            kernel // 3
        )  # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
        _body += [
            M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
            norm(nf2s[i]),
            M.ReLU(),
        ]
        nf1 = nf2s[i]
    self.body = M.Sequential(*_body)
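# Hedged illustration (deconv_layers_out_size is a hypothetical helper): with
# stride 2, the padding = kernel // 3 rule gives an output size of
# (in - 1) * 2 - 2 * padding + kernel, so kernels 2 and 4 exactly double the
# spatial size while kernel 3 lands one pixel short.
def deconv_layers_out_size(size, kernel):
    padding = kernel // 3  # 0 for kernel=2, 1 for kernel=3 or kernel=4
    return (size - 1) * 2 - 2 * padding + kernel

assert deconv_layers_out_size(16, 2) == 32
assert deconv_layers_out_size(16, 4) == 32
assert deconv_layers_out_size(16, 3) == 31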
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
    super(UNetUpBlock, self).__init__()
    self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
    self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
    self.num_subspace = subspace_dim
    print(self.num_subspace, subnet_repeat_num)
    self.subnet = Subspace(in_size, self.num_subspace)
    self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def __init__(self, in_channels: int, skip_in_channels: int, out_channels: int):
    super().__init__()
    self.decode_conv = DecoderBlock(in_channels, in_channels, kernel_size=3)
    self.upsample = M.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2, padding=0)
    self.proj_conv = Conv2D(skip_in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                            is_seperable=True, has_relu=True)
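# Hedged note (not part of the original block): kernel_size=2 with stride=2 and
# padding=0 is a non-overlapping 2x upsample ((in - 1) * 2 + 2 = 2 * in), so
# every output pixel receives a contribution from exactly one input pixel and
# the result lines up with the skip feature produced by proj_conv.
assert (28 - 1) * 2 + 2 == 56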
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate

import megengine as mge
import megengine.functional as MF
import megengine.module as MM

module_cache = {
    "conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
    "dw_conv2d": (
        MM.Conv2d(32, 32, 3, 1, 0, groups=32),
        nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
    ),
    "conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
    "ConvTranspose2d": (
        MM.ConvTranspose2d(32, 32, 3, 1, 0),
        nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
    ),
    "BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
    "Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}

test_cases = [
    # (name, mge op, torch op, small inps, large inps, unpack_inps, rep)
    (
        "adaptive_avg_pool2d",
        lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
        lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
        [(2, 32, 16, 16)],
        [(64, 512, 16, 16)],
        True,
def __init__(self, in_channels=3, out_channels=3, mid_channels=160, ch=24, blocknums1=3,
             blocknums2=3, upscale_factor=4, hsa=False, pixel_shuffle=True, window_size=5):
    super(RSDNV4, self).__init__()
    if hsa:
        raise NotImplementedError("")
    else:
        self.hsa = Identi()
    self.window_size = window_size
    self.blocknums1 = blocknums1
    self.blocknums2 = blocknums2
    # build three feature scales for each LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=self.blocknums1)
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    self.AU0 = AU(ch=ch)
    self.AU1 = AU(ch=ch)
    self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.pre_SD_S = M.Sequential(
        Conv2d(48 + mid_channels + self.window_size * ch, mid_channels, 3, 1, 1),
        M.LeakyReLU()
    )
    self.convs = M.Sequential(
        CARBBlocks(channel_num=mid_channels, block_num=self.blocknums2),
    )
    self.hidden = M.Sequential(
        Conv2d(2 * mid_channels, mid_channels, 3, 1, 1),
        M.LeakyReLU(),
        CARBBlocks(channel_num=mid_channels, block_num=3),
    )
    self.tail = M.Sequential(
        CARBBlocks(channel_num=mid_channels, block_num=3),
        Conv2d(mid_channels, 48, 3, 1, 1)
    )
    if pixel_shuffle:
        self.trans_HR = PixelShuffle(upscale_factor)
    else:
        self.trans_HR = ConvTranspose2d(48, 3, 4, 4, 0, bias=True)
def __init__(self, in_channels=3, out_channels=3, mid_channels=128, hidden_channels=3 * 4 * 4,
             ch=24, blocknums=5, upscale_factor=4, hsa=False, pixel_shuffle=False, window_size=5):
    super(RSDNV2, self).__init__()
    if hsa:
        raise NotImplementedError("")
    else:
        self.hsa = Identi()
    self.window_size = window_size
    self.blocknums = blocknums
    self.hidden_channels = hidden_channels
    # build three feature scales for each LR frame (shared by both S and D)
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=blocknums)
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    self.AU0 = AU(ch=ch)
    self.AU1 = AU(ch=ch)
    self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    SDBlocks = []
    for _ in range(blocknums):
        SDBlocks.append(SDBlock(mid_channels))
    self.SDBlocks = M.Sequential(*SDBlocks)
    self.pre_SD_S = M.Sequential(
        Conv2d(hidden_channels * 2 + self.window_size * 2 * ch, mid_channels, 3, 1, 1),
        M.LeakyReLU()
    )
    self.pre_SD_D = M.Sequential(
        Conv2d(hidden_channels * 2 + self.window_size * 2 * ch, mid_channels, 3, 1, 1),
        M.LeakyReLU()
    )
    self.conv_SD = M.Sequential(
        Conv2d(mid_channels, hidden_channels, 3, 1, 1),
        M.LeakyReLU()
    )
    self.convS = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
    self.convD = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
    self.convHR = Conv2d(2 * hidden_channels, hidden_channels, 3, 1, 1)
    if pixel_shuffle:
        self.trans_S = PixelShuffle(upscale_factor)
        self.trans_D = PixelShuffle(upscale_factor)
        self.trans_HR = PixelShuffle(upscale_factor)
    else:
        self.trans_S = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
        self.trans_D = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
        self.trans_HR = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
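# Hedged sketch (assumes MegEngine; the shapes are only illustrative): both
# output branches map hidden_channels = 3 * 4 * 4 = 48 feature maps to an RGB
# image at 4x resolution. The ConvTranspose2d branch does so via
# (in - 1) * 4 - 2 * 0 + 4 = 4 * in.
import numpy as np
import megengine as mge
import megengine.module as M

trans_HR = M.ConvTranspose2d(48, 3, 4, 4, 0, bias=False)
hidden = mge.Tensor(np.random.random((1, 48, 32, 32)).astype(np.float32))
print(trans_HR(hidden).shape)  # (1, 3, 128, 128)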