def __init__(self, in_channel, out_channel, kernel_size=3, padding=1, style_dim=512, initial=False, upsample=False, fused=False):
    """StyleGAN-style styled conv block: conv -> noise -> AdaIN -> LeakyReLU, twice.

    In the collapsed source the `#FusedUpsample` comment swallowed the rest of
    the `nn.Sequential(...)` call; this reconstruction restores the commas and
    keeps the fused path as commented-out code.

    NOTE(review): when initial=False and upsample=False, self.conv1 is never
    assigned (the upstream StyleGAN code has an EqualConv2d fallback here) —
    confirm callers never hit that combination.
    """
    super(StyledConvBlock, self).__init__()
    if initial:
        # Learned constant input replaces the first convolution at 4x4.
        self.conv1 = ConstantInput(in_channel)
    else:
        if upsample:
            if fused:
                # FusedUpsample(in_channel, out_channel, kernel_size, padding=padding)
                # Blur(out_channel)
                self.conv1 = nn.Sequential(
                    nn.Upsample(scale_factor=2, mode='nearest'),
                    nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),  # todo: equal
                )
            else:
                self.conv1 = nn.Sequential(
                    nn.Upsample(scale_factor=2, mode='nearest'),
                    nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),  # todo: equal
                    Blur(out_channel),
                )
    self.noise1 = NoiseInjection(out_channel)
    self.adain1 = AdaptiveInstanceNorm(out_channel, style_dim)
    self.lrelu1 = nn.LeakyReLU(0.2)
    self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding)
    self.noise2 = NoiseInjection(out_channel)
    self.adain2 = AdaptiveInstanceNorm(out_channel, style_dim)
    self.lrelu2 = nn.LeakyReLU(0.2)
def __init__(self):
    """DCGAN-style generator: latent vector -> 4x-upsampled image in [-1, 1]."""
    super(Generator, self).__init__()
    # Spatial size of the map fed to the conv stack (two 2x upsamples follow).
    self.init_size = opt.img_size // 4
    self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
    self.conv_blocks = nn.Sequential(
        nn.BatchNorm(128),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 128, 3, stride=1, padding=1),
        nn.BatchNorm(128, eps=0.8),
        nn.LeakyReLU(scale=0.2),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 64, 3, stride=1, padding=1),
        nn.BatchNorm(64, eps=0.8),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(64, opt.channels, 3, stride=1, padding=1),
        nn.Tanh(),
    )
    # Apply the project's normal weight init to each layer of the stack.
    for layer in self.conv_blocks:
        weights_init_normal(layer)
def __init__(self, in_ch=3, n_classes=2):
    """UNet++ (nested U-Net): 5-level encoder with dense skip pathways."""
    super(NestedUNet, self).__init__()
    base = 64
    filters = [base, base * 2, base * 4, base * 8, base * 16]
    self.pool = nn.Pool(2, stride=2, op='maximum')
    self.Up = nn.Upsample(scale_factor=2, mode='bilinear')
    # Encoder backbone column X_{i,0}.
    self.conv0_0 = DoubleConv(in_ch, filters[0], filters[0])
    self.conv1_0 = DoubleConv(filters[0], filters[1], filters[1])
    self.conv2_0 = DoubleConv(filters[1], filters[2], filters[2])
    self.conv3_0 = DoubleConv(filters[2], filters[3], filters[3])
    self.conv4_0 = DoubleConv(filters[3], filters[4], filters[4])
    # Nested decoder nodes X_{i,j}: each takes j same-level skip maps plus one
    # upsampled deeper map, hence the (j * filters[i] + filters[i+1]) widths.
    self.conv0_1 = DoubleConv(filters[0] + filters[1], filters[0], filters[0])
    self.conv1_1 = DoubleConv(filters[1] + filters[2], filters[1], filters[1])
    self.conv2_1 = DoubleConv(filters[2] + filters[3], filters[2], filters[2])
    self.conv3_1 = DoubleConv(filters[3] + filters[4], filters[3], filters[3])
    self.conv0_2 = DoubleConv(filters[0] * 2 + filters[1], filters[0], filters[0])
    self.conv1_2 = DoubleConv(filters[1] * 2 + filters[2], filters[1], filters[1])
    self.conv2_2 = DoubleConv(filters[2] * 2 + filters[3], filters[2], filters[2])
    self.conv0_3 = DoubleConv(filters[0] * 3 + filters[1], filters[0], filters[0])
    self.conv1_3 = DoubleConv(filters[1] * 3 + filters[2], filters[1], filters[1])
    self.conv0_4 = DoubleConv(filters[0] * 4 + filters[1], filters[0], filters[0])
    # 1x1 head mapping the finest features to class logits.
    self.final = nn.Conv(filters[0], n_classes, 1)
def __init__(self, in_channels, out_channels, bilinear=True):
    """U-Net decoder stage: 2x upsample then DoubleConv over concatenated features."""
    super().__init__()
    # Either parameter-free bilinear upsampling or a learned transposed conv;
    # both branches feed the same DoubleConv (input doubled by skip concat).
    if bilinear:
        self.up = nn.Upsample(scale_factor=2, mode='bilinear')
    else:
        self.up = nn.ConvTranspose(in_channels, in_channels, 2, stride=2)
    self.conv = DoubleConv(in_channels * 2, out_channels, out_channels)
def __init__(self, channel=32, pretrained_backbone=False):
    """PraNet: Res2Net backbone, RFB feature reduction, partial-decoder
    aggregation, and three reverse-attention refinement branches."""
    super(PraNet, self).__init__()
    self.resnet = res2net50_26w_4s(pretrained=pretrained_backbone)
    # Reduce the three deepest backbone stages to `channel` features each.
    self.rfb2_1 = RFB_modified(512, channel)
    self.rfb3_1 = RFB_modified(1024, channel)
    self.rfb4_1 = RFB_modified(2048, channel)
    # Partial decoder producing the coarse global map.
    self.agg1 = aggregation(channel)
    # Reverse-attention branch on the 2048-channel stage (5x5 convs).
    self.ra4_conv1 = BasicConv2d(2048, 256, kernel_size=1)
    self.ra4_conv2 = BasicConv2d(256, 256, kernel_size=5, padding=2)
    self.ra4_conv3 = BasicConv2d(256, 256, kernel_size=5, padding=2)
    self.ra4_conv4 = BasicConv2d(256, 256, kernel_size=5, padding=2)
    self.ra4_conv5 = BasicConv2d(256, 1, kernel_size=1)
    # Reverse-attention branch on the 1024-channel stage.
    self.ra3_conv1 = BasicConv2d(1024, 64, kernel_size=1)
    self.ra3_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)
    self.ra3_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)
    self.ra3_conv4 = BasicConv2d(64, 1, kernel_size=3, padding=1)
    # Reverse-attention branch on the 512-channel stage.
    self.ra2_conv1 = BasicConv2d(512, 64, kernel_size=1)
    self.ra2_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)
    self.ra2_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)
    self.ra2_conv4 = BasicConv2d(64, 1, kernel_size=3, padding=1)
    # Fixed bilinear resamplers used when fusing predictions across scales.
    self.upsample1_1 = nn.Upsample(scale_factor=8, mode='bilinear')
    self.upsample1_2 = nn.Upsample(scale_factor=0.25, mode='bilinear')
    self.upsample2_1 = nn.Upsample(scale_factor=32, mode='bilinear')
    self.upsample2_2 = nn.Upsample(scale_factor=2, mode='bilinear')
    self.upsample3_1 = nn.Upsample(scale_factor=16, mode='bilinear')
    self.upsample3_2 = nn.Upsample(scale_factor=2, mode='bilinear')
    self.upsample4 = nn.Upsample(scale_factor=8, mode='bilinear')
def _make_fuse_layers(self):
    """Build HRNet cross-resolution fusion modules.

    For each output branch ``i`` (all branches when ``multi_scale_output`` is
    set, otherwise only the highest-resolution one) and each input branch
    ``j``:
    - j > i: 1x1 conv to match channels, BN, then nearest upsample by 2**(j-i);
    - j == i: identity, stored as None;
    - j < i: a chain of (i - j) stride-2 3x3 convs; only the final step
      switches to branch i's channel count, and only non-final steps get ReLU.

    Returns:
        nn.ModuleList of per-branch nn.ModuleList, or None for one branch.
    """
    if self.num_branches == 1:
        return None
    num_branches = self.num_branches
    num_inchannels = self.num_inchannels
    fuse_layers = []
    for i in range(num_branches if self.multi_scale_output else 1):
        fuse_layer = []
        for j in range(num_branches):
            if j > i:
                # Lower-resolution input: project channels, then upsample.
                fuse_layer.append(
                    nn.Sequential(
                        nn.Conv(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False),
                        BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM),
                        nn.Upsample(scale_factor=2**(j - i), mode='nearest')))
            elif j == i:
                # Same resolution: no transform needed.
                fuse_layer.append(None)
            else:
                # Higher-resolution input: downsample via stride-2 convs.
                conv3x3s = []
                for k in range(i - j):
                    if k == i - j - 1:
                        # Final step: change channels to branch i's, no ReLU.
                        num_outchannels_conv3x3 = num_inchannels[i]
                        conv3x3s.append(
                            nn.Sequential(
                                nn.Conv(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)))
                    else:
                        # Intermediate step: keep branch j's channel count.
                        num_outchannels_conv3x3 = num_inchannels[j]
                        conv3x3s.append(
                            nn.Sequential(
                                nn.Conv(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM),
                                nn.ReLU(False)))
                fuse_layer.append(nn.Sequential(*conv3x3s))
        fuse_layers.append(nn.ModuleList(fuse_layer))
    return nn.ModuleList(fuse_layers)
def __init__(self):
    """CoGAN coupled generators: one shared trunk feeding two domain heads."""
    super(CoupledGenerators, self).__init__()
    self.init_size = opt.img_size // 4
    self.fc = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
    # Trunk shared across both domains: two 2x upsampling stages.
    self.shared_conv = nn.Sequential(
        nn.BatchNorm(128),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 128, 3, stride=1, padding=1),
        nn.BatchNorm(128, eps=0.8),
        nn.LeakyReLU(0.2),
        nn.Upsample(scale_factor=2),
    )

    def make_head():
        # Domain-specific decoder head producing an image in [-1, 1].
        return nn.Sequential(
            nn.Conv(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm(64, eps=0.8),
            nn.LeakyReLU(0.2),
            nn.Conv(64, opt.channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    self.G1 = make_head()
    self.G2 = make_head()
    for m in self.modules():
        weights_init_normal(m)
def __init__(self):
    """Autoencoder discriminator: stride-2 conv encoder, 32-d bottleneck
    embedding, FC expansion back, and an upsampling conv decoder."""
    super(Discriminator, self).__init__()
    self.down = nn.Sequential(
        nn.Conv(opt.channels, 64, 3, stride=2, padding=1),
        nn.ReLU(),
    )
    self.down_size = opt.img_size // 2
    # Flattened size of the downsampled 64-channel feature map.
    down_dim = 64 * (opt.img_size // 2) ** 2
    self.embedding = nn.Linear(down_dim, 32)
    self.fc = nn.Sequential(
        nn.BatchNorm1d(32, 0.8),
        nn.ReLU(),
        nn.Linear(32, down_dim),
        nn.BatchNorm1d(down_dim),
        nn.ReLU(),
    )
    self.up = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.Conv(64, opt.channels, 3, stride=1, padding=1),
    )
def __init__(self, channel):
    """PraNet partial-decoder aggregation: fuses three pyramid levels
    (each `channel` wide) into a single-channel saliency map."""
    super(aggregation, self).__init__()
    self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    # 3x3 convs applied to upsampled features before multiplication/concat.
    self.conv_upsample1 = BasicConv2d(channel, channel, 3, padding=1)
    self.conv_upsample2 = BasicConv2d(channel, channel, 3, padding=1)
    self.conv_upsample3 = BasicConv2d(channel, channel, 3, padding=1)
    self.conv_upsample4 = BasicConv2d(channel, channel, 3, padding=1)
    self.conv_upsample5 = BasicConv2d(2 * channel, 2 * channel, 3, padding=1)
    # Convs over the progressively concatenated feature stacks.
    self.conv_concat2 = BasicConv2d(2 * channel, 2 * channel, 3, padding=1)
    self.conv_concat3 = BasicConv2d(3 * channel, 3 * channel, 3, padding=1)
    self.conv4 = BasicConv2d(3 * channel, 3 * channel, 3, padding=1)
    # 1x1 head to the single-channel output map.
    self.conv5 = nn.Conv(3 * channel, 1, 1)
def __init__(self, latent_dim, img_shape):
    """U-Net generator conditioned on an image: the latent code is projected
    to one extra h x w plane concatenated with the input channels."""
    super(Generator, self).__init__()
    channels, self.h, self.w = img_shape
    self.fc = nn.Linear(latent_dim, self.h * self.w)
    # Encoder over (image + 1 noise channel).
    self.down1 = UNetDown(channels + 1, 64, normalize=False)
    self.down2 = UNetDown(64, 128)
    self.down3 = UNetDown(128, 256)
    self.down4 = UNetDown(256, 512)
    self.down5 = UNetDown(512, 512)
    self.down6 = UNetDown(512, 512)
    self.down7 = UNetDown(512, 512, normalize=False)
    # Decoder: input widths double where skip connections are concatenated.
    self.up1 = UNetUp(512, 512)
    self.up2 = UNetUp(1024, 512)
    self.up3 = UNetUp(1024, 512)
    self.up4 = UNetUp(1024, 256)
    self.up5 = UNetUp(512, 128)
    self.up6 = UNetUp(256, 64)
    self.final = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.Conv(128, channels, 3, stride=1, padding=1),
        nn.Tanh(),
    )
    for m in self.modules():
        weights_init_normal(m)
def __init__(self, input_shape, num_residual_blocks):
    """CycleGAN-style ResNet generator: 7x7 stem, two stride-2 downsamples,
    residual bottleneck, two upsamples, and a 7x7 tanh output head."""
    super(GeneratorResNet, self).__init__()
    channels = input_shape[0]
    out_features = 64
    # Stem. NOTE(review): the reflection pad width is `channels` (as in the
    # upstream PyTorch-GAN code); this only gives the right padding for the
    # 7x7 conv when channels == 3 — confirm inputs are 3-channel.
    layers = [
        nn.ReflectionPad2d(channels),
        nn.Conv(channels, out_features, 7),
        nn.InstanceNorm2d(out_features, affine=None),
        nn.ReLU(),
    ]
    in_features = out_features
    # Two stride-2 downsampling stages (64 -> 128 -> 256).
    for _ in range(2):
        out_features *= 2
        layers += [
            nn.Conv(in_features, out_features, 3, stride=2, padding=1),
            nn.InstanceNorm2d(out_features, affine=None),
            nn.ReLU(),
        ]
        in_features = out_features
    # Residual bottleneck at the lowest resolution.
    layers += [ResidualBlock(out_features) for _ in range(num_residual_blocks)]
    # Two upsampling stages mirroring the downsamples (256 -> 128 -> 64).
    for _ in range(2):
        out_features //= 2
        layers += [
            nn.Upsample(scale_factor=2),
            nn.Conv(in_features, out_features, 3, stride=1, padding=1),
            nn.InstanceNorm2d(out_features, affine=None),
            nn.ReLU(),
        ]
        in_features = out_features
    # Output head back to the input channel count, squashed to [-1, 1].
    layers += [
        nn.ReflectionPad2d(channels),
        nn.Conv(out_features, channels, 7),
        nn.Tanh(),
    ]
    self.model = nn.Sequential(*layers)
    for m in self.modules():
        weights_init_normal(m)
def __init__(self):
    """Autoencoder-style discriminator: stride-2 conv encoder, 32-d FC
    bottleneck, FC expansion, and an upsampling conv decoder."""
    super(Discriminator, self).__init__()
    self.down = nn.Sequential(
        nn.Conv(opt.channels, 64, 3, 2, 1),
        nn.Relu(),
    )
    self.down_size = opt.img_size // 2
    # Flattened size of the downsampled 64-channel feature map.
    down_dim = 64 * (opt.img_size // 2) ** 2
    self.fc = nn.Sequential(
        nn.Linear(down_dim, 32),
        nn.BatchNorm1d(32, 0.8),
        nn.Relu(),
        nn.Linear(32, down_dim),
        nn.BatchNorm1d(down_dim),
        nn.Relu(),
    )
    self.up = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.Conv(64, opt.channels, 3, 1, 1),
    )
    for m in self.modules():
        weights_init_normal(m)
def __init__(self, in_channels=3, out_channels=3):
    """Pix2pix U-Net generator: 8 downsampling and 7 upsampling stages with
    skip connections, finished by an upsample + 4x4 conv + tanh head."""
    super(GeneratorUNet, self).__init__()
    # Encoder: halves resolution per stage; dropout on the deep layers.
    self.down1 = UNetDown(in_channels, 64, normalize=False)
    self.down2 = UNetDown(64, 128)
    self.down3 = UNetDown(128, 256)
    self.down4 = UNetDown(256, 512, dropout=0.5)
    self.down5 = UNetDown(512, 512, dropout=0.5)
    self.down6 = UNetDown(512, 512, dropout=0.5)
    self.down7 = UNetDown(512, 512, dropout=0.5)
    self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)
    # Decoder: input widths double where skip connections are concatenated.
    self.up1 = UNetUp(512, 512, dropout=0.5)
    self.up2 = UNetUp(1024, 512, dropout=0.5)
    self.up3 = UNetUp(1024, 512, dropout=0.5)
    self.up4 = UNetUp(1024, 512, dropout=0.5)
    self.up5 = UNetUp(1024, 256)
    self.up6 = UNetUp(512, 128)
    self.up7 = UNetUp(256, 64)
    # Asymmetric zero pad keeps the 4x4 conv output size consistent.
    self.final = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.ZeroPad2d((1, 0, 1, 0)),
        nn.Conv(128, out_channels, 4, padding=1),
        nn.Tanh(),
    )
    for m in self.modules():
        weights_init_normal(m)
def test_upsample(self):
    """Check that jittor's Upsample matches torch's for an integer (2) and a
    fractional (0.2) scale factor on a 4-D batch."""
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.Upsample(scale_factor=2), tnn.Upsample(scale_factor=2))
    check_equal(arr, jnn.Upsample(scale_factor=0.2),
                tnn.Upsample(scale_factor=0.2))
def __init__(self, in_size, out_size):
    """U-Net up block: 2x upsample, 3x3 conv (no bias), batch norm, ReLU."""
    super(UNetUp, self).__init__()
    self.model = nn.Sequential(
        nn.Upsample(scale_factor=2),
        nn.Conv(in_size, out_size, 3, stride=1, padding=1, bias=False),
        nn.BatchNorm(out_size, 0.8),
        nn.Relu(),
    )
def __init__(self, cin, cout, zdim=128, nf=64, activation=nn.Tanh):
    """Encoder-decoder with a zdim bottleneck (unsup3d-style EDDeconv).

    Encodes a cin-channel input through four stride-2 convs down to a zdim
    bottleneck, then decodes with ConvTranspose/Conv pairs plus one final
    nearest-neighbour upsample and two 5x5 refinement convs to cout channels.
    `activation` is a module class (default nn.Tanh) appended when not None.
    """
    super(EDDeconv, self).__init__()
    # Encoder: 4 stride-2 downsamples, then a 4x4 conv to the bottleneck.
    network = [
        nn.Conv(cin, nf, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(nf, (nf * 2), 4, stride=2, padding=1, bias=False),
        nn.GroupNorm((16 * 2), (nf * 2)),
        nn.LeakyReLU(scale=0.2),
        nn.Conv((nf * 2), (nf * 4), 4, stride=2, padding=1, bias=False),
        nn.GroupNorm((16 * 4), (nf * 4)),
        nn.LeakyReLU(scale=0.2),
        # NOTE(review): no GroupNorm before this activation, unlike the other
        # encoder stages — confirm this matches the reference implementation.
        nn.Conv((nf * 4), (nf * 8), 4, stride=2, padding=1, bias=False),
        nn.LeakyReLU(scale=0.2),
        nn.Conv((nf * 8), zdim, 4, stride=1, padding=0, bias=False),
        nn.ReLU()
    ]
    # Decoder: mirror of the encoder with ConvTranspose upsampling, then a
    # nearest upsample and two 5x5 convs before the cout-channel output conv.
    network += [
        nn.ConvTranspose(zdim, (nf * 8), 4, stride=1, padding=0, bias=False),
        nn.ReLU(),
        nn.Conv((nf * 8), (nf * 8), 3, stride=1, padding=1, bias=False),
        nn.ReLU(),
        nn.ConvTranspose((nf * 8), (nf * 4), 4, stride=2, padding=1, bias=False),
        nn.GroupNorm((16 * 4), (nf * 4)),
        nn.ReLU(),
        nn.Conv((nf * 4), (nf * 4), 3, stride=1, padding=1, bias=False),
        nn.GroupNorm((16 * 4), (nf * 4)),
        nn.ReLU(),
        nn.ConvTranspose((nf * 4), (nf * 2), 4, stride=2, padding=1, bias=False),
        nn.GroupNorm((16 * 2), (nf * 2)),
        nn.ReLU(),
        nn.Conv((nf * 2), (nf * 2), 3, stride=1, padding=1, bias=False),
        nn.GroupNorm((16 * 2), (nf * 2)),
        nn.ReLU(),
        nn.ConvTranspose((nf * 2), nf, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.Conv(nf, nf, 3, stride=1, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv(nf, nf, 3, stride=1, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.Conv(nf, nf, 5, stride=1, padding=2, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.Conv(nf, cout, 5, stride=1, padding=2, bias=False)
    ]
    if (activation is not None):
        network += [activation()]
    self.network = nn.Sequential(*network)