    def __init__(self, skeletoner, num_classes=68, heatmap_size=64):
        super().__init__()
        self.num_classes = num_classes
        self.heatmap_size = heatmap_size
        self.model = hg2(num_classes=self.num_classes, num_blocks=1)

        NormClass = nn.BatchNorm2d

        # Four stride-2 convs reduce the 64x64 heatmaps to 4x4
        # (64 -> 32 -> 16 -> 8 -> 4); a linear head then regresses
        # sigmoid-normalized (x, y) coordinates for each class.
        self.hm_to_coord = nn.Sequential(
            nn.Conv2d(num_classes, num_classes, 4, 2, 1),
            NormClass(num_classes),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(num_classes, num_classes, 4, 2, 1),
            NormClass(num_classes),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(num_classes, num_classes, 4, 2, 1),
            NormClass(num_classes),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(num_classes, num_classes, 4, 2, 1),
            NormClass(num_classes),
            nn.LeakyReLU(0.2, inplace=True),
            View(num_classes * 4 * 4),
            nn.Linear(num_classes * 4 * 4, num_classes * 2),
            nn.Sigmoid(),
            View(num_classes, 2))

        self.skeletoner = skeletoner
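View is used throughout these examples but never defined in them. A minimal sketch of what it might look like (an assumption, not the project's actual helper): this version preserves the batch dimension, which matches uses like View(-1) and View(num_classes, 2); later examples such as View(-1, noise_size, 1, 1) appear to pass the full shape instead, so the real implementation may differ.

import torch.nn as nn

class View(nn.Module):
    """Reshape the non-batch dimensions of the input to a fixed target shape."""

    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        # View(-1) flattens per sample; View(68, 2) yields (B, 68, 2)
        return x.view(x.size(0), *self.shape)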
Example #2
def unet2_256():
    return UNet2(
        down_blocks=[
            nn.Sequential(Conv2dBlock(3, 64, 1),
                          Conv2dBlock(64, 64, 4, downsample=True)),  # 128
            Conv2dBlock(64, 128, 4, downsample=True),  # 64
            Conv2dBlock(128, 256, 4, downsample=True),  # 32
            Conv2dBlock(256, 256, 4, downsample=True),  # 16
            Conv2dBlock(256, 512, 4, downsample=True),  # 8
            Conv2dBlock(512, 512, 4, downsample=True),  # 4
            nn.Sequential(View(-1), LinearBlock(512 * 4 * 4, 512),
                          LinearBlock(512, 512))
        ],
        up_blocks=[
            nn.Sequential(Conv2dBlock(67, 64, 3),
                          Conv2dBlock(64, 1, 3, activate=None)),
            TransposeConv2dBlock(128, 64, 4),
            TransposeConv2dBlock(256, 64, 4),
            TransposeConv2dBlock(512, 128, 4),
            TransposeConv2dBlock(512, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            nn.Sequential(LinearBlock(512, 256 * 4 * 4), View(256, 4, 4))
        ])
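A hypothetical smoke test for unet2_256, assuming UNet2 and its block helpers are importable and that UNet2.forward maps an image to a same-resolution map. The down-block comments trace 256 -> 4, and the 67 = 64 + 3 input channels of the last up block suggest the RGB input is concatenated back in before the one-channel head.

import torch

net = unet2_256()
x = torch.randn(1, 3, 256, 256)
y = net(x)  # expected: (1, 1, 256, 256), given the Conv2dBlock(64, 1, 3) head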
Example #3
def unet2_200():
    return UNet2(
        down_blocks=[
            nn.Sequential(Conv2dBlock(1, 64, 1),
                          Conv2dBlock(64, 64, 4, downsample=True)),  # 100
            Conv2dBlock(64, 128, 4, downsample=True),  # 50
            Conv2dBlock(128, 256, 4, downsample=True),  # 25
            Conv2dBlock(256, 256, 4, downsample=True),  # 12
            Conv2dBlock(256, 512, 4, downsample=True),  # 6
            Conv2dBlock(512, 512, 4, downsample=True),  # 3
            nn.Sequential(View(-1), LinearBlock(512 * 3 * 3, 512),
                          LinearBlock(512, 512))
        ],
        up_blocks=[
            nn.Sequential(Conv2dBlock(65, 64, 3),
                          Conv2dBlock(64, 1, 3, activate=None)),
            TransposeConv2dBlock(128, 64, 4),
            TransposeConv2dBlock(256, 64, 4),
            nn.Sequential(TransposeConv2dBlock(512, 128, 4),
                          nn.ReplicationPad2d(1)),
            TransposeConv2dBlock(512, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            nn.Sequential(LinearBlock(512, 256 * 3 * 3), View(256, 3, 3))
        ])
Example #4
    def __init__(self):
        super().__init__()

        self.down = nn.ModuleList([
            nn.Sequential(ConvLayer(3, 64, 1), ConvLayer(64, 64, 3, downsample=True)),
            ConvLayer(64, 128, 3, downsample=True),
            ConvLayer(128, 256, 3, downsample=True),
            ConvLayer(256, 256, 3, downsample=True),
            ConvLayer(256, 512, 3, downsample=True),
            ConvLayer(512, 512, 3, downsample=True),
            nn.Sequential(View(-1), EqualLinear(512 * 4 * 4, 512, activation='fused_lrelu')),
        ])

        self.middle = nn.ModuleList([
            ConvLayer(256, 128, 3),
            ConvLayer(256, 128, 3),
            ConvLayer(512, 256, 3),
            ConvLayer(512, 256, 3),
            nn.Sequential(EqualLinear(512, 256 * 4 * 4, activation='fused_lrelu'), View(256, 4, 4)),
        ])

        self.up = nn.ModuleList([
            nn.Sequential(ScaledConvTranspose2d(256, 68, 3), ConvLayer(68, 68, 1, activate=False), nn.Softplus()),
            ScaledConvTranspose2d(256, 128, 3),
            ScaledConvTranspose2d(512, 128, 3),
            ScaledConvTranspose2d(512, 256, 3),
        ])
Example #5
    def __init__(self,
                 heatmapper,
                 num_classes=68,
                 heatmap_size=64,
                 image_size=256,
                 num_blocks=1):
        super().__init__()
        self.num_classes = num_classes
        self.heatmap_size = heatmap_size
        self.model = hg2(num_classes=self.num_classes, num_blocks=num_blocks)

        NormClass = nn.BatchNorm2d

        self.hm_to_coord = []

        # e.g. image_size=256: int(log2(256 // 4)) - 2 = 4 stride-2 convs,
        # taking the 64x64 heatmaps down to 4x4
        num_convs = int(math.log2(image_size // 4)) - 2
        for _ in range(num_convs):
            self.hm_to_coord += [
                nn.Conv2d(num_classes, num_classes, 4, 2, 1),
                NormClass(num_classes),
                nn.LeakyReLU(0.2, inplace=True),
            ]

        self.hm_to_coord += [
            View(num_classes * 4 * 4),
            nn.Linear(num_classes * 4 * 4, num_classes * 2),
            nn.Sigmoid(),
            View(num_classes, 2)
        ]

        self.hm_to_coord = nn.Sequential(*self.hm_to_coord)

        self.heatmapper = heatmapper
        self.up = nn.Upsample(size=image_size)
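The num_convs formula sizes the strided-conv stack so the heatmaps land at 4x4 before the linear head. Assuming hg2 emits heatmaps at a quarter of the input resolution (which the image_size // 4 term implies), the default image_size=256 works out as follows:

import math

image_size = 256
heatmap_res = image_size // 4                  # 64x64 hourglass output
num_convs = int(math.log2(heatmap_res)) - 2    # 4 stride-2 convs
assert heatmap_res // 2 ** num_convs == 4      # 64 -> 32 -> 16 -> 8 -> 4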
Example #6
    def __init__(self, channels=3):
        super(ConvICNN128, self).__init__()

        self.first_linear = nn.Sequential(
            EqualConv2d(channels, 64, kernel_size=3, padding=1, bias=True), )

        self.first_squared = nn.Sequential(
            EqualConv2d(channels, 64, kernel_size=3, padding=1, bias=True), )

        # convex tower: positive-weight strided convs and positive linear
        # layers (note the hard-coded .cuda() call, kept from the original)
        self.convex = nn.Sequential(
            nn.LeakyReLU(0.2),
            PosConv2d(64, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            PosConv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            PosConv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            PosConv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            PosConv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            PosConv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.LeakyReLU(0.2),
            View(-1, 128 * 4 * 4),
            nn.LeakyReLU(0.2),
            PosLinear(128 * 4 * 4, 128, activation=False),
            nn.LeakyReLU(0.2),
            PosLinear(128, 1, activation=False)).cuda()
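PosConv2d and PosLinear are what make ConvICNN128 input-convex: convex, nondecreasing activations composed through nonnegative weights preserve convexity. Their implementations are not shown here; a common way to enforce the constraint, offered purely as an assumption about what they might do, is to pass a raw weight through softplus:

import torch
import torch.nn as nn
import torch.nn.functional as F

class PosLinearSketch(nn.Module):
    """Hypothetical linear layer whose effective weights are always positive."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        self.bias = nn.Parameter(torch.zeros(out_features)) if bias else None

    def forward(self, x):
        # softplus keeps every effective weight > 0, preserving input-convexity
        return F.linear(x, F.softplus(self.weight), self.bias)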
Example #7
    def __init__(self, image_size: int, nc: int = 3, ndf: int = 64):
        super(PosDiscriminator, self).__init__()

        layers = [
            EqualConv2d(nc, ndf, 3, 1, 1),
            nn.LeakyReLU(0.2, inplace=True),
        ]

        # halve the spatial size until 4x4, doubling channels (capped at 256)
        tmp_size = image_size
        tmp_nc = ndf
        nc_next = -1
        while tmp_size > 4:
            tmp_size = tmp_size // 2
            nc_next = min(256, tmp_nc * 2)
            layers += [
                PosDownSampleBlock(tmp_nc, nc_next),
                nn.LeakyReLU(0.2, inplace=True),
            ]
            tmp_nc = nc_next

        layers += [
            View(-1),
            PosLinear(nc_next * 4 * 4, nc_next, activation=False),
            nn.LeakyReLU(0.2, inplace=True),
            PosLinear(nc_next, 1, activation=False)
        ]

        self.main = nn.Sequential(*layers)
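The while loop halves the resolution down to 4x4, so image_size must be a power of two of at least 8 for the nc_next * 4 * 4 flatten to line up. A hypothetical usage sketch, assuming the helper blocks are importable:

import torch

d = PosDiscriminator(image_size=64)            # 64 -> 32 -> 16 -> 8 -> 4
scores = d.main(torch.randn(8, 3, 64, 64))
print(scores.shape)                            # expected: torch.Size([8, 1])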
Example #8
    def __init__(self, noise_size: int, image_size: int, ngf=32):
        super(ResDCGenerator, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        layers = [
            nn.Linear(noise_size, noise_size, bias=False),
            View(-1, noise_size, 1, 1),
            nn.ConvTranspose2d(noise_size, ngf * 8, 4, 1, 0, bias=False),
            nn.InstanceNorm2d(ngf * 8),
            nn.ReLU(True),
        ]

        nc_l_next = -1
        for l in range(n_up):
            # channel schedule: ngf * 8, halved each stage, floored at ngf
            nc_l = max(ngf, (ngf * 8) // 2**l)
            nc_l_next = max(ngf, nc_l // 2)

            layers += [
                Up2xResidualBlock(nc_l,
                                  nc_l_next,
                                  PaddingType.REFLECT,
                                  nn.InstanceNorm2d,
                                  use_spectral_norm=False)
            ]

            if l == 2:
                layers += [SelfAttention2d(nc_l_next)]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False)]

        self.main = nn.Sequential(*layers)
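A hypothetical smoke test: the noise vector is reshaped to a 1x1 map, expanded to 4x4 by the first transposed conv, then upsampled n_up times, with self-attention inserted after the third upsample. Note that, unlike DCGenerator below, there is no final Tanh, so outputs are unbounded.

import torch

g = ResDCGenerator(noise_size=128, image_size=64)   # n_up = 4
img = g.main(torch.randn(4, 128))
print(img.shape)                                    # expected: torch.Size([4, 3, 64, 64])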
Example #9
def unet4_256():
    return UNet4(
        down_blocks=[
            nn.Sequential(Conv2dBlock(3, 64, 1),
                          Conv2dBlock(64, 64, 4, downsample=True)),  # 128
            # Conv2dBlock(64, 64, 4, downsample=True),  # 64
            Conv2dBlock(64, 128, 4, downsample=True),  # 64
            Conv2dBlock(128, 256, 4, downsample=True),  # 32
            Conv2dBlock(256, 256, 4, downsample=True),  # 16
            Conv2dBlock(256, 512, 4, downsample=True),  # 8
            Conv2dBlock(512, 512, 4, downsample=True),  # 4
            nn.Sequential(View(-1), LinearBlock(512 * 4 * 4, 512))
        ],
        middle_block=[
            nn.Sequential(Conv2dBlock(3, 64, 1), Conv2dBlock(64, 64, 3)),
            Conv2dBlock(64, 64, 3),
            # Conv2dBlock(64, 64, 3),
            Conv2dBlock(128, 128, 3),
            Conv2dBlock(256, 256, 3),
            Conv2dBlock(256, 256, 3),
            Conv2dBlock(512, 512, 3),
            Conv2dBlock(512, 512, 3),
            LinearBlock(512, 512)
        ],
        up_blocks=[
            Conv2dBlock(128, 64, 3),
            # TransposeConv2dBlock(128, 64, 4),
            TransposeConv2dBlock(128, 64, 4),
            TransposeConv2dBlock(256, 64, 4),
            TransposeConv2dBlock(512, 128, 4),
            TransposeConv2dBlock(512, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            TransposeConv2dBlock(512 + 256, 256, 4),
            nn.Sequential(LinearBlock(512, 256 * 4 * 4), View(256, 4, 4))
        ],
        final_blocks=[
            SkipPlus(Conv2dBlock(64, 1, 3, activate=None), upsample=None),
            SkipPlus(Conv2dBlock(64, 1, 3, activate=None)),
            # SkipPlus(Conv2dBlock(64, 1, 3)),
            SkipPlus(Conv2dBlock(64, 1, 3)),
            SkipPlus(Conv2dBlock(128, 1, 3)),
            SkipPlus(Conv2dBlock(256, 1, 3)),
            SkipPlus(Conv2dBlock(256, 1, 3)),
            SkipPlus(Conv2dBlock(256, 1, 3)),
            Conv2dBlock(256, 1, 3),
        ])
Example #10
    def __init__(self, style_dim, count):
        super(StyleEncoder, self).__init__()
        self.model = [
            EqualConv2d(3, 16, 7, 1, 3),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(16, 32, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(32, 64, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(64, 128, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(128, 256, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(256, 256, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            EqualConv2d(256, 256, 4, 2, 1),
            nn.LeakyReLU(0.2, inplace=True),
            View(-1),
            EqualLinear(256 * 4 * 4, style_dim * 2, activation="fused_lrelu"),
            EqualLinear(style_dim * 2, style_dim * count),
            View(count, style_dim)
        ]

        self.model = nn.Sequential(*self.model)
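The six stride-2 convs take a 256x256 input down to 4x4 (256 -> 128 -> 64 -> 32 -> 16 -> 8 -> 4), matching the 256 * 4 * 4 flatten, and the head emits count style vectors per image. A hypothetical usage sketch:

import torch

enc = StyleEncoder(style_dim=512, count=14)
styles = enc.model(torch.randn(2, 3, 256, 256))
print(styles.shape)                            # expected: torch.Size([2, 14, 512])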
Example #11
    def __init__(self, num_layers: int, nc: int, nc_min: int):
        super().__init__()

        self.nc = nc

        self.upsamples = nn.ModuleList()
        self.upsamples.append(
            nn.Sequential(EqualLinear(nc, nc * 4 * 2),
                          nn.LeakyReLU(0.2, inplace=True),
                          View(nc // 2, 4, 4)))

        tmp_channel = nc // 2

        for i in range(num_layers - 1):
            nc_next = max(nc_min, tmp_channel // 2)
            self.upsamples.append(
                ScaledConvTranspose2d(tmp_channel, nc_next, 3))
            tmp_channel = nc_next
Example #12
    def __init__(self, channel1, channel2, size2):
        super().__init__()
        modules = [
            EqualLinear(channel1, channel1 * 4 * 2),
            nn.LeakyReLU(0.2, inplace=True),
            View(channel1 // 2, 4, 4)
        ]
        tmp_size = 4
        tmp_channel = channel1 // 2
        min_nc = 16
        while tmp_size < size2:
            nc_next = max(min_nc, tmp_channel // 2)
            modules.append(ScaledConvTranspose2d(tmp_channel, nc_next, 3))
            tmp_channel = nc_next
            tmp_size *= 2
        assert tmp_size == size2
        # final 1x1 conv maps the current channel count to the requested output channels
        modules.append(ConvLayer(tmp_channel, channel2, 1))
        self.main = nn.Sequential(*modules)
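This block maps a flat channel1-dimensional code to a channel2 x size2 x size2 feature map: the linear layer produces a 4x4 seed with channel1 // 2 channels, each ScaledConvTranspose2d doubles the resolution, and the final 1x1 conv sets the channel count. A hypothetical usage sketch (the class name UpsampleBlock is invented for illustration):

import torch

block = UpsampleBlock(channel1=512, channel2=3, size2=64)   # hypothetical name
out = block.main(torch.randn(2, 512))
print(out.shape)                               # expected: torch.Size([2, 3, 64, 64])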
Example #13
    def __init__(self, noise_size: int, image_size: int, ngf=64):
        super(DCGenerator, self).__init__()
        n_up = int(math.log2(image_size / 4))
        assert 4 * (2**n_up) == image_size
        nc = 3

        layers = [
            nn.Linear(noise_size, noise_size),
            nn.LeakyReLU(0.2, True),
            nn.Linear(noise_size, noise_size),
            nn.LeakyReLU(0.2, True),
            View(-1, noise_size, 1, 1),
            nn.ConvTranspose2d(noise_size, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True),
        ]

        nc_l_next = -1
        for l in range(n_up):
            nc_l = max(ngf, (ngf * 8) // 2**l)
            nc_l_next = max(ngf, nc_l // 2)

            layers += [
                nn.ConvTranspose2d(nc_l,
                                   nc_l_next,
                                   4,
                                   stride=2,
                                   padding=1,
                                   bias=False),
                nn.BatchNorm2d(nc_l_next),
                nn.LeakyReLU(0.2, True),
            ]

            # if l == 2:
            #     layers += [SelfAttention2d(nc_l_next)]

        layers += [nn.Conv2d(nc_l_next, nc, 3, 1, 1, bias=False), nn.Tanh()]

        self.main = nn.Sequential(*layers)
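The channel schedule starts at ngf * 8, halves each stage, and floors at ngf; a quick stand-alone trace of the loop for ngf=64 and image_size=64:

import math

ngf, image_size = 64, 64
n_up = int(math.log2(image_size / 4))          # 4 upsampling stages
for l in range(n_up):
    nc_l = max(ngf, (ngf * 8) // 2 ** l)
    nc_l_next = max(ngf, nc_l // 2)
    print(l, nc_l, nc_l_next)                  # 512->256, 256->128, 128->64, 64->64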
Example #14
    f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu")
torch.cuda.set_device(device)
W300Landmarks.batch_size = batch_size

starting_model_number = 90000
N = args.data_size
weights = torch.load(
    f'{Paths.default.models()}/lmgen_{N}_{str(starting_model_number).zfill(6)}.pt',
    map_location="cpu")

heatmapper = ToGaussHeatMap(256, 4)
hg = nn.Sequential(EqualLinear(100, 256, activation='fused_lrelu'),
                   EqualLinear(256, 256, activation='fused_lrelu'),
                   EqualLinear(256, 256, activation='fused_lrelu'),
                   EqualLinear(256, 256, activation='fused_lrelu'),
                   EqualLinear(256, 136), nn.Sigmoid(), View(68, 2))
hg.load_state_dict(weights['gh'])
hg = hg.cuda()
hm_discriminator = Discriminator(image_size, input_nc=1, channel_multiplier=1)
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

gan_model = StyleGanModel[nn.Module](hg, StyleGANLoss(hm_discriminator),
                                     (0.001, 0.0015))

writer = SummaryWriter(f"{Paths.default.board()}/lmgen{int(time.time())}")
WR.writer = writer

test_noise = torch.randn(batch_size, 100, device=device)

hm_accumulator = Accumulator(hg, decay=0.98, write_every=100)
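For context, hg here is not an hourglass but a small MLP generator: 100-dimensional noise goes to 68 landmark coordinates in (0, 1) via the final Sigmoid and View(68, 2). A hypothetical sanity check (not part of the original script):

with torch.no_grad():
    coords = hg(test_noise)    # expected shape: (batch_size, 68, 2), values in (0, 1)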