Code example #1
    def __init__(self, in_size, zsize=32, use_res=False, use_bn=False, depth=0):
        super().__init__()

        self.zsize = zsize

        # - Decoder: project the latent vector to a low-resolution feature map,
        # then upsample back to the input resolution. The channel widths a, b, c
        # and the scale factors p, q, r are defined outside this snippet; Linear,
        # ReLU, Upsample, ConvTranspose2d, Sigmoid and Sequential are assumed to
        # come from torch.nn.
        upmode = 'bilinear'
        modules = [
            Linear(zsize, (in_size[0] // (p*q*r)) * (in_size[1] // (p*q*r)) * c), ReLU(),
            util.Reshape((c, in_size[0] // (p*q*r), in_size[1] // (p*q*r)))
        ]

        for _ in range(depth):
            modules.append(util.Block(c, c, deconv=True, use_res=use_res, batch_norm=use_bn))

        modules.extend([
            Upsample(scale_factor=r, mode=upmode),
            util.Block(c, c, deconv=True, use_res=use_res, batch_norm=use_bn),
            Upsample(scale_factor=q, mode=upmode),
            util.Block(c, b, deconv=True, use_res=use_res, batch_norm=use_bn),
            Upsample(scale_factor=p, mode=upmode),
            util.Block(b, a, deconv=True, use_res=use_res, batch_norm=use_bn),
            ConvTranspose2d(a, 3, kernel_size=1, padding=0),
            Sigmoid()
        ])

        self.decoder = Sequential(*modules)
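For orientation, a minimal self-contained sketch of the same shape bookkeeping, with assumed example values for the module-level constants (a, b, c as channel widths, p, q, r as scale factors) and plain convolutions standing in for util.Block: a latent vector is projected, reshaped, and upsampled back to the input resolution.

import torch
from torch import nn

a, b, c = 16, 32, 64          # assumed channel widths
p, q, r = 2, 2, 2             # assumed per-stage scale factors
in_size, zsize = (64, 64), 32

decoder = nn.Sequential(
    nn.Linear(zsize, (in_size[0] // (p*q*r)) * (in_size[1] // (p*q*r)) * c),
    nn.ReLU(),
    nn.Unflatten(1, (c, in_size[0] // (p*q*r), in_size[1] // (p*q*r))),
    nn.Upsample(scale_factor=r, mode='bilinear'),
    nn.Conv2d(c, c, 3, padding=1), nn.ReLU(),
    nn.Upsample(scale_factor=q, mode='bilinear'),
    nn.Conv2d(c, b, 3, padding=1), nn.ReLU(),
    nn.Upsample(scale_factor=p, mode='bilinear'),
    nn.Conv2d(b, a, 3, padding=1), nn.ReLU(),
    nn.Conv2d(a, 3, kernel_size=1),
    nn.Sigmoid(),
)

z = torch.randn(4, zsize)
print(decoder(z).shape)       # torch.Size([4, 3, 64, 64])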
Code example #2
    def __init__(self,
                 out_size,
                 channels,
                 zchannels,
                 zs=256,
                 k=3,
                 mapping=3,
                 batch_norm=False,
                 dropouts=None):
        super().__init__()

        self.out_size = out_size

        c, h, w = self.out_size
        self.channels = channels
        c1, c2, c3, c4, c5 = self.channels
        z0, z1, z2, z3, z4, z5 = zchannels

        # resnet blocks
        self.block5 = util.Block(c5, c4, kernel_size=k, batch_norm=batch_norm)
        self.block4 = util.Block(c4, c3, kernel_size=k, batch_norm=batch_norm)
        self.block3 = util.Block(c3, c2, kernel_size=k, batch_norm=batch_norm)
        self.block2 = util.Block(c2, c1, kernel_size=k, batch_norm=batch_norm)
        self.block1 = util.Block(c1, c, kernel_size=k, batch_norm=batch_norm)

        # affine mappings from latent space sample
        self.affine5 = nn.Linear(zs, 2 * util.prod((c5, h // 32, w // 32)))
        self.affine4 = nn.Linear(zs, 2 * util.prod((c4, h // 16, w // 16)))
        self.affine3 = nn.Linear(zs, 2 * util.prod((c3, h // 8, w // 8)))
        self.affine2 = nn.Linear(zs, 2 * util.prod((c2, h // 4, w // 4)))
        self.affine1 = nn.Linear(zs, 2 * util.prod((c1, h // 2, w // 2)))
        self.affine0 = nn.Linear(zs, 2 * util.prod(out_size))

        # 1x1 convolution from "noise space" sample
        self.tonoise5 = nn.Conv2d(z5, c5, kernel_size=1, padding=0)
        self.tonoise4 = nn.Conv2d(z4, c4, kernel_size=1, padding=0)
        self.tonoise3 = nn.Conv2d(z3, c3, kernel_size=1, padding=0)
        self.tonoise2 = nn.Conv2d(z2, c2, kernel_size=1, padding=0)
        self.tonoise1 = nn.Conv2d(z1, c1, kernel_size=1, padding=0)
        self.tonoise0 = nn.Conv2d(z0, c, kernel_size=1, padding=0)

        # final 1x1 convolution on the c image channels
        self.conv0 = nn.Conv2d(c, c, kernel_size=1)

        # mapping network: a small MLP of `mapping` Linear + ReLU layers
        # on zs-dimensional vectors
        m = []
        for _ in range(mapping):
            m.append(nn.Linear(zs, zs))
            m.append(nn.ReLU())
        self.mapping = nn.Sequential(*m)

        self.dropouts = dropouts

        # constant, learnable input
        self.x5 = nn.Parameter(torch.randn(1, c5, h // 32, w // 32))
        self.x4 = nn.Parameter(torch.randn(1, c4, h // 16, w // 16))
        self.x3 = nn.Parameter(torch.randn(1, c3, h // 8, w // 8))
        self.x2 = nn.Parameter(torch.randn(1, c2, h // 4, w // 4))
        self.x1 = nn.Parameter(torch.randn(1, c1, h // 2, w // 2))
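Each affine layer above produces 2 * util.prod(shape) values for the feature map at one scale, which suggests a split into a per-element scale and bias (a StyleGAN-like modulation of the constant learnable input). A hedged, self-contained sketch of that pattern at the coarsest scale; the split and the names gamma/beta are assumptions, not the original forward pass.

import torch
from torch import nn

zs, c5, h, w = 256, 128, 64, 64                # assumed sizes
affine5 = nn.Linear(zs, 2 * c5 * (h // 32) * (w // 32))

z = torch.randn(4, zs)                         # mapped latent sample
x5 = torch.randn(4, c5, h // 32, w // 32)      # feature map at the coarsest scale

# split the affine output into a scale and a bias, then modulate the features
gamma, beta = affine5(z).view(4, 2, c5, h // 32, w // 32).unbind(dim=1)
x5 = gamma * x5 + beta
print(x5.shape)                                # torch.Size([4, 128, 2, 2])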
Code example #3
    def __init__(self, out_size, channels, zs=256, k=3, dist='gaussian', mapping=3, batch_norm=False, dropouts=None):
        super().__init__()

        self.out_size = out_size

        c, h, w = self.out_size
        self.channels = channels
        c1, c2, c3, c4, c5 = self.channels

        # resnet blocks
        self.block5 = util.Block(c5, c4, kernel_size=k, deconv=True, batch_norm=batch_norm)
        self.block4 = util.Block(c4, c3, kernel_size=k, deconv=True, batch_norm=batch_norm)
        self.block3 = util.Block(c3, c2, kernel_size=k, deconv=True, batch_norm=batch_norm)
        self.block2 = util.Block(c2, c1, kernel_size=k, deconv=True, batch_norm=batch_norm)
        self.block1 = util.Block(c1, c,  kernel_size=k, deconv=True, batch_norm=batch_norm)

        # affine mappings from latent space sample
        self.affine5 = nn.Linear(zs, 2 * util.prod((c5, h//32, w//32)))
        self.affine4 = nn.Linear(zs, 2 * util.prod((c4, h//16, w//16)))
        self.affine3 = nn.Linear(zs, 2 * util.prod((c3, h//8, w//8)))
        self.affine2 = nn.Linear(zs, 2 * util.prod((c2, h//4, w//4)))
        self.affine1 = nn.Linear(zs, 2 * util.prod((c1, h//2, w//2)))
        self.affine0 = nn.Linear(zs, 2 * util.prod(out_size))

        # 1x1 convolution from "noise space" sample
        self.tonoise5 = nn.Conv2d(1, c5, kernel_size=1, padding=0)
        self.tonoise4 = nn.Conv2d(1, c4, kernel_size=1, padding=0)
        self.tonoise3 = nn.Conv2d(1, c3, kernel_size=1, padding=0)
        self.tonoise2 = nn.Conv2d(1, c2, kernel_size=1, padding=0)
        self.tonoise1 = nn.Conv2d(1, c1, kernel_size=1, padding=0)
        self.tonoise0 = nn.Conv2d(1, c,  kernel_size=1, padding=0)

        # mapping to distribution on image space
        if dist in ['gaussian','beta']:
            self.conv0 = nn.Conv2d(c, c*2, kernel_size=1)
        elif dist == 'bernoulli': # binary xent loss
            self.conv0 = nn.Conv2d(c, c, kernel_size=1)
        else:
            raise Exception('Output distribution {} not recognized'.format(dist))

        m = []
        for _ in range(mapping):
            m.append(nn.Linear(zs, zs))
            m.append(nn.ReLU())
        self.mapping = nn.Sequential(*m)

        self.dropouts = dropouts

        # constant, learnable input
        self.x5 = nn.Parameter(torch.randn(c5, h//32, w//32))
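The output head depends on dist: for 'gaussian' or 'beta', conv0 doubles the channel count so its output can be read as two per-pixel parameter maps, while 'bernoulli' keeps c channels interpreted as logits for the binary cross-entropy loss. A hedged sketch of the two-parameter case; the channel-wise split is an assumed reading of that convention, not taken from the original forward pass.

import torch
from torch import nn

c, h, w = 3, 64, 64
conv0 = nn.Conv2d(c, c * 2, kernel_size=1)     # 'gaussian' / 'beta' head

feats = torch.randn(4, c, h, w)                # features at image resolution
params = conv0(feats)                          # (4, 2*c, h, w)
first, second = params[:, :c], params[:, c:]   # two per-pixel parameter maps
print(first.shape, second.shape)               # both torch.Size([4, 3, 64, 64])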
Code example #4
    def __init__(self,
                 in_size,
                 channels,
                 zchannels,
                 zs=256,
                 k=3,
                 unmapping=3,
                 batch_norm=False):
        super().__init__()

        c, h, w = in_size
        c1, c2, c3, c4, c5 = channels
        z0, z1, z2, z3, z4, z5 = zchannels

        # resnet blocks
        self.block1 = util.Block(c, c1, kernel_size=k, batch_norm=batch_norm)
        self.block2 = util.Block(c1, c2, kernel_size=k, batch_norm=batch_norm)
        self.block3 = util.Block(c2, c3, kernel_size=k, batch_norm=batch_norm)
        self.block4 = util.Block(c3, c4, kernel_size=k, batch_norm=batch_norm)
        self.block5 = util.Block(c4, c5, kernel_size=k, batch_norm=batch_norm)

        # affine mappings to distribution on latent space
        self.affine0 = nn.Linear(util.prod(in_size), 2 * zs)
        self.affine1 = nn.Linear(util.prod((c1, h // 2, w // 2)), 2 * zs)
        self.affine2 = nn.Linear(util.prod((c2, h // 4, w // 4)), 2 * zs)
        self.affine3 = nn.Linear(util.prod((c3, h // 8, w // 8)), 2 * zs)
        self.affine4 = nn.Linear(util.prod((c4, h // 16, w // 16)), 2 * zs)
        self.affine5 = nn.Linear(util.prod((c5, h // 32, w // 32)), 2 * zs)

        # 12 * zs matches the six per-scale affine outputs (6 * 2*zs),
        # presumably concatenated before this layer
        self.affinez = nn.Linear(12 * zs, 2 * zs)

        # 1x1 convolution to distribution on "noise space"
        # (mean and sigma)
        self.tonoise0 = nn.Conv2d(c, z0 * 2, kernel_size=1, padding=0)
        self.tonoise1 = nn.Conv2d(c1, z1 * 2, kernel_size=1, padding=0)
        self.tonoise2 = nn.Conv2d(c2, z2 * 2, kernel_size=1, padding=0)
        self.tonoise3 = nn.Conv2d(c3, z3 * 2, kernel_size=1, padding=0)
        self.tonoise4 = nn.Conv2d(c4, z4 * 2, kernel_size=1, padding=0)
        self.tonoise5 = nn.Conv2d(c5, z5 * 2, kernel_size=1, padding=0)

        um = []
        for _ in range(unmapping):
            um.append(nn.ReLU())
            um.append(nn.Linear(zs * 2, zs * 2))
        self.unmapping = nn.Sequential(*um)
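Every head here outputs parameters in pairs (2 * zs for the latent space, 2 * z_i channels for the noise space, "mean and sigma" per the comment above). The step that usually follows is to split such an output and draw a sample with the reparameterization trick; a hedged sketch, with the log-variance parametrization as an assumption rather than a detail confirmed by this fragment.

import torch

zs = 256
head_out = torch.randn(4, 2 * zs)              # e.g. the output of self.unmapping

mu, logvar = head_out[:, :zs], head_out[:, zs:]
z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)   # reparameterized sample
print(z.shape)                                 # torch.Size([4, 256])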
Code example #5
    def __init__(self, in_size, zsize=32, use_res=False, use_bn=False, depth=0):
        super().__init__()
        self.zsize = zsize

        # - Encoder: mirror of the decoder above, built from the same
        # module-level channel widths a, b, c and pooling factors p, q, r
        modules = [
            util.Block(3, a, use_res=use_res, batch_norm=use_bn),
            MaxPool2d((p, p)),
            util.Block(a, b, use_res=use_res, batch_norm=use_bn),
            MaxPool2d((q, q)),
            util.Block(b, c, use_res=use_res, batch_norm=use_bn),
            MaxPool2d((r, r)),
        ]

        for _ in range(depth):
            modules.append(util.Block(c, c, use_res=use_res, batch_norm=use_bn))

        modules.extend([
            util.Flatten(),
            Linear((in_size[0] // (p*q*r)) * (in_size[1] // (p*q*r)) * c, zsize * 2)
        ])

        self.encoder = Sequential(*modules)
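A matching self-contained sketch of the encoder's shape bookkeeping, under the same assumed constants as the decoder sketch and with plain convolutions standing in for util.Block: three max-pool stages shrink each side by p*q*r, so the flattened feature count lines up with the final Linear layer, whose 2 * zsize output leaves room for a mean and a (log-)variance per latent dimension.

import torch
from torch import nn

a, b, c = 16, 32, 64
p, q, r = 2, 2, 2
in_size, zsize = (64, 64), 32

encoder = nn.Sequential(
    nn.Conv2d(3, a, 3, padding=1), nn.ReLU(),
    nn.MaxPool2d((p, p)),
    nn.Conv2d(a, b, 3, padding=1), nn.ReLU(),
    nn.MaxPool2d((q, q)),
    nn.Conv2d(b, c, 3, padding=1), nn.ReLU(),
    nn.MaxPool2d((r, r)),
    nn.Flatten(),
    nn.Linear((in_size[0] // (p*q*r)) * (in_size[1] // (p*q*r)) * c, zsize * 2),
)

x = torch.randn(4, 3, *in_size)
print(encoder(x).shape)       # torch.Size([4, 64])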