Code example #1
    def __init__(self,
                 num_channels=3,
                 image_size=64,
                 num_classes=2,
                 num_fc_layers=7,
                 fc_layer_size=1000):
        super().__init__(256,
                         num_classes=num_classes,
                         num_layers=num_fc_layers,
                         layer_size=fc_layer_size)
        assert image_size == 64, 'The SimpleDiscriminatorConv64 architecture is hardcoded for 64x64 images.'

        # Kernel-4, stride-2, pad-1 convs halve the input at each step: 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1.
        self.conv_encode = nn.Sequential(
            nn.Conv2d(num_channels, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 64, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, 4, 2, 1),
            nn.ReLU(True),
            # Flatten the (batch, 256, 1, 1) conv output to (batch, 256),
            # matching the input_dim of 256 passed to super().__init__.
            Flatten3D(),
        )

        init_layers(self._modules)
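
The Flatten3D helper used here and in several examples below is not defined on this page. A minimal stand-in with the behavior the surrounding code requires (collapsing the conv output into a per-sample feature vector) could look like this; the actual helper in the source repository may differ:

import torch.nn as nn

class Flatten3D(nn.Module):
    # Collapse (batch, C, H, W) activations into a (batch, C*H*W) feature vector.
    def forward(self, x):
        return x.view(x.size(0), -1)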
Code example #2
    def __init__(self, input_dim, image_size):
        super().__init__()

        # One SingleTo2DChannel module per latent dimension: each scalar input
        # dimension gets its own decoder producing a single-channel image.
        self.main = nn.ModuleList()

        for i in range(input_dim):
            self.main.append(SingleTo2DChannel(image_size))

        init_layers(self._modules)
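
The snippet above only shows the constructor; the wrapper's forward pass is not included. One plausible reading, assuming each scalar latent dimension is fed through its own SingleTo2DChannel (see Code example #5) and the resulting single-channel maps are concatenated along the channel axis, is sketched below purely for illustration:

import torch

def forward(self, z):
    # Hypothetical method body, not taken from the source repository:
    # decode each scalar latent dimension into its own 1 x H x W channel
    # and stack the channels into a (batch, input_dim, H, W) tensor.
    channels = [m(z[:, i:i + 1]) for i, m in enumerate(self.main)]
    return torch.cat(channels, dim=1)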
Code example #3
    def __init__(self, latent_dim, num_channels, image_size):
        super().__init__(latent_dim, num_channels, image_size)

        # Two-layer MLP decoder: latent code -> 400 hidden units -> flattened image,
        # reshaped to (num_channels, image_size, image_size).
        self.main = nn.Sequential(
            nn.Linear(latent_dim, 400), nn.ReLU(),
            nn.Linear(400, image_size * image_size * num_channels),
            Reshape([num_channels, image_size, image_size]))

        init_layers(self._modules)
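
Reshape is another helper that is referenced but not defined here. A minimal sketch, assuming it simply views the flat features as the given per-sample shape, might be:

import torch.nn as nn

class Reshape(nn.Module):
    # Sketch of the helper: view (batch, N) features as (batch, *shape).
    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(x.size(0), *self.shape)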
Code example #4
    def __init__(self, latent_dim, num_channels, image_size):
        super().__init__(latent_dim, num_channels, image_size)

        # Shared MLP trunk over the flattened image, followed by separate linear
        # heads for the Gaussian mean and log-variance of the latent code.
        self.main = nn.Sequential(
            Flatten3D(),
            nn.Linear(image_size * image_size * num_channels, 400),
            nn.ReLU())

        self.head_mu = nn.Linear(400, latent_dim)
        self.head_logvar = nn.Linear(400, latent_dim)

        init_layers(self._modules)
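
The head_mu/head_logvar pair is the usual Gaussian-encoder output. How the enclosing model samples from it is not shown; the standard reparameterization step, given here as generic VAE code rather than this repository's own forward pass, is:

import torch

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable w.r.t. mu and logvar.
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std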
Code example #5
    def __init__(self, image_size):
        super().__init__()

        # MLP that maps a single scalar input to a 1 x image_size x image_size feature map.
        self.main = nn.Sequential(
            nn.Linear(1, 64),
            nn.LeakyReLU(0.2, True),
            nn.Linear(64, 256),
            nn.LeakyReLU(0.2, True),
            nn.Linear(256, image_size * image_size),
            Reshape([1, image_size, image_size])
        )

        init_layers(self._modules)
Code example #6
    def __init__(self, latent_dim, num_channels, image_size):
        super().__init__(latent_dim, num_channels, image_size)
        assert image_size == 64, 'This model only works with image size 64x64.'

        self.main = nn.Sequential(
            nn.Conv2d(num_channels, 32, 3, 2, 0),
            nn.ReLU(True),
            nn.Conv2d(32, 32, 3, 2, 0),
            nn.ReLU(True),
            nn.Conv2d(32, 64, 3, 2, 0),
            nn.ReLU(True),
            nn.Conv2d(64, 128, 3, 2, 0),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 3, 2, 0),
            nn.ReLU(True),
            Flatten3D(),
            nn.Linear(256, latent_dim, bias=True))

        init_layers(self._modules)
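
The final nn.Linear(256, latent_dim) works because the five kernel-3, stride-2, no-padding convolutions reduce a 64x64 input to a single spatial position; the sizes can be checked with the usual convolution formula:

# out = floor((in - kernel) / stride) + 1 for each conv layer
size = 64
for _ in range(5):
    size = (size - 3) // 2 + 1
    print(size)  # 31, 15, 7, 3, 1 -> final feature map is 256 x 1 x 1 = 256 features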
Code example #7
    def __init__(self, input_dim, num_classes, num_layers=7, layer_size=1000):
        super().__init__()

        # Input layer, then (num_layers - 2) hidden blocks of width layer_size,
        # then a final linear head with num_classes outputs.
        self.main = nn.Sequential(
            nn.Linear(input_dim, layer_size),
            nn.LeakyReLU(0.2, True),
        )

        for i in range(num_layers - 2):
            self.main.add_module(module=nn.Linear(layer_size, layer_size),
                                 name='linear' + str(i))
            self.main.add_module(module=nn.LeakyReLU(0.2, True),
                                 name='lrelu' + str(i))

        self.main.add_module(module=nn.Linear(layer_size, num_classes),
                             name='output')

        init_layers(self._modules)
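
Every example on this page ends with init_layers(self._modules), whose definition is not shown. A hypothetical initializer in the same spirit, assuming it walks the registered submodules and applies a standard weight initialization to linear and convolutional layers, could look like the following; the repository's actual function may use a different scheme:

import torch.nn as nn

def init_layers(modules_dict):
    # Hypothetical stand-in: Kaiming-initialize every linear/conv layer found
    # in the module dict (e.g. self._modules) and zero the biases.
    for module in modules_dict.values():
        for m in module.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)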
Code example #8
    def __init__(self, latent_dim, num_channels, image_size):
        super().__init__(latent_dim, num_channels, image_size)
        assert image_size == 64, 'This model only works with image size 64x64.'

        # A 1x1 conv first expands the latent to 256 channels; the transposed convs
        # then grow the spatial map 1 -> 2 -> 6 -> 14 -> 30 -> 62 -> 64.
        self.main = nn.Sequential(
            Unsqueeze3D(),  # lift (bs, latent_dim) to a (bs, latent_dim, 1, 1) map
            nn.Conv2d(latent_dim, 256, 1, 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 256, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 128, 4, 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 4, 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, num_channels, 3, 1))
        # output shape = bs x num_channels x 64 x 64

        init_layers(self._modules)
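
Unsqueeze3D is the decoder-side counterpart of Flatten3D and is likewise not defined on this page. A minimal sketch, assuming it lifts a (batch, latent_dim) vector to a (batch, latent_dim, 1, 1) map so the first Conv2d can consume it, is:

import torch.nn as nn

class Unsqueeze3D(nn.Module):
    # Sketch of the helper: add two trailing spatial dimensions, (B, C) -> (B, C, 1, 1).
    def forward(self, x):
        return x.view(x.size(0), x.size(1), 1, 1)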