Example #1
    def __init__(self,
                 generator,
                 adversary,
                 encoder,
                 x_dim,
                 z_dim,
                 y_dim,
                 optim=None,
                 optim_kwargs=None,
                 lambda_KL=10,
                 lambda_x=10,
                 adv_type="Discriminator",
                 feature_layer=None,
                 fixed_noise_size=32,
                 device=None,
                 ngpu=0,
                 folder="./veganModels/cVAEGAN",
                 secure=True):

        super().__init__(generator=generator,
                         adversary=adversary,
                         encoder=encoder,
                         x_dim=x_dim,
                         z_dim=z_dim,
                         y_dim=y_dim,
                         optim=optim,
                         optim_kwargs=optim_kwargs,
                         adv_type=adv_type,
                         feature_layer=feature_layer,
                         fixed_noise_size=fixed_noise_size,
                         device=device,
                         folder=folder,
                         ngpu=ngpu,
                         secure=secure)
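        # Two heads map the encoder features to the posterior mean and log-variance
        # used for the reparameterization trick.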
        self.mu = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)).to(self.device)
        self.log_variance = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)).to(self.device)

        self.lambda_KL = lambda_KL
        self.lambda_x = lambda_x
        self.hyperparameters["lambda_KL"] = lambda_KL
        self.hyperparameters["lambda_x"] = lambda_x
        self.hyperparameters["adv_type"] = adv_type

        if self.secure:
            assert self.encoder.output_size != self.z_dim, (
                "Encoder output size is equal to z_dim, but for VAE algorithms the encoder last layers for mu and sigma "
                "are constructed by the algorithm itself.\nSpecify up to the second last layer for this particular encoder."
            )
            assert (self.generator.output_size == self.x_dim), (
                "Decoder output shape must be equal to x_dim. {} vs. {}.".
                format(self.generator.output_size, self.x_dim))
Example #2
    def __init__(self,
                 encoder,
                 decoder,
                 x_dim,
                 z_dim,
                 optim=None,
                 optim_kwargs=None,
                 lambda_KL=10,
                 fixed_noise_size=32,
                 device=None,
                 ngpu=0,
                 folder="./veganModels/VanillaVAE",
                 secure=True):

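        # Wrap the raw networks so device placement, multi-GPU, and (optional) shape
        # checks are handled, then register them before the base-class setup runs.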
        self.decoder = Decoder(decoder,
                               input_size=z_dim,
                               device=device,
                               ngpu=ngpu,
                               secure=secure)
        self.encoder = Encoder(encoder,
                               input_size=x_dim,
                               device=device,
                               ngpu=ngpu,
                               secure=secure)
        self.autoencoder = Autoencoder(self.encoder, self.decoder)
        self.neural_nets = {"Autoencoder": self.autoencoder}

        super().__init__(x_dim=x_dim,
                         z_dim=z_dim,
                         optim=optim,
                         optim_kwargs=optim_kwargs,
                         feature_layer=None,
                         fixed_noise_size=fixed_noise_size,
                         device=device,
                         folder=folder,
                         ngpu=ngpu,
                         secure=secure)
        self.mu = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)).to(self.device)
        self.log_variance = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)).to(self.device)

        self.lambda_KL = lambda_KL
        self.hyperparameters["lambda_KL"] = lambda_KL

        if self.secure:
            assert self.encoder.output_size != self.z_dim, (
                "Encoder output size is equal to z_dim, but for VAE algorithms the encoder last layers for mu and sigma "
                "are constructed by the algorithm itself.\nSpecify up to the second last layer for this particular encoder."
            )
            assert (self.decoder.output_size == self.x_dim), (
                "Decoder output shape must be equal to x_dim. {} vs. {}.".
                format(self.decoder.output_size, self.x_dim))
Example #3
    def __init__(self,
                 generator,
                 adversary,
                 encoder,
                 x_dim,
                 z_dim,
                 y_dim,
                 optim=None,
                 optim_kwargs=None,
                 lambda_KL=10,
                 lambda_x=10,
                 lambda_z=10,
                 adv_type="Discriminator",
                 feature_layer=None,
                 fixed_noise_size=32,
                 device=None,
                 ngpu=0,
                 folder="./veganModels/cBicycleGAN",
                 secure=True):

        super().__init__(generator=generator,
                         adversary=adversary,
                         encoder=encoder,
                         x_dim=x_dim,
                         z_dim=z_dim,
                         y_dim=y_dim,
                         optim=optim,
                         optim_kwargs=optim_kwargs,
                         adv_type=adv_type,
                         feature_layer=feature_layer,
                         fixed_noise_size=fixed_noise_size,
                         device=device,
                         folder=folder,
                         ngpu=ngpu,
                         secure=secure)
        self.mu = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(z_dim)).to(self.device)
        self.log_variance = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(z_dim)).to(self.device)

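        # Loss weights: lambda_KL scales the KL divergence, lambda_x the image
        # reconstruction loss, and lambda_z the latent reconstruction loss.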
        self.lambda_KL = lambda_KL
        self.lambda_x = lambda_x
        self.lambda_z = lambda_z
        self.hyperparameters["lambda_KL"] = lambda_KL
        self.hyperparameters["lambda_x"] = lambda_x
        self.hyperparameters["lambda_z"] = lambda_z

        if self.secure:
            assert (self.generator.output_size == self.x_dim), (
                "Generator output shape must be equal to x_dim. {} vs. {}.".
                format(self.generator.output_size, self.x_dim))
Example #4
    def __init__(
            self,
            generator,
            adversary,
            encoder,
            x_dim,
            z_dim,
            optim=None,
            optim_kwargs=None,
            lambda_KL=10,
            lambda_x=10,
            lambda_z=10,
            adv_type="Discriminator",
            feature_layer=None,
            fixed_noise_size=32,
            device=None,
            ngpu=0,
            folder="./veganModels/BicycleGAN",
            secure=True):

        super().__init__(
            generator=generator, adversary=adversary, encoder=encoder,
            x_dim=x_dim, z_dim=z_dim, optim=optim, optim_kwargs=optim_kwargs,
            adv_type=adv_type, feature_layer=feature_layer,
            fixed_noise_size=fixed_noise_size, device=device, ngpu=ngpu,
            folder=folder, secure=secure
        )
        self.mu = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)
        ).to(self.device)
        self.log_variance = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.prod(self.encoder.output_size), np.prod(z_dim)),
            LayerReshape(shape=z_dim)
        ).to(self.device)

        self.lambda_KL = lambda_KL
        self.lambda_x = lambda_x
        self.lambda_z = lambda_z
        self.hyperparameters["lambda_KL"] = lambda_KL
        self.hyperparameters["lambda_x"] = lambda_x
        self.hyperparameters["lambda_z"] = lambda_z

        if self.secure:
            assert self.encoder.output_size != self.z_dim, (
                "Encoder output size is equal to z_dim, but for VAE algorithms the encoder last layers for mu and sigma " +
                "are constructed by the algorithm itself.\nSpecify up to the second last layer for this particular encoder."
            )
Example #5
 def __init__(self, x_dim, dec_in_dim):
     super().__init__()
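     # Project the latent input to a 1x8x8 map, then upsample 8 -> 16 -> 32 with
     # stride-2 transposed convolutions; the final stride-1 layer maps to x_dim[0] channels.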
     self.hidden_part = nn.Sequential(
         nn.Linear(in_features=np.prod(dec_in_dim),
                   out_features=np.prod([1, 8, 8])),
         LayerReshape(shape=[1, 8, 8]),
         nn.ConvTranspose2d(in_channels=1,
                            out_channels=64,
                            kernel_size=4,
                            stride=2,
                            padding=1),
         nn.BatchNorm2d(num_features=64),
         nn.LeakyReLU(0.2),
         nn.ConvTranspose2d(in_channels=64,
                            out_channels=32,
                            kernel_size=4,
                            stride=2,
                            padding=1),
         nn.BatchNorm2d(num_features=32),
         nn.LeakyReLU(0.2),
         nn.ConvTranspose2d(in_channels=32,
                            out_channels=x_dim[0],
                            kernel_size=3,
                            stride=1,
                            padding=1),
     )
     self.output = nn.Sigmoid()
Example #6
    def __init__(self, x_dim, dec_in_dim):
        super().__init__()
        self.hidden_part = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=np.prod(dec_in_dim), out_features=np.prod([1, 8, 8])),
            LayerReshape(shape=[1, 8, 8]),
        )
        desired_output = x_dim[1]
        current_output = 8
        in_channels = 1
        i = 2

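        # Double the spatial size with stride-2 transposed convolutions until the
        # target resolution is reached (assumes x_dim[1] equals 8 * 2**k).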
        while current_output != desired_output:
            out_channels = in_channels * 2
            current_output *= 2
            if current_output != desired_output:
                self.hidden_part.add_module("ConvTraspose{}".format(i), nn.ConvTranspose2d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1
                    )
                )
                self.hidden_part.add_module("Batchnorm{}".format(i), nn.BatchNorm2d(num_features=out_channels))
                self.hidden_part.add_module("LeakyRelu{}".format(i), nn.LeakyReLU(0.1))

            else: # Last layer
                self.hidden_part.add_module("ConvTraspose{}".format(i), nn.ConvTranspose2d(
                    in_channels=in_channels, out_channels=3, kernel_size=4, stride=2, padding=1
                    )
                )
            in_channels = in_channels * 2
            i += 1
        self.output = nn.Sigmoid()
Example #7
 def __init__(self, in_dim, last_layer, out_dim):
     super().__init__()
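     # Minimal MLP: flatten the input, one hidden layer of 16 units, then project and
     # reshape to out_dim; `last_layer` supplies the output activation class.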
     in_dim = np.prod(in_dim)
     out_dim = [out_dim] if isinstance(out_dim, int) else out_dim
     self.hidden_part = torch.nn.Sequential(
         torch.nn.Flatten(), torch.nn.Linear(in_dim, 16),
         torch.nn.LeakyReLU(0.2), torch.nn.Linear(16, np.prod(out_dim)),
         LayerReshape(out_dim))
     self.output = last_layer()
Example #8
 def __init__(self, z_dim, im_dim):
     super().__init__()
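     # `im_dim` (the target image shape) is required by the final projection and reshape.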
     self.hidden_part = nn.Sequential(
         nn.Linear(z_dim, 128), nn.LeakyReLU(0.2), nn.Linear(128, 256),
         nn.LeakyReLU(0.2), nn.BatchNorm1d(256), nn.Linear(256, 512),
         nn.LeakyReLU(0.2), nn.BatchNorm1d(512), nn.Linear(512, 1024),
         nn.LeakyReLU(0.2), nn.BatchNorm1d(1024),
         nn.Linear(1024, int(np.prod(im_dim))), LayerReshape(im_dim))
     self.output = nn.Sigmoid()
Example #9
 def __init__(self, x_dim, dec_in_dim):
     super().__init__()
     self.hidden_part = nn.Sequential(nn.Flatten(),
                                      nn.Linear(np.prod(dec_in_dim),
                                                256), nn.LeakyReLU(0.2),
                                      nn.Linear(256, 128),
                                      nn.LeakyReLU(0.2),
                                      nn.Linear(128, np.prod(x_dim)),
                                      LayerReshape(x_dim))
     self.output = nn.Identity()
Example #10
 def __init__(self, enc_in_dim, z_dim, first_layer):
     super().__init__()
     self.hidden_part = nn.Sequential(first_layer, nn.Flatten(),
                                      nn.Linear(np.prod(enc_in_dim),
                                                256), nn.LeakyReLU(0.2),
                                      nn.Linear(256, 128),
                                      nn.LeakyReLU(0.2),
                                      nn.Linear(128, np.prod(z_dim)),
                                      LayerReshape(z_dim))
     self.output = nn.Identity()
Example #11
    def __init__(self, x_dim, gen_in_dim):
        super().__init__()
        if len(gen_in_dim) == 1:
            out_shape = (128, 8, 8)
            self.linear_part = nn.Sequential(
                nn.Linear(in_features=gen_in_dim[0], out_features=1024),
                nn.LeakyReLU(0.1),
                nn.Linear(in_features=1024, out_features=np.prod(out_shape)),
                nn.LeakyReLU(0.1),
                LayerReshape(shape=out_shape)
            )
            gen_in_dim = out_shape
        else:
            self.linear_part = nn.Identity()

        self.hidden_part = nn.Sequential(
            nn.ConvTranspose2d(in_channels=gen_in_dim[0], out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(0.1),
            nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(0.1),
        )

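        # Upsample from gen_in_dim[1] to x_dim[1] by doubling the resolution while
        # halving the channel count (assumes x_dim[1] equals gen_in_dim[1] * 2**k).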
        desired_output = x_dim[1]
        current_output = gen_in_dim[1]
        in_channels = 128
        i = 3

        while current_output != desired_output:
            out_channels = in_channels // 2
            current_output *= 2
            if current_output != desired_output:
                self.hidden_part.add_module("ConvTraspose{}".format(i), nn.ConvTranspose2d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1
                    )
                )
                self.hidden_part.add_module("Batchnorm{}".format(i), nn.BatchNorm2d(num_features=out_channels))
                self.hidden_part.add_module("LeakyRelu{}".format(i), nn.LeakyReLU(0.1))

            else: # Last layer
                self.hidden_part.add_module("ConvTraspose{}".format(i), nn.ConvTranspose2d(
                    in_channels=in_channels, out_channels=3, kernel_size=4, stride=2, padding=1
                    )
                )
            in_channels = in_channels // 2
            i += 1
        self.output = nn.Sigmoid()
Example #12
 def __init__(self, enc_in_dim, z_dim):
     super().__init__()
     self.hidden_part = nn.Sequential(
         nn.Conv2d(in_channels=enc_in_dim[0], out_channels=32, kernel_size=3, stride=1, padding=1),
         nn.ReLU(),
         nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=5, stride=2, padding=2),
         nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=5, stride=2, padding=2),
         nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=5, stride=2, padding=2),
         nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
         nn.Flatten(),
     )
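     # Infer the flattened feature count with a dummy forward pass so the Linear
     # layer below adapts to any input resolution.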
     sample_input = torch.rand([2, *enc_in_dim])
     flattened_nodes = tuple(self.hidden_part(sample_input).shape)[1]
     self.linear = nn.Linear(in_features=flattened_nodes, out_features=np.prod(z_dim))
     self.reshape = LayerReshape(shape=z_dim)
     self.output = nn.Identity()
Example #13
    def __init__(self,
                 generator,
                 adversary,
                 encoder,
                 x_dim,
                 z_dim,
                 y_dim,
                 c_dim_discrete,
                 c_dim_continuous,
                 optim=None,
                 optim_kwargs=None,
                 lambda_z=10,
                 feature_layer=None,
                 fixed_noise_size=32,
                 device=None,
                 ngpu=0,
                 folder="./veganModels/cInfoGAN",
                 secure=True):

        c_dim_discrete = [c_dim_discrete] if isinstance(
            c_dim_discrete, int) else c_dim_discrete
        assert c_dim_discrete == [0] or 0 not in c_dim_discrete, (
            "`c_dim_discrete` must be [0] (no discrete codes) or contain no zeros. Given: {}."
            .format(c_dim_discrete))
        assert isinstance(c_dim_continuous, int), (
            "`c_dim_continuous` must be of type int. Given: {}.".format(
                type(c_dim_continuous)))
        self.c_dim_discrete = tuple(c_dim_discrete)
        self.c_dim_continuous = (c_dim_continuous, )
        self.c_dim = (sum(self.c_dim_discrete) + sum(self.c_dim_continuous), )
        if len(y_dim) == 3:
            intermediate_in_dim = get_input_dim(dim1=z_dim, dim2=y_dim)
            gen_in_dim = get_input_dim(dim1=intermediate_in_dim,
                                       dim2=self.c_dim)
        else:
            gen_in_dim = get_input_dim(dim1=z_dim,
                                       dim2=sum(self.c_dim) + sum(y_dim))
        adv_in_dim = get_input_dim(dim1=x_dim, dim2=y_dim)

        if secure:
            AbstractConditionalGenerativeModel._check_conditional_network_input(
                generator, in_dim=z_dim, y_dim=self.c_dim, name="Generator")
        self.generator = Generator(generator,
                                   input_size=gen_in_dim,
                                   device=device,
                                   ngpu=ngpu,
                                   secure=secure)
        self.adversary = Adversary(adversary,
                                   input_size=adv_in_dim,
                                   adv_type="Discriminator",
                                   device=device,
                                   ngpu=ngpu,
                                   secure=secure)
        self.encoder = Encoder(encoder,
                               input_size=adv_in_dim,
                               device=device,
                               ngpu=ngpu,
                               secure=secure)
        self.neural_nets = {
            "Generator": self.generator,
            "Adversary": self.adversary,
            "Encoder": self.encoder
        }

        super().__init__(x_dim=x_dim,
                         z_dim=z_dim,
                         y_dim=y_dim,
                         optim=optim,
                         optim_kwargs=optim_kwargs,
                         feature_layer=feature_layer,
                         fixed_noise_size=fixed_noise_size,
                         device=device,
                         folder=folder,
                         ngpu=ngpu,
                         secure=secure)
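        # InfoGAN auxiliary heads: a multinomial head reconstructs the discrete codes,
        # while mu/log_variance parametrize a Gaussian over the continuous codes.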
        if self.c_dim_discrete != (0, ):
            self.multinomial = nn.Sequential(
                nn.Flatten(),
                nn.Linear(np.prod(self.encoder.output_size),
                          np.sum(self.c_dim_discrete)),
                nn.Sigmoid()).to(self.device)

        if self.c_dim_continuous != (0, ):
            self.mu = nn.Sequential(
                nn.Flatten(),
                nn.Linear(np.prod(self.encoder.output_size),
                          np.sum(self.c_dim_continuous)),
                LayerReshape(self.c_dim_continuous)).to(self.device)
            self.log_variance = nn.Sequential(
                nn.Flatten(),
                nn.Linear(np.prod(self.encoder.output_size),
                          np.sum(self.c_dim_continuous)), nn.ReLU(),
                LayerReshape(self.c_dim_continuous)).to(self.device)

        self.lambda_z = lambda_z
        self.hyperparameters["lambda_z"] = lambda_z
        if self.secure:
            assert (self.generator.output_size == self.x_dim), (
                "Generator output shape must be equal to x_dim. {} vs. {}.".
                format(self.generator.output_size, self.x_dim))
Example #14
    def __init__(self, x_dim, gen_in_dim):
        super().__init__()

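        # Bring the input to a 16x16 feature map first: flat latents are projected with
        # a Linear layer, image-shaped latents are upsampled via transposed convolutions.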
        if len(gen_in_dim) == 1:
            self.prepare = nn.Sequential(
                nn.Linear(in_features=gen_in_dim[0], out_features=256),
                LayerReshape(shape=[1, 16, 16]))
            nr_channels = 1
        else:
            current_dim = gen_in_dim[1]
            nr_channels = gen_in_dim[0]
            self.prepare = []
            while current_dim < 16:
                self.prepare.append(
                    nn.ConvTranspose2d(in_channels=nr_channels,
                                       out_channels=5,
                                       kernel_size=4,
                                       stride=2,
                                       padding=1))
                nr_channels = 5
                current_dim *= 2
            self.prepare = nn.Sequential(*self.prepare)

        self.encoding = nn.Sequential(
            nn.Conv2d(in_channels=nr_channels,
                      out_channels=64,
                      kernel_size=5,
                      stride=2,
                      padding=2),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(0.2),
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=5,
                      stride=2,
                      padding=2),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.BatchNorm2d(num_features=256),
            nn.LeakyReLU(0.2),
        )
        self.decoding = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256,
                               out_channels=128,
                               kernel_size=4,
                               stride=2,
                               padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(in_channels=128,
                               out_channels=64,
                               kernel_size=4,
                               stride=2,
                               padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(in_channels=64,
                               out_channels=32,
                               kernel_size=4,
                               stride=2,
                               padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(in_channels=32,
                               out_channels=x_dim[0],
                               kernel_size=3,
                               stride=1,
                               padding=1),
        )
        self.output = nn.Sigmoid()
Example #15
    im_dim = X_train.shape[1:]

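    # Fully connected GAN pair: the generator maps z to an image in [0, 1] via a final
    # Sigmoid; the discriminator flattens the image and outputs a probability.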
    generator = nn.Sequential(
        nn.Linear(z_dim, 128),
        nn.LeakyReLU(0.2),
        nn.Linear(128, 256),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(256),
        nn.Linear(256, 512),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(512),
        nn.Linear(512, 1024),
        nn.LeakyReLU(0.2),
        nn.BatchNorm1d(1024),
        nn.Linear(1024, int(np.prod(im_dim))),
        LayerReshape(im_dim),
        nn.Sigmoid()
    )
    adversariat = nn.Sequential(
        nn.Flatten(),
        nn.Linear(int(np.prod(im_dim)), 512),
        nn.LeakyReLU(0.2),
        nn.Linear(512, 256),
        nn.LeakyReLU(0.2),
        nn.Linear(256, 1),
        nn.Sigmoid()
    )
    call_gan_training(generator, adversariat)

    #########################################################################
    # Flat network: OO
Example #16
    call_gan_training(generator, adversary)

    #########################################################################
    # Flat network: Sequential
    #########################################################################
    z_dim = 2
    x_dim = X_train.shape[1:]

    generator = nn.Sequential(nn.Linear(z_dim, 128), nn.LeakyReLU(0.2),
                              nn.Linear(128, 256), nn.LeakyReLU(0.2),
                              nn.BatchNorm1d(256), nn.Linear(256, 512),
                              nn.LeakyReLU(0.2), nn.BatchNorm1d(512),
                              nn.Linear(512, 1024), nn.LeakyReLU(0.2),
                              nn.BatchNorm1d(1024),
                              nn.Linear(1024, int(np.prod(x_dim))),
                              LayerReshape(x_dim), nn.Sigmoid())
    adversary = nn.Sequential(nn.Flatten(), nn.Linear(int(np.prod(x_dim)),
                                                      512), nn.LeakyReLU(0.2),
                              nn.Linear(512, 256), nn.LeakyReLU(0.2),
                              nn.Linear(256, 1), nn.Sigmoid())
    call_gan_training(generator, adversary)

    #########################################################################
    # Flat network: OO
    #########################################################################
    z_dim = 128

    class MyGenerator(nn.Module):
        def __init__(self, z_dim):
            super().__init__()
            self.hidden_part = nn.Sequential(