Пример #1
0
    def __init__(self, x_dim, y_dim, z_dim, gen_architecture, disc_architecture, aux_architecture, last_layer_activation,
                 folder="./CWGAN", image_shape=None
        ):
        """Build the InfoGAN graph: conditional generator, discriminator and auxiliary net."""
        super(InfoGAN, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, disc_architecture, aux_architecture],
                                   last_layer_activation, folder, image_shape, None)

        # The base class stores the three architectures in order; unpack them.
        self._gen_architecture, self._disc_architecture, self._aux_architecture = self._architectures

        ################# Define architecture
        # Generator head: x_dim sigmoid outputs in [0, 1].
        self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": tf.nn.sigmoid, "name": "Output"}])
        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")

        # Discriminator head: a single real/fake probability.
        self._disc_architecture.append([logged_dense, {"units": 1, "activation": tf.nn.sigmoid, "name": "Output"}])
        self._disc = Discriminator(self._disc_architecture, name="Discriminator")

        # Auxiliary head: softmax over the y_dim conditioning classes.
        self._aux_architecture.append([logged_dense, {"units": y_dim, "activation": tf.nn.softmax, "name": "Output"}])
        self._aux = Encoder(self._aux_architecture, name="Auxiliary")

        self._nets = [self._generator, self._disc, self._aux]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input)
        self._output_disc_fake = self._disc.generate_net(self._output_gen)
        self._output_disc_real = self._disc.generate_net(self._X_input)
        self._output_aux = self._aux.generate_net(self._output_gen)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #2
0
    def __init__(self, x_dim, y_dim, z_dim, enc_dim, gen_architecture, enc_architecture, dec_architecture,
                 last_layer_activation, folder="./CBEGAN", image_shape=None, append_y_at_every_layer=None
        ):
        """Build the conditional BEGAN graph: a generator plus an encoder/decoder autoencoder critic."""
        super(CBEGAN, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, enc_architecture, dec_architecture],
                                   last_layer_activation, folder, image_shape, append_y_at_every_layer)

        # Unpack the three architectures stored by the base class.
        self._gen_architecture, self._enc_architecture, self._dec_architecture = self._architectures

        self._enc_dim = enc_dim
        # BEGAN equilibrium bookkeeping: k_t balance coefficient, gamma
        # diversity ratio, lambda step size for k_t (updated during training).
        self._kt = 0
        self._gamma = 1.
        self._lambda = 0.001

        ################# Define architecture
        is_flat_input = len(self._x_dim) == 1
        if is_flat_input:
            # Vector data: generator and decoder both end in a dense output head.
            head_kwargs = {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}
            self._gen_architecture.append([logged_dense, dict(head_kwargs)])
            self._dec_architecture.append([logged_dense, dict(head_kwargs)])
        else:
            # Image data: flatten before the encoder head; the generator's last
            # existing layer becomes the output.
            self._enc_architecture.append([tf.layers.flatten, {"name": "Flatten"}])
            self._gen_architecture[-1][1]["name"] = "Output"
        # Encoder always terminates in a tanh bottleneck of width enc_dim.
        self._enc_architecture.append([logged_dense, {"units": enc_dim, "activation": tf.nn.tanh, "name": "Output"}])

        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")
        self._decoder = Encoder(self._dec_architecture, name="Decoder")

        self._nets = [self._generator, self._encoder, self._decoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        # Condition both real and generated samples on the labels before the critic.
        with tf.name_scope("InputsEncoder"):
            if is_flat_input:
                self._input_fake = tf.concat(axis=1, values=[self._output_gen, self._Y_input], name="fake")
                self._input_real = tf.concat(axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._input_fake = image_condition_concat(inputs=self._output_gen, condition=self._Y_input, name="fake")
                self._input_real = image_condition_concat(inputs=self._X_input, condition=self._Y_input, name="real")

        self._output_encoder_real = self._encoder.generate_net(self._input_real, tf_trainflag=self._is_training)
        self._output_encoder_fake = self._encoder.generate_net(self._input_fake, tf_trainflag=self._is_training)

        self._output_decoder_real = self._decoder.generate_net(self._output_encoder_real, tf_trainflag=self._is_training)
        self._output_decoder_fake = self._decoder.generate_net(self._output_encoder_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #3
0
    def __init__(self, x_dim, y_dim, z_dim, gen_architecture, critic_architecture, aux_architecture,
                 last_layer_activation, folder="./CC_CWGAN1", image_shape=None, append_y_at_every_layer=None
        ):
        """Build the cycle-consistent conditional WGAN graph.

        Creates a conditional generator, a Wasserstein critic scoring
        (sample, label) pairs, and an auxiliary encoder that predicts the
        label back from generated samples.
        """
        super(CC_CWGAN1, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, critic_architecture, aux_architecture],
                                   last_layer_activation, folder, image_shape, append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._critic_architecture = self._architectures[1]
        self._aux_architecture = self._architectures[2]

        ################# Define architecture
        self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}])
        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")

        # Wasserstein critic: single unbounded scalar score (identity, no sigmoid).
        self._critic_architecture.append([logged_dense, {"units": 1, "activation": tf.identity, "name": "Output"}])
        self._critic = Critic(self._critic_architecture, name="Critic")

        self._aux_architecture.append([logged_dense, {"units": y_dim, "activation": tf.identity, "name": "Output"}])
        self._aux = Encoder(self._aux_architecture, name="Auxiliary")

        self._nets = [self._generator, self._critic, self._aux]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        with tf.name_scope("InputsCritic"):
            self._input_fake = tf.concat(axis=1, values=[self._output_gen, self._Y_input], name="fake")
            self._input_real = tf.concat(axis=1, values=[self._X_input, self._Y_input], name="real")

        self._output_aux = self._aux.generate_net(self._output_gen)
        # Build each critic branch exactly once. The original code first built
        # plain critic outputs and then immediately rebuilt and overwrote them
        # with the train-flag variants, leaving dead duplicate ops in the graph.
        self._output_critic_real = self._critic.generate_net(self._input_real,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)
        self._output_critic_fake = self._critic.generate_net(self._input_fake,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #4
0
    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 dec_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./VAE",
                 image_shape=None,
                 append_y_at_every_layer=None):
        """Build a conditional VAE graph: encoder mean/std heads, the
        reparameterized latent sample, and a conditional decoder.

        x_dim/y_dim/z_dim are the data, label and latent dimensions; the
        architectures are lists of [layer_fn, kwargs] pairs handed to the
        base class.
        """
        super(CVAE, self).__init__(x_dim, y_dim, z_dim,
                                   [dec_architecture, enc_architecture],
                                   last_layer_activation, folder, image_shape,
                                   append_y_at_every_layer)

        # Base class stores [decoder, encoder]; the decoder doubles as "generator".
        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]

        ################# Define architecture
        if len(self._x_dim) == 1:
            # Vector data: decoder ends in a dense output layer of width x_dim.
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            # Image data: flatten before the encoder heads; the decoder's last
            # existing layer is renamed to be the output.
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        # Two dense heads on the shared encoder trunk: latent mean and
        # (presumably log-variance — see the exp(0.5 * std) below) heads.
        last_layer_mean = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Mean"
            }
        ]
        last_layer_std = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Std"
            }
        ]

        # NOTE(review): both encoders share the name "Encoder" — presumably so
        # the trunk variables are shared between the two heads; confirm that
        # Encoder reuses variables by name.
        self._encoder_mean = Encoder(self._enc_architecture +
                                     [last_layer_mean],
                                     name="Encoder")
        self._encoder_std = Encoder(self._enc_architecture + [last_layer_std],
                                    name="Encoder")
        self._generator = ConditionalDecoder(self._gen_architecture,
                                             name="Generator")

        # NOTE(review): _encoder_std intentionally omitted here — verify that
        # _nets is only used where the shared-name encoder suffices.
        self._nets = [self._generator, self._encoder_mean]

        ################# Connect inputs and networks
        # Concatenate data with its label so the encoder is condition-aware.
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._mod_X_input = image_condition_concat(
                    inputs=self._X_input, condition=self._Y_input, name="real")

        self._mean_layer = self._encoder_mean.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._std_layer = self._encoder_std.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        # Reparameterization trick: z = mu + exp(0.5 * log_var) * eps,
        # with eps supplied through the Z placeholder.
        self._output_enc = self._mean_layer + tf.exp(
            0.5 * self._std_layer) * self._Z_input

        with tf.name_scope("InputsGenerator"):
            self._dec_input = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="latent")

        # Two decoder paths: reconstruction from the encoded sample, and
        # generation straight from prior noise (mod_Z_input).
        self._output_dec = self._generator.generate_net(
            self._dec_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_dec_from_encoding = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #5
0
class CVAE(ConditionalGenerativeModel):
    """Conditional variational autoencoder (TF1 graph mode).

    An encoder maps a (sample, label) pair to mean and log-variance heads of
    a Gaussian latent; the reparameterization trick draws a latent sample
    that a conditional decoder maps back to data space.
    """

    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 dec_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./VAE",
                 image_shape=None,
                 append_y_at_every_layer=None):
        """Build the encoder/decoder graph and wire all inputs.

        x_dim/y_dim/z_dim are the data, label and latent dimensions; the
        architectures are lists of [layer_fn, kwargs] pairs.
        """
        super(CVAE, self).__init__(x_dim, y_dim, z_dim,
                                   [dec_architecture, enc_architecture],
                                   last_layer_activation, folder, image_shape,
                                   append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]

        ################# Define architecture
        if len(self._x_dim) == 1:
            # Vector data: decoder ends in a dense output layer.
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            # Image data: flatten before the encoder heads; the decoder's
            # last existing layer becomes the output.
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        # Two dense heads on the shared encoder trunk: latent mean and
        # log-variance (see the exp(0.5 * std) reparameterization below).
        last_layer_mean = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Mean"
            }
        ]
        last_layer_std = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Std"
            }
        ]

        # NOTE(review): both encoders share the name "Encoder" — presumably
        # so the trunk variables are shared between the heads; confirm in Encoder.
        self._encoder_mean = Encoder(self._enc_architecture +
                                     [last_layer_mean],
                                     name="Encoder")
        self._encoder_std = Encoder(self._enc_architecture + [last_layer_std],
                                    name="Encoder")
        self._generator = ConditionalDecoder(self._gen_architecture,
                                             name="Generator")

        self._nets = [self._generator, self._encoder_mean]

        ################# Connect inputs and networks
        # Concatenate data with its label so the encoder is condition-aware.
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._mod_X_input = image_condition_concat(
                    inputs=self._X_input, condition=self._Y_input, name="real")

        self._mean_layer = self._encoder_mean.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._std_layer = self._encoder_std.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        # Reparameterization trick: z = mu + exp(0.5 * log_var) * eps.
        self._output_enc = self._mean_layer + tf.exp(
            0.5 * self._std_layer) * self._Z_input

        with tf.name_scope("InputsGenerator"):
            self._dec_input = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="latent")

        # Two decoder paths: reconstruction from the encoded sample, and
        # generation straight from prior noise.
        self._output_dec = self._generator.generate_net(
            self._dec_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_dec_from_encoding = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()

    def compile(self,
                logged_images=None,
                logged_labels=None,
                learning_rate=0.0001,
                optimizer=tf.train.AdamOptimizer):
        """Define the VAE loss, attach the optimizer and set up summaries."""
        self._define_loss()
        with tf.name_scope("Optimizer"):
            vae_optimizer = optimizer(learning_rate=learning_rate)
            self._vae_optimizer = vae_optimizer.minimize(self._vae_loss,
                                                         name="VAE")
        self._summarise(logged_images=logged_images,
                        logged_labels=logged_labels)

    def _define_loss(self):
        """Build the negative ELBO: Bernoulli reconstruction term plus KL."""
        with tf.name_scope("Loss") as scope:
            # Bernoulli log-likelihood of the input under the decoder output;
            # 1e-10 guards against log(0).
            self._data_fidelity_loss = self._X_input * tf.log(
                1e-10 + self._output_dec) + (
                    1 - self._X_input) * tf.log(1e-10 + 1 - self._output_dec)
            self._data_fidelity_loss = -tf.reduce_sum(self._data_fidelity_loss,
                                                      1)

            # Closed-form KL(N(mu, sigma^2) || N(0, 1)) with _std_layer
            # interpreted as log-variance.
            self._KLdiv = 1 + self._std_layer - tf.square(
                self._mean_layer) - tf.exp(self._std_layer)
            self._KLdiv = -0.5 * tf.reduce_sum(self._KLdiv, 1)

            self._vae_loss = tf.reduce_mean(self._data_fidelity_loss +
                                            self._KLdiv)
            tf.summary.scalar("Loss", self._vae_loss)

    def train(self,
              x_train,
              y_train,
              x_test=None,
              y_test=None,
              epochs=100,
              batch_size=64,
              steps=5,
              log_step=3,
              gpu_options=None):
        """Run the training loop for `epochs` epochs, logging every `log_step`.

        Each pass over the training set runs `steps` optimizer steps per
        batch of `batch_size` examples.
        """
        self._set_up_training(log_step=log_step, gpu_options=gpu_options)
        self._set_up_test_train_sample(x_train, y_train, x_test, y_test)
        self._log_results(epoch=0, epoch_time=0)
        for epoch in range(epochs):
            loss_epoch = 0
            # time.clock() was removed in Python 3.8; perf_counter is the
            # recommended monotonic replacement for wall-time measurement.
            start = time.perf_counter()
            trained_examples = 0
            while trained_examples < len(x_train):
                loss_batch = self._optimize(self._trainset, batch_size, steps)
                trained_examples += batch_size
                loss_epoch += loss_batch

            epoch_train_time = (time.perf_counter() - start) / 60
            loss_epoch = np.round(loss_epoch, 2)

            # Report 1-based epochs, consistent with the _log call below.
            print("Epoch {}: Loss: {}.".format(epoch + 1, loss_epoch))

            if log_step is not None:
                self._log(epoch + 1, epoch_train_time)

    def _optimize(self, dataset, batch_size, steps):
        """Run `steps` optimizer updates on fresh batches; return the last loss."""
        for i in range(steps):
            current_batch_x, current_batch_y = dataset.get_next_batch(
                batch_size)
            Z_noise = self.sample_noise(n=len(current_batch_x))
            _, loss_batch = self._sess.run(
                [self._vae_optimizer, self._vae_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Y_input: current_batch_y,
                    self._Z_input: Z_noise,
                    self._is_training: True
                })
        return loss_batch

    def decode(self, inpt_x, is_encoded):
        """Decode latent codes (or raw inputs, which are encoded first) to data space."""
        if not is_encoded:
            inpt_x = self._encoder_mean.encode(noise=inpt_x, sess=self._sess)
        return self._generator.decode(inpt_x, self._sess)
Пример #6
0
    def __init__(self,
                 x_dim,
                 z_dim,
                 enc_dim,
                 gen_architecture,
                 enc_architecture,
                 dec_architecture,
                 last_layer_activation,
                 folder="./BEGAN",
                 image_shape=None):
        """Assemble the BEGAN graph: a generator plus an autoencoder critic."""
        super(BEGAN, self).__init__(
            x_dim, z_dim,
            [gen_architecture, enc_architecture, dec_architecture],
            last_layer_activation, folder, image_shape)

        # Unpack the architectures stored by the base class.
        self._gen_architecture, self._enc_architecture, self._dec_architecture = self._architectures

        self._enc_dim = enc_dim
        # Equilibrium bookkeeping: k_t balance coefficient, gamma diversity
        # ratio, lambda step size for k_t updates during training.
        self._kt = 0
        self._gamma = 1.
        self._lambda = 0.001

        ################# Define architecture
        flat_input = len(self._x_dim) == 1
        if flat_input:
            # Vector data: generator and decoder both finish with a dense head.
            head_kwargs = {
                "units": x_dim,
                "activation": self._last_layer_activation,
                "name": "Output"
            }
            self._gen_architecture.append([logged_dense, dict(head_kwargs)])
            self._dec_architecture.append([logged_dense, dict(head_kwargs)])
        else:
            # Image data: flatten before the encoder bottleneck; the
            # generator's last existing layer becomes the output.
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        # Encoder always terminates in a tanh bottleneck of width enc_dim.
        self._enc_architecture.append([
            logged_dense, {
                "units": enc_dim,
                "activation": tf.nn.tanh,
                "name": "Output"
            }
        ])

        self._generator = Generator(self._gen_architecture, name="Generator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")
        self._decoder = Encoder(self._dec_architecture, name="Decoder")

        self._nets = [self._generator, self._encoder, self._decoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(
            self._Z_input, tf_trainflag=self._is_training)

        self._output_encoder_real = self._encoder.generate_net(
            self._X_input, tf_trainflag=self._is_training)
        self._output_encoder_fake = self._encoder.generate_net(
            self._output_gen, tf_trainflag=self._is_training)

        self._output_decoder_real = self._decoder.generate_net(
            self._output_encoder_real, tf_trainflag=self._is_training)
        self._output_decoder_fake = self._decoder.generate_net(
            self._output_encoder_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #7
0
class BEGAN(GenerativeModel):
    """Boundary Equilibrium GAN (TF1 graph mode).

    The critic is an autoencoder; the generator is trained to minimize the
    reconstruction error of its samples, while the autoencoder balances real
    and fake reconstruction losses via the k_t equilibrium term.
    """

    def __init__(self,
                 x_dim,
                 z_dim,
                 enc_dim,
                 gen_architecture,
                 enc_architecture,
                 dec_architecture,
                 last_layer_activation,
                 folder="./BEGAN",
                 image_shape=None):
        """Build the generator and the encoder/decoder critic and wire inputs."""
        super(BEGAN, self).__init__(
            x_dim, z_dim,
            [gen_architecture, enc_architecture, dec_architecture],
            last_layer_activation, folder, image_shape)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]
        self._dec_architecture = self._architectures[2]

        self._enc_dim = enc_dim
        # Equilibrium bookkeeping: k_t balance coefficient, gamma diversity
        # ratio, lambda step size for k_t updates (see train()).
        self._kt = 0
        self._gamma = 1.
        self._lambda = 0.001

        ################# Define architecture
        if len(self._x_dim) == 1:
            # Vector data: generator and decoder both finish with a dense head.
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
            self._dec_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            # Image data: flatten before the encoder bottleneck; the
            # generator's last existing layer becomes the output.
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        # Encoder always terminates in a tanh bottleneck of width enc_dim.
        self._enc_architecture.append([
            logged_dense, {
                "units": enc_dim,
                "activation": tf.nn.tanh,
                "name": "Output"
            }
        ])

        self._generator = Generator(self._gen_architecture, name="Generator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")
        self._decoder = Encoder(self._dec_architecture, name="Decoder")

        self._nets = [self._generator, self._encoder, self._decoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(
            self._Z_input, tf_trainflag=self._is_training)

        self._output_encoder_real = self._encoder.generate_net(
            self._X_input, tf_trainflag=self._is_training)
        self._output_encoder_fake = self._encoder.generate_net(
            self._output_gen, tf_trainflag=self._is_training)

        self._output_decoder_real = self._decoder.generate_net(
            self._output_encoder_real, tf_trainflag=self._is_training)
        self._output_decoder_fake = self._decoder.generate_net(
            self._output_encoder_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()

    def compile(self,
                learning_rate_gen=0.0001,
                learning_rate_ae=0.0001,
                optimizer=tf.train.AdamOptimizer):
        """Define the losses and one optimizer each for generator and autoencoder."""
        self._define_loss()
        with tf.name_scope("Optimizer"):
            gen_optimizer = optimizer(learning_rate=learning_rate_gen)
            self._gen_optimizer = gen_optimizer.minimize(
                self._gen_loss,
                var_list=self._get_vars("Generator"),
                name="Generator")
            ae_optimizer = optimizer(learning_rate=learning_rate_ae)
            self._ae_optimizer = ae_optimizer.minimize(
                self._ae_loss,
                var_list=self._get_vars("Encoder") + self._get_vars("Decoder"),
                name="Autoencoder")
        self._summarise()

    def _define_loss(self):
        """Build the BEGAN L1 reconstruction losses and the global convergence measure."""
        with tf.name_scope("Loss") as scope:
            self._loss_real = tf.reduce_mean(
                tf.abs(self._X_input - self._output_decoder_real))
            self._loss_fake = tf.reduce_mean(
                tf.abs(self._output_gen - self._output_decoder_fake))
            self._gen_loss = self._loss_fake
            tf.summary.scalar("Generator_loss", self._gen_loss)
            # Autoencoder objective: reconstruct real data while (weighted by
            # k_t) failing to reconstruct fakes.
            self._ae_loss = self._loss_real - self._kt * self._loss_fake
            tf.summary.scalar("Autoencoder_loss", self._ae_loss)
            # Convergence measure M_global from the BEGAN paper.
            self._global_loss = self._loss_real + tf.abs(
                self._gamma * self._loss_real - self._loss_fake)
            tf.summary.scalar("Global_loss", self._global_loss)

    def train(self,
              x_train,
              x_test=None,
              epochs=100,
              batch_size=64,
              log_step=3,
              steps=None,
              gpu_options=None):
        """Run the training loop, updating k_t after every batch."""
        self._set_up_training(log_step=log_step, gpu_options=gpu_options)
        self._set_up_test_train_sample(x_train, x_test)
        self._log_results(epoch=0, epoch_time=0)
        for epoch in range(epochs):
            ae_loss_epoch = 0
            gen_loss_epoch = 0
            # time.clock() was removed in Python 3.8; perf_counter is the
            # recommended monotonic replacement for wall-time measurement.
            start = time.perf_counter()
            trained_examples = 0
            while trained_examples < len(x_train):
                ae_loss_batch, gen_loss_batch, loss_real_batch, loss_fake_batch = self._optimize(
                    self._trainset, batch_size)
                trained_examples += batch_size

                ae_loss_epoch += ae_loss_batch
                gen_loss_epoch += gen_loss_batch

                # Proportional control of k_t, clipped to [0, 1].
                self._kt = np.maximum(
                    np.minimum(
                        1., self._kt + self._lambda *
                        (self._gamma * loss_real_batch - loss_fake_batch)), 0.)

            epoch_train_time = (time.perf_counter() - start) / 60
            ae_loss_epoch = np.round(ae_loss_epoch, 2)
            gen_loss_epoch = np.round(gen_loss_epoch, 2)

            print("Epoch {}: Autoencoder: {} \n\t\t\tGenerator: {}.".format(
                epoch + 1, ae_loss_epoch, gen_loss_epoch))

            if self._log_step is not None:
                self._log(epoch + 1, epoch_train_time)

    def _optimize(self, dataset, batch_size):
        """One autoencoder update followed by one generator update; return batch losses."""
        current_batch_x = dataset.get_next_batch(batch_size)
        Z_noise = self.sample_noise(n=len(current_batch_x))
        _, ae_loss_batch, loss_real_batch, loss_fake_batch = self._sess.run(
            [
                self._ae_optimizer, self._ae_loss, self._loss_real,
                self._loss_fake
            ],
            feed_dict={
                self._X_input: current_batch_x,
                self._Z_input: Z_noise,
                self._is_training: True
            })

        # Fresh noise for the generator step.
        Z_noise = self._generator.sample_noise(n=len(current_batch_x))
        _, gen_loss_batch = self._sess.run(
            [self._gen_optimizer, self._gen_loss],
            feed_dict={
                self._Z_input: Z_noise,
                self._is_training: True
            })

        return ae_loss_batch, gen_loss_batch, loss_real_batch, loss_fake_batch
Пример #8
0
class CC_CWGAN1(ConditionalGenerativeModel):
    """Conditional Wasserstein GAN with weight clipping and an auxiliary net.

    The generator produces samples conditioned on a label, the critic scores
    (sample, label) pairs, and the auxiliary encoder reconstructs the label
    from generated samples (trained jointly with the generator).
    """

    def __init__(self, x_dim, y_dim, z_dim, gen_architecture, critic_architecture, aux_architecture,
                 last_layer_activation, folder="./CC_CWGAN1", image_shape=None, append_y_at_every_layer=None
        ):
        """Build generator, critic and auxiliary networks and wire the graph.

        The three architectures are lists of [layer_fn, kwargs] pairs; an
        output layer is appended to each of them here.
        """
        super(CC_CWGAN1, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, critic_architecture, aux_architecture],
                                   last_layer_activation, folder, image_shape, append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._critic_architecture = self._architectures[1]
        self._aux_architecture = self._architectures[2]

        ################# Define architecture
        self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}])
        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")

        # Wasserstein critic: unbounded scalar output (tf.identity, no sigmoid).
        self._critic_architecture.append([logged_dense, {"units": 1, "activation": tf.identity, "name": "Output"}])
        self._critic = Critic(self._critic_architecture, name="Critic")

        self._aux_architecture.append([logged_dense, {"units": y_dim, "activation": tf.identity, "name": "Output"}])
        self._aux = Encoder(self._aux_architecture, name="Auxiliary")

        self._nets = [self._generator, self._critic, self._aux]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        with tf.name_scope("InputsCritic"):
            self._input_fake = tf.concat(axis=1, values=[self._output_gen, self._Y_input], name="fake")
            self._input_real = tf.concat(axis=1, values=[self._X_input, self._Y_input], name="real")

        self._output_aux = self._aux.generate_net(self._output_gen)
        # Build each critic head exactly once. (The original built both heads
        # twice -- first without the training flag -- and immediately discarded
        # the first pair, adding dead ops to the graph.)
        self._output_critic_real = self._critic.generate_net(self._input_real,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)
        self._output_critic_fake = self._critic.generate_net(self._input_fake,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()


    def compile(self, logged_images=None, logged_labels=None, learning_rate_gen=0.0001, learning_rate_critic=0.0001,
                learning_rate_aux=0.0001, optimizer=tf.train.RMSPropOptimizer):
        """Create losses, one optimizer per sub-network, and the clip ops.

        The auxiliary loss trains both the Generator and Auxiliary variables;
        weight clipping on the critic enforces the WGAN Lipschitz constraint.
        """
        self._define_loss()
        with tf.name_scope("Optimizer"):
            gen_optimizer = optimizer(learning_rate=learning_rate_gen)
            self._gen_optimizer = gen_optimizer.minimize(self._gen_loss, var_list=self._get_vars(scope="Generator"), name="Generator")
            critic_optimizer = optimizer(learning_rate=learning_rate_critic)
            self._critic_optimizer = critic_optimizer.minimize(self._critic_loss, var_list=self._get_vars(scope="Critic"), name="Critic")
            aux_optimizer = optimizer(learning_rate=learning_rate_aux)
            self._aux_optimizer = aux_optimizer.minimize(self._aux_loss, var_list=self._get_vars(scope="Generator")+self._get_vars(scope="Auxiliary"), name="Auxiliary")
        self._clip_critic_param = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in self._get_vars("Critic")]
        self._summarise(logged_images=logged_images, logged_labels=logged_labels)


    def _define_loss(self):
        """Wasserstein losses for generator/critic; L1 label loss for auxiliary."""
        with tf.name_scope("Loss") as scope:
            self._gen_loss = -tf.reduce_mean(self._output_critic_fake)
            tf.summary.scalar("Generator_loss", self._gen_loss)
            self._critic_loss = -(tf.reduce_mean(self._output_critic_real) - tf.reduce_mean(self._output_critic_fake))
            tf.summary.scalar("Critic_loss", self._critic_loss)
            self._aux_loss = tf.reduce_mean(tf.abs(self._Y_input - self._output_aux))
            tf.summary.scalar("Auxiliary_loss", self._aux_loss)


    def train(self, x_train, y_train, x_test=None, y_test=None, epochs=100, batch_size=64, gen_steps=1, critic_steps=5, log_step=3):
        """Train for `epochs` passes over x_train, alternating critic and
        generator/auxiliary updates, and log every `log_step` epochs."""
        self._set_up_training(log_step=log_step)
        self._set_up_test_train_sample(x_train, y_train, x_test, y_test)
        self._log_results(epoch=0, epoch_time=0)
        for epoch in range(epochs):
            critic_loss_epoch = 0
            gen_loss_epoch = 0
            aux_loss_epoch = 0
            # NOTE(review): time.clock() was removed in Python 3.8; kept for
            # consistency with the rest of this (TF1-era) file.
            start = time.clock()
            trained_examples = 0
            while trained_examples < len(x_train):
                critic_loss_batch, gen_loss_batch, aux_loss_batch = self._optimize(self._trainset, batch_size, critic_steps, gen_steps)
                trained_examples += batch_size

                critic_loss_epoch += critic_loss_batch
                gen_loss_epoch += gen_loss_batch
                aux_loss_epoch += aux_loss_batch

            epoch_train_time = (time.clock() - start)/60
            critic_loss_epoch = np.round(critic_loss_epoch, 2)
            gen_loss_epoch = np.round(gen_loss_epoch, 2)
            aux_loss_epoch = np.round(aux_loss_epoch, 2)  # was left unrounded

            # Report epochs 1-based, matching the _log() call below and the
            # _log_results(epoch=0) baseline above.
            print("Epoch {}: Critic: {} \n\t\t\tGenerator: {}\n\t\t\tAuxiliary: {}.".format(epoch + 1, critic_loss_epoch, gen_loss_epoch, aux_loss_epoch))

            if log_step is not None:
                self._log(epoch+1, epoch_train_time)


    def _optimize(self, dataset, batch_size, critic_steps, gen_steps):
        """Run `critic_steps` critic updates (with weight clipping) followed by
        `gen_steps` generator + auxiliary updates; return the last batch losses."""
        for i in range(critic_steps):
            current_batch_x, current_batch_y = dataset.get_next_batch(batch_size)
            Z_noise = self.sample_noise(n=len(current_batch_x))
            # The clip op is fetched only for its side effect; discard its value.
            _, critic_loss_batch, _ = self._sess.run([
                                            self._critic_optimizer, self._critic_loss, self._clip_critic_param
                                            ],
                                            feed_dict={self._X_input: current_batch_x, self._Y_input: current_batch_y,
                                            self._Z_input: Z_noise, self._is_training: True
            })

        for _ in range(gen_steps):
            Z_noise = self._generator.sample_noise(n=len(current_batch_x))
            _, gen_loss_batch = self._sess.run([self._gen_optimizer, self._gen_loss],
                                               feed_dict={self._Z_input: Z_noise, self._Y_input: current_batch_y,
                                               self._is_training: True})
            _, aux_loss_batch = self._sess.run([self._aux_optimizer, self._aux_loss],
                                               feed_dict={self._Z_input: Z_noise, self._Y_input: current_batch_y,
                                               self._is_training: True})
        return critic_loss_batch, gen_loss_batch, aux_loss_batch


    def predict(self, inpt_x, inpt_y, is_encoded):
        """Return critic scores for (x, y) pairs.

        If `is_encoded` is True, `inpt_x` is treated as latent noise and is
        first mapped through the generator (conditioned via `_Y_input_oneHot`)
        before being scored.
        """
        if is_encoded:
            inpt_x = self._sess.run(self._mod_Z_input, feed_dict={self._Z_input: inpt_x, self._Y_input_oneHot: inpt_y})
            inpt_x = self._generator.generate_samples(noise=inpt_x, sess=self._sess)
        inpt = self._sess.run(self._input_real, feed_dict={self._X_input: inpt_x, self._Y_input: inpt_y})
        return self._critic.predict(inpt, self._sess)
Пример #9
0
    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 gen_architecture,
                 disc_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./Results/CBiGAN_log",
                 image_shape=None,
                 append_y_at_every_layer=None):
        """Build generator, discriminator and encoder and connect the graph.

        The three architectures are lists of [layer_fn, kwargs] pairs; output
        layers (and, for image data, flatten layers) are appended below before
        the networks are instantiated.
        """
        super(CBiGAN, self).__init__(
            x_dim, y_dim, z_dim,
            [gen_architecture, disc_architecture, enc_architecture],
            last_layer_activation, folder, image_shape,
            append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._enc_architecture = self._architectures[2]

        ################# Define architecture
        # Vector data (len(x_dim) == 1) gets a dense generator output layer;
        # image data keeps the provided generator output layer (renamed to
        # "Output") and flattens discriminator/encoder features instead.
        if len(self._x_dim) == 1:
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            self._disc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        self._disc_architecture.append([
            logged_dense, {
                "units": 1,
                "activation": tf.sigmoid,
                "name": "Output"
            }
        ])
        # Encoder maps data back to the z_dim latent space (linear output).
        self._enc_architecture.append([
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Output"
            }
        ])

        self._generator = ConditionalGenerator(self._gen_architecture,
                                               name="Generator")
        self._discriminator = Discriminator(self._disc_architecture,
                                            name="Discriminator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")

        self._nets = [self._generator, self._discriminator, self._encoder]

        ################# Connect inputs and networks
        # Encoder input: data concatenated with the condition.
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1,
                    values=[self._X_input, self._Y_input],
                    name="modified_x")
            else:
                self._mod_X_input = image_condition_concat(
                    images=self._X_input, condition=self._Y_input, name="fake")

        self._output_gen = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_enc = self._encoder.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        # BiGAN-style discriminator inputs are joint (data, latent) pairs:
        # fake = (generated x, conditioned noise), real = (x, encoded z + label).
        with tf.name_scope("InputsCritic"):
            self._mod_output_enc = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="modified_encoder")
            if len(self._x_dim) == 1:
                self._disc_input_fake = tf.concat(
                    axis=1,
                    values=[self._output_gen, self._mod_Z_input],
                    name="fake")
                self._disc_input_real = tf.concat(
                    axis=1,
                    values=[self._X_input, self._mod_output_enc],
                    name="real")
            else:
                self._disc_input_fake = image_condition_concat(
                    inputs=self._output_gen,
                    condition=self._mod_Z_input,
                    name="fake")
                self._disc_input_real = image_condition_concat(
                    inputs=self._X_input,
                    condition=self._mod_output_enc,
                    name="real")

        self._output_disc_fake = self._discriminator.generate_net(
            self._disc_input_fake,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_disc_real = self._discriminator.generate_net(
            self._disc_input_real,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #10
0
class InfoGAN(ConditionalGenerativeModel):
    """InfoGAN: a GAN whose auxiliary network recovers the categorical latent
    code from generated samples, maximising a mutual-information lower bound.

    The code is sampled internally via `sample_condition`, so `train` only
    needs unlabeled data.
    """

    def __init__(self, x_dim, y_dim, z_dim, gen_architecture, disc_architecture, aux_architecture, last_layer_activation,
                 folder="./CWGAN", image_shape=None
        ):
        """Build generator, discriminator and auxiliary nets and wire the graph.

        x_dim / y_dim / z_dim are the sizes of the data, categorical code and
        noise vectors; architectures are lists of [layer_fn, kwargs] pairs with
        output layers appended here.
        """
        super(InfoGAN, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, disc_architecture, aux_architecture],
                                   last_layer_activation, folder, image_shape, None)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._aux_architecture = self._architectures[2]

        ################# Define architecture
        self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": tf.nn.sigmoid, "name": "Output"}])
        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")

        self._disc_architecture.append([logged_dense, {"units": 1, "activation": tf.nn.sigmoid, "name": "Output"}])
        self._disc = Discriminator(self._disc_architecture, name="Discriminator")

        # Softmax head: the auxiliary net predicts the categorical code.
        self._aux_architecture.append([logged_dense, {"units": y_dim, "activation": tf.nn.softmax, "name": "Output"}])
        self._aux = Encoder(self._aux_architecture, name="Auxiliary")

        self._nets = [self._generator, self._disc, self._aux]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input)
        self._output_disc_fake = self._disc.generate_net(self._output_gen)
        self._output_disc_real = self._disc.generate_net(self._X_input)
        self._output_aux = self._aux.generate_net(self._output_gen)

        ################# Finalize
        self._init_folders()
        self._verify_init()


    def compile(self, learning_rate=0.0003, learning_rate_gen=None, learning_rate_disc=None, learning_rate_aux=None, optimizer=tf.train.RMSPropOptimizer):
        """Create the losses and one optimizer per sub-network.

        Any per-network learning rate left as None falls back to
        `learning_rate`. The auxiliary loss trains both the Auxiliary and
        Generator variables (mutual-information term).
        """
        self._define_loss()
        if learning_rate_gen is None:
            learning_rate_gen = learning_rate
        if learning_rate_disc is None:
            learning_rate_disc = learning_rate
        if learning_rate_aux is None:
            learning_rate_aux = learning_rate
        with tf.name_scope("Optimizer"):
            gen_optimizer = optimizer(learning_rate=learning_rate_gen)
            self._gen_optimizer = gen_optimizer.minimize(self._gen_loss, var_list=self._get_vars(scope="Generator"), name="Generator")
            disc_optimizer = optimizer(learning_rate=learning_rate_disc)
            self._disc_optimizer = disc_optimizer.minimize(self._disc_loss, var_list=self._get_vars(scope="Discriminator"), name="Discriminator")
            aux_optimizer = optimizer(learning_rate=learning_rate_aux)
            self._aux_optimizer = aux_optimizer.minimize(self._aux_loss, var_list=self._get_vars("Auxiliary")+self._get_vars("Generator"), name="Auxiliary")
        self._summarise(logged_labels=np.identity(self._y_dim))


    def _define_loss(self):
        """Non-saturating GAN losses plus the mutual-information (aux) term."""
        with tf.name_scope("Loss") as scope:
            self._gen_loss = -tf.reduce_mean(tf.log(self._output_disc_fake+0.00001))
            tf.summary.scalar("Generator_loss", self._gen_loss)
            self._disc_loss = -tf.reduce_mean(tf.log(self._output_disc_real+0.00001) + tf.log(1.0-self._output_disc_fake+0.00001))
            tf.summary.scalar("Discriminator_loss", self._disc_loss)
            # Mean cross-entropy between the sampled code and the aux prediction.
            # BUG FIX: the original wrapped this in an extra minus sign
            # (-mean(-sum(y*log(q)))), so minimising it *maximised* the
            # cross-entropy; minimise mean(-sum(y*log(q))) instead.
            self._aux_loss = tf.reduce_mean(-tf.reduce_sum(tf.log(self._output_aux+0.00001)*self._Y_input, 1))
            tf.summary.scalar("Auxiliary_loss", self._aux_loss)


    def train(self, x_train, epochs=100, batch_size=64, gen_steps=1, disc_steps=1, log_step=3):
        """Train on unlabeled x_train; codes are sampled via sample_condition."""
        self._set_up_training(log_step=log_step)
        self._trainset = Dataset(x_train)
        # Fixed evaluation sample: at most 5000 training points plus matching
        # codes and noise for logging.
        nr_test_samples = min(len(x_train), 5000)
        self._x_test = self._trainset.sample(nr_test_samples)
        self._y_test = self.sample_condition(n=len(self._x_test))
        self._z_test = self._generator.sample_noise(n=nr_test_samples)
        for epoch in range(epochs):
            disc_loss_epoch = 0
            gen_loss_epoch = 0
            aux_loss_epoch = 0
            # NOTE(review): time.clock() was removed in Python 3.8; kept for
            # consistency with the rest of this (TF1-era) file.
            start = time.clock()
            trained_examples = 0
            while trained_examples < len(x_train):
                disc_loss_batch, gen_loss_batch, aux_loss_batch = self._optimize(self._trainset, batch_size, disc_steps, gen_steps)
                trained_examples += batch_size

                disc_loss_epoch += disc_loss_batch
                gen_loss_epoch += gen_loss_batch
                aux_loss_epoch += aux_loss_batch

            epoch_train_time = (time.clock() - start)/60
            disc_loss_epoch = np.round(disc_loss_epoch, 2)
            gen_loss_epoch = np.round(gen_loss_epoch, 2)
            aux_loss_epoch = np.round(aux_loss_epoch, 2)

            print("Epoch {}: Discriminator: {}\n\t\t\tGenerator: {}\n\t\t\tAuxiliary: {}.".format(epoch, disc_loss_epoch, gen_loss_epoch, aux_loss_epoch))

            if log_step is not None:
                self._log(epoch, epoch_train_time)


    def _optimize(self, dataset, batch_size, disc_steps, gen_steps):
        """Run `disc_steps` discriminator updates, then `gen_steps` generator
        and auxiliary updates; return the last batch losses."""
        for i in range(disc_steps):
            current_batch_x = dataset.get_next_batch(batch_size)
            Z_noise = self.sample_noise(n=len(current_batch_x))
            C_noise = self.sample_condition(n=len(current_batch_x))
            _, disc_loss_batch = self._sess.run([self._disc_optimizer, self._disc_loss],
                                                feed_dict={self._X_input: current_batch_x, self._Y_input: C_noise, self._Z_input: Z_noise
            })

        for _ in range(gen_steps):
            Z_noise = self.sample_noise(n=len(current_batch_x))
            C_noise = self.sample_condition(n=len(current_batch_x))
            _, gen_loss_batch = self._sess.run([self._gen_optimizer, self._gen_loss], feed_dict={self._Y_input: C_noise, self._Z_input: Z_noise})
            _, aux_loss_batch = self._sess.run([self._aux_optimizer, self._aux_loss], feed_dict={self._Y_input: C_noise, self._Z_input: Z_noise})
        return disc_loss_batch, gen_loss_batch, aux_loss_batch


    def sample_condition(self, n):
        """Sample n one-hot categorical codes, uniform over the y_dim classes."""
        return np.random.multinomial(1, self._y_dim*[1/self._y_dim], size=n)


    def predict(self, inpt_x, is_encoded, inpt_c=None):
        """Return discriminator scores for inpt_x.

        If `is_encoded` is True, inpt_x is latent noise: it is concatenated
        with the conditional code inpt_c and mapped through the generator
        before scoring.
        """
        if is_encoded:
            if inpt_c is None:
                raise ValueError("If input is encoded, the conditional input is also needed")
            else:
                # BUG FIX: the original assigned to `input_x` (typo) and then
                # used the un-concatenated `inpt_x`, discarding the code.
                inpt_x = np.concatenate((inpt_x, inpt_c), axis=1)
            inpt_x = self._generator.generate_samples(noise=inpt_x, sess=self._sess)
        return self._disc.predict(inpt_x, self._sess)
Пример #11
0
class CBiGAN(ConditionalGenerativeModel):
    """Conditional bidirectional GAN (BiGAN).

    A generator maps (noise, label) to data while an encoder maps (data,
    label) back to latent space; the discriminator scores joint (data, latent)
    pairs, so generator and encoder are trained to be mutually consistent.
    """

    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 gen_architecture,
                 disc_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./Results/CBiGAN_log",
                 image_shape=None,
                 append_y_at_every_layer=None):
        """Build generator, discriminator and encoder and connect the graph.

        The three architectures are lists of [layer_fn, kwargs] pairs; output
        layers (and, for image data, flatten layers) are appended below before
        the networks are instantiated.
        """
        super(CBiGAN, self).__init__(
            x_dim, y_dim, z_dim,
            [gen_architecture, disc_architecture, enc_architecture],
            last_layer_activation, folder, image_shape,
            append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._enc_architecture = self._architectures[2]

        ################# Define architecture
        # Vector data (len(x_dim) == 1) gets a dense generator output layer;
        # image data keeps the provided generator output layer (renamed to
        # "Output") and flattens discriminator/encoder features instead.
        if len(self._x_dim) == 1:
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            self._disc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        self._disc_architecture.append([
            logged_dense, {
                "units": 1,
                "activation": tf.sigmoid,
                "name": "Output"
            }
        ])
        # Encoder maps data back to the z_dim latent space (linear output).
        self._enc_architecture.append([
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Output"
            }
        ])

        self._generator = ConditionalGenerator(self._gen_architecture,
                                               name="Generator")
        self._discriminator = Discriminator(self._disc_architecture,
                                            name="Discriminator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")

        self._nets = [self._generator, self._discriminator, self._encoder]

        ################# Connect inputs and networks
        # Encoder input: data concatenated with the condition.
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1,
                    values=[self._X_input, self._Y_input],
                    name="modified_x")
            else:
                self._mod_X_input = image_condition_concat(
                    images=self._X_input, condition=self._Y_input, name="fake")

        self._output_gen = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_enc = self._encoder.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        # BiGAN-style discriminator inputs are joint (data, latent) pairs:
        # fake = (generated x, conditioned noise), real = (x, encoded z + label).
        with tf.name_scope("InputsCritic"):
            self._mod_output_enc = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="modified_encoder")
            if len(self._x_dim) == 1:
                self._disc_input_fake = tf.concat(
                    axis=1,
                    values=[self._output_gen, self._mod_Z_input],
                    name="fake")
                self._disc_input_real = tf.concat(
                    axis=1,
                    values=[self._X_input, self._mod_output_enc],
                    name="real")
            else:
                self._disc_input_fake = image_condition_concat(
                    inputs=self._output_gen,
                    condition=self._mod_Z_input,
                    name="fake")
                self._disc_input_real = image_condition_concat(
                    inputs=self._X_input,
                    condition=self._mod_output_enc,
                    name="real")

        self._output_disc_fake = self._discriminator.generate_net(
            self._disc_input_fake,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_disc_real = self._discriminator.generate_net(
            self._disc_input_real,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()

    def compile(self,
                logged_images=None,
                logged_labels=None,
                learning_rate=0.0003,
                learning_rate_gen=None,
                learning_rate_disc=None,
                optimizer=tf.train.AdamOptimizer):
        """Create losses and optimizers.

        Per-network learning rates left as None fall back to `learning_rate`.
        The generator optimizer also trains the Encoder variables, as in the
        BiGAN objective.
        """
        self._define_loss()
        if learning_rate_gen is None:
            learning_rate_gen = learning_rate
        if learning_rate_disc is None:
            learning_rate_disc = learning_rate
        with tf.name_scope("Optimizer"):
            gen_optimizer = optimizer(learning_rate=learning_rate_gen)
            self._gen_optimizer = gen_optimizer.minimize(
                self._gen_loss,
                var_list=self._get_vars(scope="Generator") +
                self._get_vars(scope="Encoder"),
                name="Generator")
            disc_optimizer = optimizer(learning_rate=learning_rate_disc)
            self._disc_optimizer = disc_optimizer.minimize(
                self._disc_loss,
                var_list=self._get_vars(scope="Discriminator"),
                name="Discriminator")
        self._summarise(logged_images=logged_images,
                        logged_labels=logged_labels)

    def _define_loss(self):
        """Symmetric cross-entropy losses on the joint discriminator outputs;
        the small constants guard tf.log against log(0)."""
        with tf.name_scope("Loss") as scope:
            self._gen_loss = -tf.reduce_mean(
                tf.log(self._output_disc_fake + 0.00001) +
                tf.log(1.0 - self._output_disc_real + 0.00001))
            tf.summary.scalar("Generator_loss", self._gen_loss)
            self._disc_loss = -tf.reduce_mean(
                tf.log(self._output_disc_real + 0.00001) +
                tf.log(1.0 - self._output_disc_fake + 0.00001))
            tf.summary.scalar("Discriminator_loss", self._disc_loss)

    def train(self,
              x_train,
              y_train,
              x_test=None,
              y_test=None,
              epochs=100,
              batch_size=64,
              gen_steps=1,
              disc_steps=1,
              steps=None,
              log_step=3,
              gpu_options=None):
        """Train for `epochs` passes over x_train.

        If `steps` is given it overrides the ratio to `steps` discriminator
        updates per single generator update. Accuracy is reported on held-out
        samples (self._x_test / _y_test / _z_test, prepared by
        _set_up_test_train_sample -- presumably; confirm in the base class).
        """
        if steps is not None:
            gen_steps = 1
            disc_steps = steps
        self._set_up_training(log_step=log_step, gpu_options=gpu_options)
        self._set_up_test_train_sample(x_train, y_train, x_test, y_test)
        self._log_results(epoch=0, epoch_time=0)
        for epoch in range(epochs):
            batch_nr = 0
            disc_loss_epoch = 0
            gen_loss_epoch = 0
            # NOTE(review): time.clock() was removed in Python 3.8; this file
            # targets the TF1-era Python it was written for.
            start = time.clock()
            trained_examples = 0
            while trained_examples < len(x_train):
                disc_loss_batch, gen_loss_batch = self._optimize(
                    self._trainset, batch_size, disc_steps, gen_steps)
                trained_examples += batch_size

                disc_loss_epoch += disc_loss_batch
                gen_loss_epoch += gen_loss_batch

            epoch_train_time = (time.clock() - start) / 60
            disc_loss_epoch = np.round(disc_loss_epoch, 2)
            gen_loss_epoch = np.round(gen_loss_epoch, 2)

            acc_real = self.get_accuracy(inpt=self._x_test,
                                         inpt_y=self._y_test,
                                         labels=np.ones(len(self._x_test)),
                                         is_encoded=False)
            acc_fake = self.get_accuracy(inpt=self._z_test,
                                         inpt_y=self._y_test,
                                         labels=np.zeros(len(self._z_test)),
                                         is_encoded=True)
            print(
                "Epoch {}: Discriminator: {} ({})\n\t\t\tGenerator: {} ({}).".
                format(epoch, disc_loss_epoch, acc_real, gen_loss_epoch,
                       acc_fake))

            if log_step is not None:
                self._log(epoch + 1, epoch_train_time)

    def _optimize(self, dataset, batch_size, disc_steps, gen_steps):
        """Run `disc_steps` discriminator updates, then `gen_steps` generator
        (+ encoder) updates on the last batch; return the last batch losses."""
        for i in range(disc_steps):
            current_batch_x, current_batch_y = dataset.get_next_batch(
                batch_size)
            Z_noise = self.sample_noise(n=len(current_batch_x))
            _, disc_loss_batch = self._sess.run(
                [self._disc_optimizer, self._disc_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Y_input: current_batch_y,
                    self._Z_input: Z_noise,
                    self._is_training: True
                })

        for _ in range(gen_steps):
            Z_noise = self.sample_noise(n=len(current_batch_x))
            _, gen_loss_batch = self._sess.run(
                [self._gen_optimizer, self._gen_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Y_input: current_batch_y,
                    self._Z_input: Z_noise,
                    self._is_training: True
                })
        return disc_loss_batch, gen_loss_batch

    def get_accuracy(self, inpt, inpt_y, labels, is_encoded):
        """Discriminator accuracy on (data, latent) pairs.

        `is_encoded` False: `inpt` is data, paired with its encoding.
        `is_encoded` True: `inpt` is noise, paired with the generated sample.
        """
        if not is_encoded:
            inpt = self._sess.run(self._disc_input_real,
                                  feed_dict={
                                      self._X_input: inpt,
                                      self._Y_input: inpt_y,
                                      self._is_training: False
                                  })
        else:
            inpt = self._sess.run(self._disc_input_fake,
                                  feed_dict={
                                      self._Z_input: inpt,
                                      self._Y_input: inpt_y,
                                      self._is_training: False
                                  })
        return self._discriminator.get_accuracy(inpt, labels, self._sess)

    def predict(self, inpt_x, inpt_y, is_encoded):
        """Score inputs with the discriminator.

        NOTE(review): when is_encoded is False, inpt_x is replaced by its
        z_dim encoding and then fed into self._X_input, which elsewhere
        receives x_dim data -- this looks dimensionally inconsistent unless
        x_dim == z_dim; verify against the intended usage.
        """
        if not is_encoded:
            inpt_x = self._encoder.encode(inpt=inpt_x, sess=self._sess)
        inpt_x = self._sess.run(self._mod_X_input,
                                feed_dict={
                                    self._X_input: inpt_x,
                                    self._Y_input: inpt_y,
                                    self._is_training: False
                                })
        return self._discriminator.predict(inpt_x, self._sess)

    def generate_image_from_noise(self, n):
        """Sample n noise vectors and return the generated samples."""
        noise = self.sample_noise(n=n)
        return self.generate_samples(noise, self._sess)
Пример #12
0
    def __init__(self,
                 x_dim,
                 z_dim,
                 gen_architecture,
                 disc_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./WGAN",
                 image_shape=None):
        """Build generator, discriminator and encoder and connect the graph.

        The three architectures are lists of [layer_fn, kwargs] pairs; an
        output layer is appended to each of them below before the networks
        are instantiated.
        """
        super(BiGAN, self).__init__(
            x_dim, z_dim,
            [gen_architecture, disc_architecture, enc_architecture],
            last_layer_activation, folder, image_shape)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._enc_architecture = self._architectures[2]

        ################# Define architecture
        self._gen_architecture.append([
            logged_dense, {
                "units": x_dim,
                "activation": self._last_layer_activation,
                "name": "Output"
            }
        ])
        self._generator = Generator(self._gen_architecture, name="Generator")

        self._disc_architecture.append([
            logged_dense, {
                "units": 1,
                "activation": tf.nn.sigmoid,
                "name": "Output"
            }
        ])
        self._discriminator = Discriminator(self._disc_architecture,
                                            name="Discriminator")

        # Encoder maps data back to the z_dim latent space (linear output).
        self._enc_architecture.append([
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Output"
            }
        ])
        self._encoder = Encoder(self._enc_architecture, name="Encoder")

        self._nets = [self._generator, self._discriminator, self._encoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._Z_input)
        self._output_enc = self._encoder.generate_net(self._X_input)

        # BiGAN discriminator sees joint (data, latent) pairs:
        # fake = (generated x, noise), real = (real x, encoded z).
        with tf.name_scope("InputsDiscriminator"):
            self._disc_input_fake = tf.concat(
                axis=1, values=[self._output_gen, self._Z_input])
            self._disc_input_real = tf.concat(
                axis=1, values=[self._X_input, self._output_enc])

        self._output_disc_fake = self._discriminator.generate_net(
            self._disc_input_fake)
        self._output_disc_real = self._discriminator.generate_net(
            self._disc_input_real)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #13
0
class BiGAN(GenerativeModel):
    """Bidirectional GAN (BiGAN / ALI).

    Trains three networks jointly: a generator G(z), an encoder E(x) and a
    discriminator D. D receives concatenated (sample, code) pairs --
    (G(z), z) as "fake" and (x, E(x)) as "real" -- so that at the optimum
    E approximately inverts G.
    """

    def __init__(self,
                 x_dim,
                 z_dim,
                 gen_architecture,
                 disc_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./WGAN",
                 image_shape=None):
        """Build the generator, discriminator and encoder graphs.

        Parameters
        ----------
        x_dim : int
            Dimensionality of the data space.
        z_dim : int
            Dimensionality of the latent space.
        gen_architecture, disc_architecture, enc_architecture : list
            Layer specifications consumed by the network builder classes.
        last_layer_activation : callable
            Activation used on the generator's output layer.
        folder : str
            Output directory for logs and checkpoints.
        image_shape : tuple or None
            Optional image shape used when logging samples.
        """
        super(BiGAN, self).__init__(
            x_dim, z_dim,
            [gen_architecture, disc_architecture, enc_architecture],
            last_layer_activation, folder, image_shape)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._enc_architecture = self._architectures[2]

        ################# Define architecture
        self._gen_architecture.append([
            logged_dense, {
                "units": x_dim,
                "activation": self._last_layer_activation,
                "name": "Output"
            }
        ])
        self._generator = Generator(self._gen_architecture, name="Generator")

        # Sigmoid head: D emits the probability that its input pair is real.
        self._disc_architecture.append([
            logged_dense, {
                "units": 1,
                "activation": tf.nn.sigmoid,
                "name": "Output"
            }
        ])
        self._discriminator = Discriminator(self._disc_architecture,
                                            name="Discriminator")

        # Linear head: the encoder maps data to unconstrained latent codes.
        self._enc_architecture.append([
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Output"
            }
        ])
        self._encoder = Encoder(self._enc_architecture, name="Encoder")

        self._nets = [self._generator, self._discriminator, self._encoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._Z_input)
        self._output_enc = self._encoder.generate_net(self._X_input)

        # D always sees a (sample, code) pair concatenated on the feature axis.
        with tf.name_scope("InputsDiscriminator"):
            self._disc_input_fake = tf.concat(
                axis=1, values=[self._output_gen, self._Z_input])
            self._disc_input_real = tf.concat(
                axis=1, values=[self._X_input, self._output_enc])

        self._output_disc_fake = self._discriminator.generate_net(
            self._disc_input_fake)
        self._output_disc_real = self._discriminator.generate_net(
            self._disc_input_real)

        ################# Finalize
        self._init_folders()
        self._verify_init()

    def compile(self,
                learning_rate=0.0003,
                learning_rate_gen=None,
                learning_rate_disc=None,
                optimizer=tf.train.AdamOptimizer):
        """Create the losses and per-network optimizers.

        Generator and encoder share one optimizer step (they are trained
        jointly against the discriminator).  The per-network learning rates
        fall back to ``learning_rate`` when not given.
        """
        self._define_loss()
        if learning_rate_gen is None:
            learning_rate_gen = learning_rate
        if learning_rate_disc is None:
            learning_rate_disc = learning_rate
        with tf.name_scope("Optimizer"):
            gen_optimizer = optimizer(learning_rate=learning_rate_gen)
            self._gen_optimizer = gen_optimizer.minimize(
                self._gen_loss,
                var_list=self._get_vars(scope="Generator") +
                self._get_vars(scope="Encoder"),
                name="Generator")
            disc_optimizer = optimizer(learning_rate=learning_rate_disc)
            self._disc_optimizer = disc_optimizer.minimize(
                self._disc_loss,
                var_list=self._get_vars(scope="Discriminator"),
                name="Discriminator")
        self._summarise()

    def _define_loss(self):
        """Minimax cross-entropy losses; the 1e-5 offset keeps the logs
        finite when the sigmoid saturates."""
        with tf.name_scope("Loss") as scope:
            self._gen_loss = -tf.reduce_mean(
                tf.log(self._output_disc_fake + 0.00001) +
                tf.log(1.0 - self._output_disc_real + 0.00001))
            tf.summary.scalar("Generator_loss", self._gen_loss)
            self._disc_loss = -tf.reduce_mean(
                tf.log(self._output_disc_real + 0.00001) +
                tf.log(1.0 - self._output_disc_fake + 0.00001))
            tf.summary.scalar("Discriminator_loss", self._disc_loss)

    def train(self,
              x_train,
              x_test=None,
              epochs=100,
              batch_size=64,
              gen_steps=1,
              disc_steps=1,
              log_step=3):
        """Alternate discriminator and generator/encoder updates over
        ``epochs`` passes of ``x_train``, printing losses and accuracies
        per epoch and logging every ``log_step`` epochs."""
        self._set_up_training(log_step=log_step)
        self._set_up_test_train_sample(x_train, x_test)
        for epoch in range(epochs):
            disc_loss_epoch = 0
            gen_loss_epoch = 0
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # monotonic drop-in replacement.
            start = time.perf_counter()
            trained_examples = 0
            while trained_examples < len(x_train):
                disc_loss_batch, gen_loss_batch = self._optimize(
                    self._trainset, batch_size, disc_steps, gen_steps)
                trained_examples += batch_size

                disc_loss_epoch += disc_loss_batch
                gen_loss_epoch += gen_loss_batch

            epoch_train_time = (time.perf_counter() - start) / 60
            disc_loss_epoch = np.round(disc_loss_epoch, 2)
            gen_loss_epoch = np.round(gen_loss_epoch, 2)

            # Round both accuracies (the original rounded only acc_fake).
            acc_real = np.round(
                self.get_accuracy(inpt_x=self._x_test,
                                  labels=np.ones(len(self._x_test))), 2)
            acc_fake = np.round(
                100 - self.get_accuracy(inpt_x=self._z_test,
                                        labels=np.zeros(len(self._z_test))), 2)
            print(
                "Epoch {}: Discriminator: {} ({})\n\t\t\tGenerator: {} ({}).".
                format(epoch, disc_loss_epoch, acc_real, gen_loss_epoch,
                       acc_fake))

            if log_step is not None:
                self._log(epoch, epoch_train_time)

    def _optimize(self, dataset, batch_size, disc_steps, gen_steps):
        """Run ``disc_steps`` discriminator updates, then ``gen_steps``
        generator/encoder updates, returning the last batch losses.

        NOTE(review): assumes disc_steps >= 1 -- with disc_steps == 0 the
        locals current_batch_x / disc_loss_batch would be unbound.
        """
        for i in range(disc_steps):
            current_batch_x = dataset.get_next_batch(batch_size)
            Z_noise = self.sample_noise(n=len(current_batch_x))
            _, disc_loss_batch = self._sess.run(
                [self._disc_optimizer, self._disc_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Z_input: Z_noise
                })

        for _ in range(gen_steps):
            Z_noise = self.sample_noise(n=len(current_batch_x))
            _, gen_loss_batch = self._sess.run(
                [self._gen_optimizer, self._gen_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Z_input: Z_noise
                })
        return disc_loss_batch, gen_loss_batch

    def get_accuracy(self, inpt_x, labels):
        """Discriminator accuracy on (sample, code) pairs.

        ``labels[0] == 0`` marks ``inpt_x`` as latent codes (fake pairs are
        built by generating images from them); otherwise ``inpt_x`` holds
        data samples that get encoded to form real pairs.
        """
        if labels[0] == 0:
            inpt_image = self._sess.run(self._output_gen,
                                        feed_dict={self._Z_input: inpt_x})
            inpt_x = np.concatenate((inpt_image, inpt_x), axis=1)
        else:
            inpt_encoding = self._encoder.encode(inpt=inpt_x, sess=self._sess)
            inpt_x = np.concatenate((inpt_x, inpt_encoding), axis=1)
        return self._discriminator.predict(inpt_x, self._sess) \
            if False else self._discriminator.get_accuracy(inpt_x, labels, self._sess)

    def predict(self, inpt_x, is_encoded):
        """Discriminator prediction for latent codes (``is_encoded=True``)
        or data samples (``is_encoded=False``)."""
        if is_encoded:
            inpt_image = self._generator.generate_samples(noise=inpt_x,
                                                          sess=self._sess)
            inpt_x = np.concatenate((inpt_image, inpt_x), axis=1)
        else:
            # Bug fix: the original generated samples here too, but a
            # data-space input has to be *encoded* to form the (x, E(x))
            # pair -- mirrors the else-branch of get_accuracy.
            inpt_encoding = self._encoder.encode(inpt=inpt_x, sess=self._sess)
            inpt_x = np.concatenate((inpt_x, inpt_encoding), axis=1)
        return self._discriminator.predict(inpt_x, self._sess)

    def generate_image_from_noise(self, n):
        """Sample ``n`` latent vectors and decode them with the generator."""
        noise = self.sample_noise(n=n)
        # Call the generator directly, matching generate_image_from_image;
        # the original used self.generate_samples, which this class does not
        # define (presumably relied on the base class -- the direct call is
        # unambiguous and uses the same API as predict()).
        return self._generator.generate_samples(noise, self._sess)

    def generate_image_from_image(self, inpt_x):
        """Reconstruct: encode ``inpt_x`` and decode the resulting codes."""
        encoding = self._encoder.encode(inpt=inpt_x, sess=self._sess)
        return self._generator.generate_samples(encoding, self._sess)
Пример #14
0
    def __init__(self,
                 x_dim,
                 z_dim,
                 dec_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./VAE",
                 image_shape=None):
        """Build the VAE graph: two encoder heads (mean and spread), a
        reparameterized latent sample, and a decoder network.

        Parameters
        ----------
        x_dim : int
            Dimensionality of the data space.
        z_dim : int
            Dimensionality of the latent space.
        dec_architecture, enc_architecture : list
            Layer specifications consumed by the network builder classes.
        last_layer_activation : callable
            Activation used on the decoder's output layer.
        folder : str
            Output directory for logs and checkpoints.
        image_shape : tuple or None
            Optional image shape used when logging samples.
        """
        super(VAE, self).__init__(x_dim, z_dim,
                                  [dec_architecture, enc_architecture],
                                  last_layer_activation, folder, image_shape)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]

        ################# Define architecture
        # Both encoder heads share the body layers and differ only in their
        # final linear projection.
        mean_head = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Mean"
            }
        ]
        std_head = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Std"
            }
        ]
        self._encoder_mean = Encoder(self._enc_architecture + [mean_head],
                                     name="Encoder")
        self._encoder_std = Encoder(self._enc_architecture + [std_head],
                                    name="Encoder")

        output_head = [
            logged_dense, {
                "units": x_dim,
                "activation": self._last_layer_activation,
                "name": "Output"
            }
        ]
        self._gen_architecture.append(output_head)
        self._decoder = Decoder(self._gen_architecture, name="Generator")

        self._nets = [self._decoder, self._encoder_mean]

        ################# Connect inputs and networks
        self._mean_layer = self._encoder_mean.generate_net(self._X_input)
        self._std_layer = self._encoder_std.generate_net(self._X_input)

        # Reparameterization trick: z = mu + exp(0.5 * s) * eps, where eps
        # is fed through the Z placeholder (s presumably holds log-variance
        # -- TODO confirm against the loss definition).
        self._output_enc_with_noise = (
            self._mean_layer + tf.exp(0.5 * self._std_layer) * self._Z_input)

        # Decode both the sampled posterior code and a raw latent input.
        self._output_dec = self._decoder.generate_net(
            self._output_enc_with_noise)
        self._output_dec_from_encoding = self._decoder.generate_net(
            self._Z_input)

        ################# Finalize
        self._init_folders()
        self._verify_init()
Пример #15
0
class VAE(GenerativeModel):
    """Variational autoencoder with a Gaussian latent.

    An encoder with two heads produces the latent mean and log-variance,
    a latent sample is drawn via the reparameterization trick, and a
    decoder reconstructs the input.  The loss is Bernoulli reconstruction
    cross-entropy plus the KL divergence to a standard normal prior.
    """

    def __init__(self,
                 x_dim,
                 z_dim,
                 dec_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./VAE",
                 image_shape=None):
        """Build encoder heads, the reparameterized sample, and the decoder.

        Parameters
        ----------
        x_dim : int
            Dimensionality of the data space.
        z_dim : int
            Dimensionality of the latent space.
        dec_architecture, enc_architecture : list
            Layer specifications consumed by the network builder classes.
        last_layer_activation : callable
            Activation used on the decoder's output layer.
        folder : str
            Output directory for logs and checkpoints.
        image_shape : tuple or None
            Optional image shape used when logging samples.
        """
        super(VAE, self).__init__(x_dim, z_dim,
                                  [dec_architecture, enc_architecture],
                                  last_layer_activation, folder, image_shape)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]

        ################# Define architecture
        # Linear head producing the latent mean.
        last_layer_mean = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Mean"
            }
        ]
        self._encoder_mean = Encoder(self._enc_architecture +
                                     [last_layer_mean],
                                     name="Encoder")

        # Linear head producing the latent log-variance (see _define_loss).
        last_layer_std = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Std"
            }
        ]
        self._encoder_std = Encoder(self._enc_architecture + [last_layer_std],
                                    name="Encoder")

        self._gen_architecture.append([
            logged_dense, {
                "units": x_dim,
                "activation": self._last_layer_activation,
                "name": "Output"
            }
        ])
        self._decoder = Decoder(self._gen_architecture, name="Generator")

        self._nets = [self._decoder, self._encoder_mean]

        ################# Connect inputs and networks
        self._mean_layer = self._encoder_mean.generate_net(self._X_input)
        self._std_layer = self._encoder_std.generate_net(self._X_input)

        # Reparameterization trick: z = mu + exp(0.5 * log_var) * eps, with
        # eps supplied through the Z placeholder.
        self._output_enc_with_noise = self._mean_layer + tf.exp(
            0.5 * self._std_layer) * self._Z_input

        self._output_dec = self._decoder.generate_net(
            self._output_enc_with_noise)
        self._output_dec_from_encoding = self._decoder.generate_net(
            self._Z_input)

        ################# Finalize
        self._init_folders()
        self._verify_init()

    def compile(self, learning_rate=0.0001, optimizer=tf.train.AdamOptimizer):
        """Create the ELBO loss and a single optimizer over all variables."""
        self._define_loss()
        with tf.name_scope("Optimizer"):
            vae_optimizer = optimizer(learning_rate=learning_rate)
            self._vae_optimizer = vae_optimizer.minimize(self._vae_loss,
                                                         name="VAE")
        self._summarise()

    def _define_loss(self):
        """Negative ELBO: Bernoulli reconstruction cross-entropy plus the
        closed-form KL(N(mu, sigma^2) || N(0, 1)), where _std_layer is
        interpreted as log-variance (hence the exp)."""
        with tf.name_scope("Loss") as scope:
            # Bernoulli log-likelihood; 1e-10 keeps the logs finite.
            self._data_fidelity_loss = self._X_input * tf.log(
                1e-10 + self._output_dec) + (
                    1 - self._X_input) * tf.log(1e-10 + 1 - self._output_dec)
            self._data_fidelity_loss = -tf.reduce_sum(self._data_fidelity_loss,
                                                      1)

            self._KLdiv = 0.5 * (tf.square(self._mean_layer) +
                                 tf.exp(self._std_layer) - self._std_layer - 1)
            self._KLdiv = tf.reduce_sum(self._KLdiv, 1)

            self._vae_loss = tf.reduce_mean(self._data_fidelity_loss +
                                            self._KLdiv)
            tf.summary.scalar("Loss", self._vae_loss)

    def train(self,
              x_train,
              x_test,
              epochs=100,
              batch_size=64,
              steps=5,
              log_step=3):
        """Optimize the ELBO over ``epochs`` passes of ``x_train``,
        printing the loss per epoch and logging every ``log_step`` epochs."""
        self._set_up_training(log_step=log_step)
        self._set_up_test_train_sample(x_train, x_test)
        for epoch in range(epochs):
            loss_epoch = 0
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # monotonic drop-in replacement.
            start = time.perf_counter()
            trained_examples = 0
            while trained_examples < len(x_train):
                loss_batch = self._optimize(self._trainset, batch_size, steps)
                trained_examples += batch_size
                loss_epoch += loss_batch

            epoch_train_time = (time.perf_counter() - start) / 60
            loss_epoch = np.round(loss_epoch, 2)

            print("Epoch {}: Loss: {}.".format(epoch, loss_epoch))

            if log_step is not None:
                self._log(epoch, epoch_train_time)

    def _optimize(self, dataset, batch_size, steps):
        """Run ``steps`` optimizer updates and return the last batch loss.

        NOTE(review): assumes steps >= 1, otherwise loss_batch is unbound.
        """
        for i in range(steps):
            current_batch_x = dataset.get_next_batch(batch_size)
            Z_noise = self._decoder.sample_noise(n=len(current_batch_x))
            _, loss_batch = self._sess.run(
                [self._vae_optimizer, self._vae_loss],
                feed_dict={
                    self._X_input: current_batch_x,
                    self._Z_input: Z_noise
                })
        return loss_batch

    def decode(self, inpt_x, is_encoded):
        """Decode latent codes; raw samples are first encoded via the mean
        head when ``is_encoded`` is False."""
        if not is_encoded:
            # Bug fix: encode() is called with inpt= everywhere else in this
            # file; the original passed noise=, which is not its parameter.
            inpt_x = self._encoder_mean.encode(inpt=inpt_x, sess=self._sess)
        return self._decoder.decode(inpt_x, self._sess)

    def generate_image_from_noise(self, n):
        """Sample ``n`` latent vectors and decode them."""
        noise = self.sample_noise(n=n)
        return self._decoder.generate_samples(noise, self._sess)

    def generate_image_from_image(self, inpt_x):
        """Reconstruct images through the (deterministic) mean encoding."""
        # Bug fix: this class never sets self._encoder -- the mean head
        # (self._encoder_mean) is the deterministic encoder.
        encoding = self._encoder_mean.encode(inpt=inpt_x, sess=self._sess)
        return self._decoder.generate_samples(encoding, self._sess)