Example #1
    def __init__(self, x_dim, y_dim, z_dim, enc_dim, gen_architecture, enc_architecture, dec_architecture,
                 last_layer_activation, folder="./CBEGAN", image_shape=None, append_y_at_every_layer=None
        ):
        super(CBEGAN, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, enc_architecture, dec_architecture],
                                   last_layer_activation, folder, image_shape, append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]
        self._dec_architecture = self._architectures[2]

        self._enc_dim = enc_dim
        self._kt = 0
        self._gamma = 1.
        self._lambda = 0.001

        ################# Define architecture
        if len(self._x_dim) == 1:
            self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}])
            self._dec_architecture.append([logged_dense, {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}])
        else:
            self._enc_architecture.append([tf.layers.flatten, {"name": "Flatten"}])
            self._gen_architecture[-1][1]["name"] = "Output"
        self._enc_architecture.append([logged_dense, {"units": enc_dim, "activation": tf.nn.tanh, "name": "Output"}])

        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")
        self._decoder = Encoder(self._dec_architecture, name="Decoder")

        self._nets = [self._generator, self._encoder, self._decoder]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._input_fake = tf.concat(axis=1, values=[self._output_gen, self._Y_input], name="fake")
                self._input_real = tf.concat(axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._input_fake = image_condition_concat(inputs=self._output_gen, condition=self._Y_input, name="fake")
                self._input_real = image_condition_concat(inputs=self._X_input, condition=self._Y_input, name="real")

        self._output_encoder_real = self._encoder.generate_net(self._input_real, tf_trainflag=self._is_training)
        self._output_encoder_fake = self._encoder.generate_net(self._input_fake, tf_trainflag=self._is_training)

        self._output_decoder_real = self._decoder.generate_net(self._output_encoder_real, tf_trainflag=self._is_training)
        self._output_decoder_fake = self._decoder.generate_net(self._output_encoder_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
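
Example #1 only builds the graph: the generator produces conditioned samples, while the encoder/decoder pair plays the role of the autoencoding discriminator of a BEGAN, with _kt, _gamma and _lambda reserved for the equilibrium mechanism. The losses themselves are not part of the snippet; the following is a minimal sketch of how these tensors are usually combined in BEGAN. The function name, the L1 reconstruction error, and the choice of storing k_t in a variable are assumptions, not something the class defines here.

import tensorflow as tf  # TensorFlow 1.x API, matching the snippets above

def began_losses(input_real, input_fake, recon_real, recon_fake,
                 k_t, gamma=1.0, lambda_k=0.001):
    # input_*  : tensors fed to the encoder (sample concatenated with the condition)
    # recon_*  : the corresponding decoder outputs (autoencoder reconstructions)
    # k_t      : non-trainable tf.Variable holding the equilibrium term (like _kt)
    # L1 reconstruction errors of the autoencoding discriminator.
    d_real = tf.reduce_mean(tf.abs(input_real - recon_real))
    d_fake = tf.reduce_mean(tf.abs(input_fake - recon_fake))

    loss_discriminator = d_real - k_t * d_fake
    loss_generator = d_fake

    # Proportional control keeps gamma * d_real roughly equal to d_fake.
    update_kt = tf.assign(
        k_t, tf.clip_by_value(k_t + lambda_k * (gamma * d_real - d_fake), 0.0, 1.0))

    # Convergence measure commonly monitored during BEGAN training.
    convergence = d_real + tf.abs(gamma * d_real - d_fake)
    return loss_discriminator, loss_generator, update_kt, convergence

As in the constructor, k_t would start at 0 and be updated once per training step; gamma=1 and lambda_k=0.001 mirror the values assigned to _gamma and _lambda above.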
Example #2
    def __init__(self, x_dim, y_dim, z_dim, gen_architecture, critic_architecture, last_layer_activation,
                 folder="./CWGAN", image_shape=None, append_y_at_every_layer=None
        ):
        super(CWGAN, self).__init__(x_dim, y_dim, z_dim, [gen_architecture, critic_architecture],
                                   last_layer_activation, folder, image_shape, append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._critic_architecture = self._architectures[1]

        ################# Define architecture
        if len(self._x_dim) == 1:
            self._gen_architecture.append([logged_dense, {"units": x_dim, "activation": self._last_layer_activation, "name": "Output"}])
        else:
            self._critic_architecture.append([tf.layers.flatten, {"name": "Flatten"}])
            self._gen_architecture[-1][1]["name"] = "Output"
        self._critic_architecture.append([logged_dense, {"units": 1, "activation": tf.identity, "name": "Output"}])

        self._generator = ConditionalGenerator(self._gen_architecture, name="Generator")
        self._critic = Critic(self._critic_architecture, name="Critic")

        self._nets = [self._generator, self._critic]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(self._mod_Z_input,
                                                        append_elements_at_every_layer=self._append_at_every_layer,
                                                        tf_trainflag=self._is_training)

        with tf.name_scope("InputsCritic"):
            if len(self._x_dim) == 1:
                self._input_fake = tf.concat(axis=1, values=[self._output_gen, self._Y_input], name="fake")
                self._input_real = tf.concat(axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._input_fake = image_condition_concat(inputs=self._output_gen, condition=self._Y_input, name="fake")
                self._input_real = image_condition_concat(inputs=self._X_input, condition=self._Y_input, name="real")

        # with tf.name_scope("InputsCritic"):
        #     self._input_fake = self._output_gen
        #     self._input_real = self._X_input

        self._output_critic_real = self._critic.generate_net(self._input_real, tf_trainflag=self._is_training)
        self._output_critic_fake = self._critic.generate_net(self._input_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
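
Example #2 stops after connecting the critic to the real and fake inputs; _output_critic_real and _output_critic_fake are unbounded scores, since the appended output layer uses tf.identity. A minimal sketch of the Wasserstein losses that usually follow is given below; the wgan_losses name, the clipping constant, and the use of weight clipping at all (rather than a gradient penalty) are assumptions.

import tensorflow as tf  # TensorFlow 1.x API, matching the snippet above

def wgan_losses(output_critic_real, output_critic_fake,
                critic_variables, clip_value=0.01):
    # Critic maximises E[f(real)] - E[f(fake)], so we minimise the negative.
    loss_critic = (tf.reduce_mean(output_critic_fake)
                   - tf.reduce_mean(output_critic_real))
    # Generator tries to raise the critic score of its samples.
    loss_generator = -tf.reduce_mean(output_critic_fake)

    # Enforce the Lipschitz constraint by clipping the critic weights
    # after each critic update (the original WGAN recipe).
    clip_ops = [tf.assign(v, tf.clip_by_value(v, -clip_value, clip_value))
                for v in critic_variables]
    return loss_critic, loss_generator, tf.group(*clip_ops)

Example #3 below follows the alternative recipe, replacing weight clipping with a gradient penalty.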
Example #3
    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 gen_architecture,
                 critic_architecture,
                 last_layer_activation,
                 folder="./CWGANGP",
                 image_shape=None,
                 append_y_at_every_layer=None,
                 PatchGAN=False):
        super(CWGANGP, self).__init__(x_dim, y_dim, z_dim,
                                      [gen_architecture, critic_architecture],
                                      last_layer_activation, folder,
                                      image_shape, append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._critic_architecture = self._architectures[1]
        self._is_patchgan = PatchGAN

        ################# Define architecture
        if self._is_patchgan:
            f_xy = self._critic_architecture[-1][-1]["filters"]
            assert f_xy == 1, "If PatchGAN is used, the critic's last layer needs 1 filter. Given: {}.".format(
                f_xy)

            a_xy = self._critic_architecture[-1][-1]["activation"]
            assert a_xy == tf.identity, "If PatchGAN is used, the critic's last layer needs tf.identity as activation. Given: {}.".format(
                a_xy)
        else:
            self._critic_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._critic_architecture.append([
                logged_dense, {
                    "units": 1,
                    "activation": tf.identity,
                    "name": "Output"
                }
            ])

        if len(self._x_dim) == 1:
            self._gen_architecture.append([
                tf.layers.dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            self._gen_architecture[-1][1]["name"] = "Output"

        self._generator = ConditionalGenerator(self._gen_architecture,
                                               name="Generator")
        self._critic = Critic(self._critic_architecture, name="Critic")

        self._nets = [self._generator, self._critic]

        ################# Connect inputs and networks
        self._output_gen = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        with tf.name_scope("InputsCritic"):
            if len(self._x_dim) == 1:
                self._input_real = tf.concat(
                    axis=1, values=[self._X_input, self._Y_input], name="real")
                self._input_fake = tf.concat(
                    axis=1,
                    values=[self._output_gen, self._Y_input],
                    name="fake")
            else:
                self._input_real = image_condition_concat(
                    inputs=self._X_input, condition=self._Y_input, name="real")
                self._input_fake = image_condition_concat(
                    inputs=self._output_gen,
                    condition=self._Y_input,
                    name="fake")

        # with tf.name_scope("InputsCritic"):
        #     self._input_fake = self._output_gen
        #     self._input_real = self._X_input

        self._output_critic_real = self._critic.generate_net(
            self._input_real, tf_trainflag=self._is_training)
        self._output_critic_fake = self._critic.generate_net(
            self._input_fake, tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()

        if self._is_patchgan:
            print("PATCHGAN chosen with output: {}.".format(
                self._output_critic_real.shape))
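
The GP suffix indicates that this critic is meant to be trained with a gradient penalty instead of weight clipping, but the penalty itself is not part of the constructor shown. Below is a minimal sketch of the standard WGAN-GP term on random interpolates between _input_real and _input_fake; the gradient_penalty name, the critic_fn callable (standing in for a call into the Critic with reused variables), and the penalty weight of 10 are assumptions.

import tensorflow as tf  # TensorFlow 1.x API, matching the snippet above

def gradient_penalty(critic_fn, input_real, input_fake, gp_weight=10.0):
    # Assumes the inputs have a statically known rank (flat vectors or images).
    batch_size = tf.shape(input_real)[0]
    ndims = input_real.shape.ndims

    # One interpolation coefficient per sample, broadcast over all other axes.
    eps = tf.random_uniform([batch_size] + [1] * (ndims - 1), 0.0, 1.0)
    interpolates = eps * input_real + (1.0 - eps) * input_fake

    # Gradient of the critic score with respect to the interpolated input.
    critic_interp = critic_fn(interpolates)
    grads = tf.gradients(critic_interp, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(grads),
                                   axis=list(range(1, ndims))))

    # Penalise deviations of the gradient norm from 1 (the Lipschitz target).
    return gp_weight * tf.reduce_mean(tf.square(slopes - 1.0))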
Example #4
    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 dec_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./VAE",
                 image_shape=None,
                 append_y_at_every_layer=None):
        super(CVAE, self).__init__(x_dim, y_dim, z_dim,
                                   [dec_architecture, enc_architecture],
                                   last_layer_activation, folder, image_shape,
                                   append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._enc_architecture = self._architectures[1]

        ################# Define architecture
        if len(self._x_dim) == 1:
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        last_layer_mean = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Mean"
            }
        ]
        last_layer_std = [
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Std"
            }
        ]

        self._encoder_mean = Encoder(self._enc_architecture +
                                     [last_layer_mean],
                                     name="Encoder")
        self._encoder_std = Encoder(self._enc_architecture + [last_layer_std],
                                    name="Encoder")
        self._generator = ConditionalDecoder(self._gen_architecture,
                                             name="Generator")

        self._nets = [self._generator, self._encoder_mean]

        ################# Connect inputs and networks
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1, values=[self._X_input, self._Y_input], name="real")
            else:
                self._mod_X_input = image_condition_concat(
                    inputs=self._X_input, condition=self._Y_input, name="real")

        self._mean_layer = self._encoder_mean.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._std_layer = self._encoder_std.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_enc = self._mean_layer + tf.exp(
            0.5 * self._std_layer) * self._Z_input

        with tf.name_scope("InputsGenerator"):
            self._dec_input = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="latent")

        self._output_dec = self._generator.generate_net(
            self._dec_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_dec_from_encoding = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
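
The constructor already contains the reparameterisation step, _output_enc = _mean_layer + tf.exp(0.5 * _std_layer) * _Z_input, which implicitly treats _std_layer as a log-variance. A conditional-VAE objective built from these tensors would typically look like the sketch below; the cvae_loss name and the squared-error reconstruction term are assumptions, since the snippet does not show the loss.

import tensorflow as tf  # TensorFlow 1.x API, matching the snippet above

def cvae_loss(x_input, x_reconstruction, mean_layer, logvar_layer):
    # Reconstruction term: how well the decoder reproduces the input,
    # summed over all non-batch axes (works for flat vectors and images).
    reconstruction = tf.reduce_sum(
        tf.square(x_input - x_reconstruction),
        axis=list(range(1, x_input.shape.ndims)))

    # KL divergence between q(z|x, y) = N(mean, exp(logvar)) and N(0, I),
    # matching the log-variance interpretation of _std_layer.
    kl = -0.5 * tf.reduce_sum(
        1.0 + logvar_layer - tf.square(mean_layer) - tf.exp(logvar_layer),
        axis=1)

    return tf.reduce_mean(reconstruction + kl)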
Example #5
    def __init__(self,
                 x_dim,
                 y_dim,
                 z_dim,
                 gen_architecture,
                 disc_architecture,
                 enc_architecture,
                 last_layer_activation,
                 folder="./Results/CBiGAN_log",
                 image_shape=None,
                 append_y_at_every_layer=None):
        super(CBiGAN, self).__init__(
            x_dim, y_dim, z_dim,
            [gen_architecture, disc_architecture, enc_architecture],
            last_layer_activation, folder, image_shape,
            append_y_at_every_layer)

        self._gen_architecture = self._architectures[0]
        self._disc_architecture = self._architectures[1]
        self._enc_architecture = self._architectures[2]

        ################# Define architecture
        if len(self._x_dim) == 1:
            self._gen_architecture.append([
                logged_dense, {
                    "units": x_dim,
                    "activation": self._last_layer_activation,
                    "name": "Output"
                }
            ])
        else:
            self._disc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._enc_architecture.append(
                [tf.layers.flatten, {
                    "name": "Flatten"
                }])
            self._gen_architecture[-1][1]["name"] = "Output"
        self._disc_architecture.append([
            logged_dense, {
                "units": 1,
                "activation": tf.sigmoid,
                "name": "Output"
            }
        ])
        self._enc_architecture.append([
            logged_dense, {
                "units": z_dim,
                "activation": tf.identity,
                "name": "Output"
            }
        ])

        self._generator = ConditionalGenerator(self._gen_architecture,
                                               name="Generator")
        self._discriminator = Discriminator(self._disc_architecture,
                                            name="Discriminator")
        self._encoder = Encoder(self._enc_architecture, name="Encoder")

        self._nets = [self._generator, self._discriminator, self._encoder]

        ################# Connect inputs and networks
        with tf.name_scope("InputsEncoder"):
            if len(self._x_dim) == 1:
                self._mod_X_input = tf.concat(
                    axis=1,
                    values=[self._X_input, self._Y_input],
                    name="modified_x")
            else:
                self._mod_X_input = image_condition_concat(
                    inputs=self._X_input, condition=self._Y_input, name="modified_x")

        self._output_gen = self._generator.generate_net(
            self._mod_Z_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_enc = self._encoder.generate_net(
            self._mod_X_input,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        with tf.name_scope("InputsCritic"):
            self._mod_output_enc = tf.concat(
                axis=1,
                values=[self._output_enc, self._Y_input],
                name="modified_encoder")
            if len(self._x_dim) == 1:
                self._disc_input_fake = tf.concat(
                    axis=1,
                    values=[self._output_gen, self._mod_Z_input],
                    name="fake")
                self._disc_input_real = tf.concat(
                    axis=1,
                    values=[self._X_input, self._mod_output_enc],
                    name="real")
            else:
                self._disc_input_fake = image_condition_concat(
                    inputs=self._output_gen,
                    condition=self._mod_Z_input,
                    name="fake")
                self._disc_input_real = image_condition_concat(
                    inputs=self._X_input,
                    condition=self._mod_output_enc,
                    name="real")

        self._output_disc_fake = self._discriminator.generate_net(
            self._disc_input_fake,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)
        self._output_disc_real = self._discriminator.generate_net(
            self._disc_input_real,
            append_elements_at_every_layer=self._append_at_every_layer,
            tf_trainflag=self._is_training)

        ################# Finalize
        self._init_folders()
        self._verify_init()
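
In this BiGAN variant the discriminator scores joint pairs: the generated pair (G(z), z) against the real pair (x, E(x)), each combined with the condition, and its appended output layer already applies tf.sigmoid. The sketch below writes the usual BiGAN cross-entropy losses directly on those probabilities; the bigan_losses name and the epsilon guard inside the logarithms are assumptions.

import tensorflow as tf  # TensorFlow 1.x API, matching the snippet above

def bigan_losses(output_disc_real, output_disc_fake, eps=1e-7):
    # Discriminator: real pairs (x, E(x)) -> 1, fake pairs (G(z), z) -> 0.
    loss_discriminator = -tf.reduce_mean(
        tf.log(output_disc_real + eps) + tf.log(1.0 - output_disc_fake + eps))

    # Generator and encoder are trained jointly to fool the discriminator.
    loss_generator_encoder = -tf.reduce_mean(
        tf.log(output_disc_fake + eps) + tf.log(1.0 - output_disc_real + eps))

    return loss_discriminator, loss_generator_encoder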