Example #1
    def __init__(
            self,
            latent_dimension,
            embedding_dimensions,
            embedding_activations=tf.nn.relu,
            var_scope="dirichlet_encoder",
            bn_before=False,
            bn_after=False,
            epsilon=0.0,
            embedding_mu_kernel_initializer=tf.initializers.glorot_normal(),
            embedding_mu_bias_initializer=tf.initializers.zeros(),
            latent_mu_kernel_initialiazer=tf.initializers.glorot_normal(),
            latent_mu_bias_initializer=tf.initializers.zeros(),
            embedding_var_kernel_initializer=tf.initializers.glorot_normal(),
            embedding_var_bias_initializer=tf.initializers.zeros(),
            latent_var_kernel_initialiazer=tf.initializers.glorot_normal(),
            latent_var_bias_initializer=tf.initializers.zeros(),
    ):

        Layer.__init__(self)
        Scope.__init__(self, var_scope)

        self.latent_dimension = latent_dimension
        self.embedding_dimensions = embedding_dimensions
        self.embedding_activation = embedding_activations
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.epsilon = epsilon

        self.encoder = Encoder(
            latent_dim=self.latent_dimension,
            embedding_dimensions=self.embedding_dimensions,
            activation=self.embedding_activation,
            var_scope=self.v_name("encoder"),
            bn_before=self.bn_before,
            bn_after=self.bn_after,
            embedding_kernel_initializer=embedding_mu_kernel_initializer,
            embedding_bias_initializer=embedding_mu_bias_initializer,
            latent_kernel_initialiazer=latent_mu_kernel_initialiazer,
            latent_bias_initializer=latent_mu_bias_initializer,
        )
Example #2
    def __init__(
        self,
        latent_dimension,
        embedding_dimensions,
        embedding_activation=tf.nn.relu,
        var_scope="binary_encoder",
        bn_before=False,
        bn_after=False,
        epsilon=0.0,
        embedding_kernel_initializer=tf.initializers.glorot_uniform(),
        embedding_bias_initializer=tf.initializers.zeros(),
        latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        latent_bias_initializer=tf.initializers.zeros(),
        embedding_dropout=0.0,
    ):
        Layer.__init__(self)
        Scope.__init__(self, var_scope)

        self.latent_dimension = latent_dimension
        self.embedding_dimensions = embedding_dimensions
        self.embedding_activation = embedding_activation
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.epsilon = epsilon

        self.logits_encoder = Encoder(
            latent_dim=self.latent_dimension,
            embedding_dimensions=self.embedding_dimensions,
            activation=self.embedding_activation,
            var_scope=self.v_name("logits_encoder"),
            bn_before=self.bn_before,
            bn_after=self.bn_after,
            embedding_kernel_initializer=embedding_kernel_initializer,
            embedding_bias_initializer=embedding_bias_initializer,
            latent_kernel_initialiazer=latent_kernel_initialiazer,
            latent_bias_initializer=latent_bias_initializer,
            embedding_dropout=embedding_dropout,
        )
Example #3
    def __init__(
        self,
        latent_dim,
        embedding_dimensions,
        activation,
        var_scope="encoder",
        bn_before=False,
        bn_after=False,
        embedding_kernel_initializer=tf.initializers.glorot_uniform(),
        embedding_bias_initializer=tf.initializers.zeros(),
        latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        latent_bias_initializer=tf.initializers.zeros(),
        embedding_dropout=0.0,
    ):

        # Enable V1-type behaviour: the layer keeps the dtype of its inputs
        # (autocasting disabled).
        V1_PARMS = {"autocast": False}

        Layer.__init__(self, **V1_PARMS)
        Scope.__init__(self, var_scope)
        self.latent_dim = latent_dim
        self.em_dim = embedding_dimensions

        # embeddings
        self.n_em = len(embedding_dimensions)
        self.embeddings = [None] * self.n_em
        self.embeddings_bn_before = [None] * self.n_em
        self.embeddings_bn_after = [None] * self.n_em
        self.activation = activation
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.dropout_rate = embedding_dropout
        self.dropout = [None] * self.n_em

        for i, em in enumerate(self.em_dim):
            with tf.name_scope('embedding_{}'.format(i)):
                self.embeddings[i] = (tfk.layers.Dense(
                    units=em,
                    activation=None,
                    use_bias=True,
                    kernel_initializer=embedding_kernel_initializer,
                    bias_initializer=embedding_bias_initializer,
                    name='dense',
                    **V1_PARMS,
                ))
                if self.bn_before:
                    self.embeddings_bn_before[i] = (
                        tfk.layers.BatchNormalization(
                            axis=-1,
                            name='bn_before',
                            renorm=True,
                            **V1_PARMS,
                        ))

                if self.bn_after:
                    self.embeddings_bn_after[i] = (
                        tfk.layers.BatchNormalization(
                            axis=-1,
                            name="bn_after",
                            renorm=True,
                            **V1_PARMS,
                        ))

                if self.dropout_rate > 0.0:
                    self.dropout[i] = (tfk.layers.Dropout(
                        self.dropout_rate,
                        name='dropout',
                        **V1_PARMS,
                    ))

        self.latent_bn = tfk.layers.BatchNormalization(
            axis=-1,
            name=self.v_name("latent_bn"),
            **V1_PARMS,
        )
        self.latent = tfk.layers.Dense(
            units=self.latent_dim,
            activation=None,
            use_bias=True,
            kernel_initializer=latent_kernel_initialiazer,
            bias_initializer=latent_bias_initializer,
            name=self.v_name("latent_dense"),
            **V1_PARMS,
        )
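The Encoder constructor above only builds the layer stack (Dense embedding layers with optional batch normalisation and dropout, followed by a final latent Dense); its forward pass is defined elsewhere in the class. A minimal instantiation sketch, with illustrative dimensions that are not taken from the original code:

import tensorflow as tf

# two hidden embedding layers (128 and 64 units) feeding a 16-dimensional latent
encoder = Encoder(
    latent_dim=16,
    embedding_dimensions=[128, 64],
    activation=tf.nn.relu,
    bn_after=True,          # batch normalisation after each activation
    embedding_dropout=0.1,  # dropout applied between embedding layers
)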
Example #4
    def __init__(
        self,
        components,
        input_dimension,
        embedding_dimensions,
        latent_dimensions,
        embedding_activations=tf.nn.relu,
        mixture_embedding_activations=None,
        mixture_embedding_dimensions=None,
        mixture_latent_dimensions=None,
        bn_before=False,
        bn_after=False,
        categorical_epsilon=0.0,
        latent_epsilon=0.0,
        latent_prior_epsilon=0.0,
        reconstruction_epsilon=0.0,
        kind="binary",
        learning_rate=0.01,
        gradient_clip=None,
        var_scope="gmvae",
        cat_embedding_kernel_initializer="glorot_uniform",
        cat_embedding_bias_initializer="zeros",
        cat_latent_kernel_initialiazer="glorot_uniform",
        cat_latent_bias_initializer="zeros",
        latent_mu_embedding_kernel_initializer="glorot_uniform",
        latent_mu_embedding_bias_initializer="zeros",
        latent_mu_latent_kernel_initialiazer="glorot_uniform",
        latent_mu_latent_bias_initializer="zeros",
        latent_var_embedding_kernel_initializer="glorot_uniform",
        latent_var_embedding_bias_initializer="zeros",
        latent_var_latent_kernel_initialiazer="glorot_uniform",
        latent_var_latent_bias_initializer="zeros",
        posterior_mu_embedding_kernel_initializer="glorot_uniform",
        posterior_mu_embedding_bias_initializer="zeros",
        posterior_mu_latent_kernel_initialiazer="glorot_uniform",
        posterior_mu_latent_bias_initializer="zeros",
        posterior_var_embedding_kernel_initializer="glorot_uniform",
        posterior_var_embedding_bias_initializer="zeros",
        posterior_var_latent_kernel_initialiazer="glorot_uniform",
        posterior_var_latent_bias_initializer="zeros",
        recon_embedding_kernel_initializer="glorot_uniform",
        recon_embedding_bias_initializer="zeros",
        recon_latent_kernel_initialiazer="glorot_uniform",
        recon_latent_bias_initializer="zeros",
        z_kl_lambda=1.0,
        c_kl_lambda=1.0,
        optimizer=tf.keras.optimizers.SGD(0.001),
        connected_weights=True,
        categorical_latent_embedding_dropout=0.0,
        mixture_latent_mu_embedding_dropout=0.0,
        mixture_latent_var_embedding_dropout=0.0,
        mixture_posterior_mu_dropout=0.0,
        mixture_posterior_var_dropout=0.0,
        recon_dropouut=0.0,
        latent_fixed_var=None,
    ):

        # instantiate the base Model and variable scope
        Model.__init__(self)
        Scope.__init__(self, var_scope)

        # store the constructor arguments that are referenced below
        self.components = components
        self.input_dimension = input_dimension
        self.embedding_dimensions = embedding_dimensions
        self.latent_dimensions = latent_dimensions
        self.embedding_activations = embedding_activations
        self.kind = kind
        self.latent_epsilon = latent_epsilon
        self.reconstruction_epsilon = reconstruction_epsilon

        self.mem_dim = (mixture_embedding_dimensions
                        if mixture_embedding_dimensions is not None else
                        self.embedding_dimensions)
        self.mem_act = (mixture_embedding_activations
                        if mixture_embedding_activations is not None else
                        self.embedding_activations)
        self.mem_lat = (mixture_latent_dimensions if mixture_latent_dimensions
                        is not None else self.latent_dimensions)

        self.cooling_distance = 0

        if cat_latent_bias_initializer is None:
            cat_latent_bias_initializer = tf.initializers.constant(
                np.log((1 / self.components) / (1 - 1 / self.components)))

        with tf.name_scope("categorical"):
            self.graph_qy_g_x = CategoricalEncoder(
                latent_dimension=components,
                embedding_dimensions=embedding_dimensions,
                embedding_activation=embedding_activations,
                var_scope=self.v_name("categorical_encoder"),
                bn_before=bn_before,
                bn_after=bn_after,
                epsilon=categorical_epsilon,
                embedding_kernel_initializer=cat_embedding_kernel_initializer,
                embedding_bias_initializer=cat_embedding_bias_initializer,
                latent_kernel_initialiazer=cat_latent_kernel_initialiazer,
                latent_bias_initializer=cat_latent_bias_initializer,
                embedding_dropout=categorical_latent_embedding_dropout,
            )

        self.marginal_autoencoder = MarginalAutoEncoder(
            self.input_dimension,
            self.mem_dim,
            self.mem_lat,
            kind=self.kind,
            var_scope=self.v_name("marginal_autoencoder"),
            latent_epsilon=self.latent_epsilon,
            reconstruction_epsilon=self.reconstruction_epsilon,
            embedding_activations=self.mem_act,
            latent_prior_epsilon=latent_prior_epsilon,
            latent_mu_embedding_kernel_initializer=
            latent_mu_embedding_kernel_initializer,
            latent_mu_embedding_bias_initializer=
            latent_mu_embedding_bias_initializer,
            latent_mu_latent_kernel_initialiazer=
            latent_mu_latent_kernel_initialiazer,
            latent_mu_latent_bias_initializer=latent_mu_latent_bias_initializer,
            latent_var_embedding_kernel_initializer=
            latent_var_embedding_kernel_initializer,
            latent_var_embedding_bias_initializer=
            latent_var_embedding_bias_initializer,
            latent_var_latent_kernel_initialiazer=
            latent_var_latent_kernel_initialiazer,
            latent_var_latent_bias_initializer=
            latent_var_latent_bias_initializer,
            posterior_mu_embedding_kernel_initializer=
            posterior_mu_embedding_kernel_initializer,
            posterior_mu_embedding_bias_initializer=
            posterior_mu_embedding_bias_initializer,
            posterior_mu_latent_kernel_initialiazer=
            posterior_mu_latent_kernel_initialiazer,
            posterior_mu_latent_bias_initializer=
            posterior_mu_latent_bias_initializer,
            posterior_var_embedding_kernel_initializer=
            posterior_var_embedding_kernel_initializer,
            posterior_var_embedding_bias_initializer=
            posterior_var_embedding_bias_initializer,
            posterior_var_latent_kernel_initialiazer=
            posterior_var_latent_kernel_initialiazer,
            posterior_var_latent_bias_initializer=
            posterior_var_latent_bias_initializer,
            recon_embedding_kernel_initializer=
            recon_embedding_kernel_initializer,
            recon_embedding_bias_initializer=recon_embedding_bias_initializer,
            recon_latent_kernel_initialiazer=recon_latent_kernel_initialiazer,
            recon_latent_bias_initializer=recon_latent_bias_initializer,
            connected_weights=connected_weights,
            latent_mu_embedding_dropout=mixture_latent_mu_embedding_dropout,
            latent_var_embedding_dropout=mixture_latent_var_embedding_dropout,
            posterior_mu_dropout=mixture_posterior_mu_dropout,
            posterior_var_dropout=mixture_posterior_var_dropout,
            recon_dropouut=recon_dropouut,
            latent_fixed_var=latent_fixed_var,
        )

        # self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        self.optimizer = optimizer
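For scale, the Gmvae constructor exposes one initializer and dropout argument per sub-network, but only the first four arguments are required. A minimal instantiation sketch, leaving every initializer at its default; the dimensions are illustrative rather than taken from the original code:

import tensorflow as tf

gmvae = Gmvae(
    components=10,                    # number of mixture components
    input_dimension=784,              # e.g. a flattened 28x28 image
    embedding_dimensions=[512, 256],
    latent_dimensions=64,
    kind="binary",                    # Bernoulli reconstruction likelihood
    optimizer=tf.keras.optimizers.Adam(1e-3),
)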
Example #5
    def __init__(
        self,
        descriminator_dimensions,
        components,
        input_dimension,
        embedding_dimensions,
        latent_dimensions,
        embedding_activations=tf.nn.relu,
        mixture_embedding_activations=None,
        mixture_embedding_dimensions=None,
        mixture_latent_dimensions=None,
        bn_before=False,
        bn_after=False,
        categorical_epsilon=0.0,
        latent_epsilon=0.0,
        latent_prior_epsilon=0.0,
        reconstruction_epsilon=0.0,
        kind="binary",
        learning_rate=0.01,
        gradient_clip=None,
        var_scope="gmvaegan",
        descr_embedding_kernel_initializer=tf.initializers.glorot_uniform(),
        descr_embedding_bias_initializer=tf.initializers.zeros(),
        descr_latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        descr_latent_bias_initializer=tf.initializers.zeros(),
        cat_embedding_kernel_initializer=tf.initializers.glorot_uniform(),
        cat_embedding_bias_initializer=tf.initializers.zeros(),
        cat_latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        cat_latent_bias_initializer=None,
        latent_mu_embedding_kernel_initializer=tf.initializers.glorot_uniform(
        ),
        latent_mu_embedding_bias_initializer=tf.initializers.zeros(),
        latent_mu_latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        latent_mu_latent_bias_initializer=tf.initializers.zeros(),
        latent_var_embedding_kernel_initializer=tf.initializers.glorot_uniform(
        ),
        latent_var_embedding_bias_initializer=tf.initializers.zeros(),
        latent_var_latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        latent_var_latent_bias_initializer=tf.initializers.constant(1.0),
        posterior_mu_embedding_kernel_initializer=tf.initializers.
        glorot_uniform(),
        posterior_mu_embedding_bias_initializer=tf.initializers.zeros(),
        posterior_mu_latent_kernel_initialiazer=tf.initializers.glorot_uniform(
        ),
        posterior_mu_latent_bias_initializer=tf.initializers.zeros(),
        posterior_var_embedding_kernel_initializer=tf.initializers.
        glorot_uniform(),
        posterior_var_embedding_bias_initializer=tf.initializers.zeros(),
        posterior_var_latent_kernel_initialiazer=tf.initializers.
        glorot_uniform(),
        posterior_var_latent_bias_initializer=tf.initializers.constant(1.0),
        recon_embedding_kernel_initializer=tf.initializers.glorot_uniform(),
        recon_embedding_bias_initializer=tf.initializers.zeros(),
        recon_latent_kernel_initialiazer=tf.initializers.glorot_uniform(),
        recon_latent_bias_initializer=tf.initializers.zeros(),
        z_kl_lambda=1.0,
        c_kl_lambda=1.0,
        vae_optimizer=tf.keras.optimizers.SGD(0.001),
        gan_optimizer=tf.keras.optimizers.SGD(0.001),
        dec_optimizer=tf.keras.optimizers.SGD(0.001),
        connected_weights=True,
        categorical_latent_embedding_dropout=0.0,
        mixture_latent_mu_embedding_dropout=0.0,
        mixture_latent_var_embedding_dropout=0.0,
        mixture_posterior_mu_dropout=0.0,
        mixture_posterior_var_dropout=0.0,
        recon_dropouut=0.0,
        latent_fixed_var=None,
    ):

        # initialise the base Model and variable scope before assigning
        # any attributes to the Keras model
        Model.__init__(self)
        Scope.__init__(self, var_scope)

        # store the constructor arguments that are referenced below
        self.embedding_dimensions = embedding_dimensions
        self.embedding_activations = embedding_activations
        self.latent_dimensions = latent_dimensions

        self.mem_dim = (mixture_embedding_dimensions
                        if mixture_embedding_dimensions is not None else
                        self.embedding_dimensions)
        self.mem_act = (mixture_embedding_activations
                        if mixture_embedding_activations is not None else
                        self.embedding_activations)
        self.mem_lat = (mixture_latent_dimensions if mixture_latent_dimensions
                        is not None else self.latent_dimensions)

        self.bn_before = bn_before
        self.bn_after = bn_after

        self.cat_eps = categorical_epsilon
        self.lat_eps = latent_epsilon
        self.rec_eps = reconstruction_epsilon

        self.kind = kind
        self.gradient_clip = gradient_clip
        self.learning_rate = learning_rate
        self.cooling_distance = 0

        self.gmvae = Gmvae(
            components=components,
            input_dimension=input_dimension,
            embedding_dimensions=embedding_dimensions,
            latent_dimensions=latent_dimensions,
            embedding_activations=embedding_activations,
            mixture_embedding_activations=mixture_embedding_activations,
            mixture_embedding_dimensions=mixture_embedding_dimensions,
            bn_before=bn_before,
            bn_after=bn_after,
            categorical_epsilon=categorical_epsilon,
            latent_epsilon=latent_epsilon,
            latent_prior_epsilon=latent_prior_epsilon,
            reconstruction_epsilon=reconstruction_epsilon,
            kind=kind,
            learning_rate=learning_rate,
            gradient_clip=gradient_clip,
            var_scope=self.v_name("gmvae"),
            cat_embedding_kernel_initializer=cat_embedding_kernel_initializer,
            cat_embedding_bias_initializer=cat_embedding_bias_initializer,
            cat_latent_kernel_initialiazer=cat_latent_kernel_initialiazer,
            cat_latent_bias_initializer=cat_latent_bias_initializer,
            latent_mu_embedding_kernel_initializer=
            latent_mu_embedding_kernel_initializer,
            latent_mu_embedding_bias_initializer=
            latent_mu_embedding_bias_initializer,
            latent_mu_latent_kernel_initialiazer=
            latent_mu_latent_kernel_initialiazer,
            latent_mu_latent_bias_initializer=latent_mu_latent_bias_initializer,
            latent_var_embedding_kernel_initializer=
            latent_var_embedding_kernel_initializer,
            latent_var_embedding_bias_initializer=
            latent_var_embedding_bias_initializer,
            latent_var_latent_kernel_initialiazer=
            latent_var_latent_kernel_initialiazer,
            latent_var_latent_bias_initializer=
            latent_var_latent_bias_initializer,
            posterior_mu_embedding_kernel_initializer=
            posterior_mu_embedding_kernel_initializer,
            posterior_mu_embedding_bias_initializer=
            posterior_mu_embedding_bias_initializer,
            posterior_mu_latent_kernel_initialiazer=
            posterior_mu_latent_kernel_initialiazer,
            posterior_mu_latent_bias_initializer=
            posterior_mu_latent_bias_initializer,
            posterior_var_embedding_kernel_initializer=
            posterior_var_embedding_kernel_initializer,
            posterior_var_embedding_bias_initializer=
            posterior_var_embedding_bias_initializer,
            posterior_var_latent_kernel_initialiazer=
            posterior_var_latent_kernel_initialiazer,
            posterior_var_latent_bias_initializer=
            posterior_var_latent_bias_initializer,
            recon_embedding_kernel_initializer=
            recon_embedding_kernel_initializer,
            recon_embedding_bias_initializer=recon_embedding_bias_initializer,
            recon_latent_kernel_initialiazer=recon_latent_kernel_initialiazer,
            recon_latent_bias_initializer=recon_latent_bias_initializer,
            z_kl_lambda=z_kl_lambda,
            c_kl_lambda=c_kl_lambda,
            optimizer=vae_optimizer,
            connected_weights=connected_weights,
            categorical_latent_embedding_dropout=
            categorical_latent_embedding_dropout,
            mixture_latent_mu_embedding_dropout=
            mixture_latent_mu_embedding_dropout,
            mixture_latent_var_embedding_dropout=
            mixture_latent_var_embedding_dropout,
            mixture_posterior_mu_dropout=mixture_posterior_mu_dropout,
            mixture_posterior_var_dropout=mixture_posterior_var_dropout,
            recon_dropouut=recon_dropouut,
            latent_fixed_var=latent_fixed_var,
        )
        self.descriminator = SigmoidEncoder(
            latent_dimension=1,
            embedding_dimensions=descriminator_dimensions,
            var_scope=self.v_name("graph_descriminator"),
            bn_before=bn_before,
            bn_after=bn_after,
            epsilon=0.0,
            embedding_kernel_initializer=descr_embedding_kernel_initializer,
            embedding_bias_initializer=descr_embedding_bias_initializer,
            latent_kernel_initialiazer=descr_latent_kernel_initialiazer,
            latent_bias_initializer=descr_latent_bias_initializer,
        )

        self.encoder_vars = (
            self.gmvae.marginal_autoencoder.graphs_qz_g_xy.trainable_variables
            + list(self.gmvae.graph_qy_g_x.trainable_variables))
        self.decoder_vars = self.gmvae.marginal_autoencoder.graphs_px_g_zy.trainable_variables
        self.gan_vars = self.descriminator.trainable_variables
Example #6
    def __init__(
            self,
            input_dimension,
            embedding_dimensions,
            latent_dim,
            embedding_activations=tf.nn.tanh,
            kind="binary",
            var_scope="variational_autoencoder",
            bn_before=False,
            bn_after=False,
            latent_epsilon=0.0,
            reconstruction_epsilon=0.0,
            enc_mu_embedding_kernel_initializer="glorot_uniform",
            enc_mu_embedding_bias_initializer="zeros",
            enc_mu_latent_kernel_initialiazer="glorot_uniform",
            enc_mu_latent_bias_initializer="zeros",
            enc_var_embedding_kernel_initializer="glorot_uniform",
            enc_var_embedding_bias_initializer="zeros",
            enc_var_latent_kernel_initialiazer="glorot_uniform",
            enc_var_latent_bias_initializer="zeros",
            recon_embedding_kernel_initializer="glorot_uniform",
            recon_embedding_bias_initializer="zeros",
            recon_latent_kernel_initialiazer="glorot_uniform",
            recon_latent_bias_initializer="zeros",
            connected_weights=True,
            latent_mu_embedding_dropout=0.0,
            latent_var_embedding_dropout=0.0,
            recon_dropouut=0.0,
            latent_fixed_var=None,
            optimizer=tf.keras.optimizers.Adam(1e-3),
            gradient_clip=None,
    ):
        Model.__init__(self)
        Scope.__init__(self, var_scope)

        # store the constructor arguments that are referenced below
        self.input_dimension = input_dimension
        self.embedding_dimensions = embedding_dimensions
        self.latent_dim = latent_dim
        self.kind = kind
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.latent_epsilon = latent_epsilon
        self.reconstruction_epsilon = reconstruction_epsilon

        self.cooling_distance = 0

        # Encoder
        self.graph_qz_g_x = RandomNormalEncoder(
            latent_dimension=self.latent_dim,
            embedding_dimensions=self.embedding_dimensions,
            var_scope=self.v_name("graph_qz_g_x"),
            bn_before=self.bn_before,
            bn_after=self.bn_after,
            epsilon=self.latent_epsilon,
            embedding_mu_kernel_initializer=enc_mu_embedding_kernel_initializer,
            embedding_mu_bias_initializer=enc_mu_embedding_bias_initializer,
            latent_mu_kernel_initialiazer=enc_mu_latent_kernel_initialiazer,
            latent_mu_bias_initializer=enc_mu_latent_bias_initializer,
            embedding_var_kernel_initializer=
            enc_var_embedding_kernel_initializer,
            embedding_var_bias_initializer=enc_var_embedding_bias_initializer,
            latent_var_kernel_initialiazer=enc_var_latent_kernel_initialiazer,
            latent_var_bias_initializer=enc_var_latent_bias_initializer,
            connected_weights=connected_weights,
            embedding_mu_dropout=latent_mu_embedding_dropout,
            embedding_var_dropout=latent_var_embedding_dropout,
            fixed_var=latent_fixed_var,
        )

        # Decoder
        if self.kind == "binary":
            self.graph_px_g_z = SigmoidEncoder(
                latent_dimension=self.input_dimension,
                embedding_dimensions=self.embedding_dimensions[::-1],
                var_scope=self.v_name("graph_px_g_z"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                epsilon=self.reconstruction_epsilon,
                embedding_kernel_initializer=recon_embedding_kernel_initializer,
                embedding_bias_initializer=recon_embedding_bias_initializer,
                latent_kernel_initialiazer=recon_latent_kernel_initialiazer,
                latent_bias_initializer=recon_latent_bias_initializer,
                embedding_dropout=recon_dropouut,
            )
        else:
            self.graph_px_g_z = RandomNormalEncoder(
                self.input_dimension,
                self.embedding_dimensions[::-1],
                var_scope=self.v_name("graph_px_g_z"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_mu_dropout=recon_dropouut,
                embedding_var_dropout=recon_dropouut,
                fixed_var=1.0,
                epsilon=self.reconstruction_epsilon,
                embedding_mu_kernel_initializer=
                recon_embedding_kernel_initializer,
                embedding_mu_bias_initializer=recon_embedding_bias_initializer,
                latent_mu_kernel_initialiazer=recon_latent_kernel_initialiazer,
                latent_mu_bias_initializer=recon_latent_bias_initializer,
            )
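The encoder outputs a mean and log-variance for q(z|x), and the decoder is chosen by kind: a SigmoidEncoder for binary data, otherwise a RandomNormalEncoder with fixed unit variance. The sampling step itself is not part of this excerpt; as a generic illustration of the reparameterisation trick such an encoder output feeds (not this class's own method):

import tensorflow as tf

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps the sample differentiable
    eps = tf.random.normal(tf.shape(mu))
    return mu + tf.exp(0.5 * logvar) * eps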
Example #7
    def __init__(
        self,
        latent_dimension,
        embedding_dimensions,
        embedding_activations=tf.nn.relu,
        var_scope="normal_encoder",
        bn_before=False,
        bn_after=False,
        epsilon=0.0,
        embedding_mu_kernel_initializer=tf.initializers.glorot_normal(),
        embedding_mu_bias_initializer=tf.initializers.zeros(),
        latent_mu_kernel_initialiazer=tf.initializers.glorot_normal(),
        latent_mu_bias_initializer=tf.initializers.zeros(),
        embedding_var_kernel_initializer=tf.initializers.glorot_normal(),
        embedding_var_bias_initializer=tf.initializers.zeros(),
        latent_var_kernel_initialiazer=tf.initializers.glorot_normal(),
        latent_var_bias_initializer=tf.initializers.ones(),
        fixed_mu=None,
        fixed_var=None,
        connected_weights=True,
        embedding_mu_dropout=0.0,
        embedding_var_dropout=0.0,
    ):
        """Probability Layer multivariate random normal

        Parameters:
        -----------
        latent_dimensions: int, Output dimension of the probabilistic layer
        embedding_dimensions: list, the dimension of each layer from input to
            output for the embedding layers of the encoder for mu and logvar
        embedding_activation = the tensorflow activation function to apply to 
            each layer of the embedding encoder for mu and logvar
        bn_before: bool, flag whether to apply batch normalisation before
            activation in the encoder for mu and logvar
        bn_after: bool, glag whether to apply batch normalisation after 
            activation in the encoder for mu and logvar
        fixed_mu: value (to be implemented) A fixed value for mu
        fixed_var: value (to be implemented) A fixed value for var
        connected_weights: bool, whether to train mu and var as a fully 
            connected network. 

        """
        Layer.__init__(self)
        Scope.__init__(self, var_scope)

        self.latent_dimension = latent_dimension
        self.embedding_dimensions = embedding_dimensions
        self.embedding_activation = embedding_activations
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.epsilon = epsilon
        self.fixed_mu = fixed_mu
        self.fixed_var = fixed_var
        self.connected_weights = connected_weights

        if not self.connected_weights:
            self.mu = Encoder(
                latent_dim=self.latent_dimension,
                embedding_dimensions=self.embedding_dimensions,
                activation=self.embedding_activation,
                var_scope=self.v_name("mu_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=embedding_mu_kernel_initializer,
                embedding_bias_initializer=embedding_mu_bias_initializer,
                latent_kernel_initialiazer=latent_mu_kernel_initialiazer,
                latent_bias_initializer=latent_mu_bias_initializer,
                embedding_dropout=embedding_mu_dropout,
            )
            self.logvar = Encoder(
                latent_dim=self.latent_dimension,
                embedding_dimensions=self.embedding_dimensions,
                activation=self.embedding_activation,
                var_scope=self.v_name("logvar_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=embedding_var_kernel_initializer,
                embedding_bias_initializer=embedding_var_bias_initializer,
                latent_kernel_initialiazer=latent_var_kernel_initialiazer,
                latent_bias_initializer=latent_var_bias_initializer,
                embedding_dropout=embedding_var_dropout,
            )
        else:
            self.mu_logvar = Encoder(
                latent_dim=2 * self.latent_dimension,
                embedding_dimensions=[x for x in self.embedding_dimensions],
                activation=self.embedding_activation,
                var_scope=self.v_name("mu_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=embedding_mu_kernel_initializer,
                embedding_bias_initializer=embedding_mu_bias_initializer,
                latent_kernel_initialiazer=latent_mu_kernel_initialiazer,
                latent_bias_initializer=latent_mu_bias_initializer,
                embedding_dropout=embedding_mu_dropout,
            )
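When connected_weights is true, a single Encoder with 2 * latent_dimension output units replaces the separate mu and logvar networks. How that joint output is consumed is not shown here; the usual pattern (an assumption about this class, illustrated generically) is a split along the last axis:

import tensorflow as tf

def split_mu_logvar(joint_output):
    # first half of the units is taken as mu, second half as the log-variance
    mu, logvar = tf.split(joint_output, num_or_size_splits=2, axis=-1)
    return mu, logvar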
Example #8
    def __init__(
        self,
        input_dimension,
        embedding_dimensions,
        latent_dim,
        embedding_activations=tf.nn.tanh,
        kind="binary",
        var_scope="marginal_autoencoder",
        bn_before=False,
        bn_after=False,
        latent_epsilon=0.0,
        latent_prior_epsilon=0.0,
        reconstruction_epsilon=0.0,
        latent_mu_embedding_kernel_initializer="glorot_uniform",
        latent_mu_embedding_bias_initializer="zeros",
        latent_mu_latent_kernel_initialiazer="glorot_uniform",
        latent_mu_latent_bias_initializer="zeros",
        latent_var_embedding_kernel_initializer="glorot_uniform",
        latent_var_embedding_bias_initializer="zeros",
        latent_var_latent_kernel_initialiazer="glorot_uniform",
        latent_var_latent_bias_initializer="zeros",
        posterior_mu_embedding_kernel_initializer="glorot_uniform",
        posterior_mu_embedding_bias_initializer="zeros",
        posterior_mu_latent_kernel_initialiazer="glorot_uniform",
        posterior_mu_latent_bias_initializer="zeros",
        posterior_var_embedding_kernel_initializer="glorot_uniform",
        posterior_var_embedding_bias_initializer="zeros",
        posterior_var_latent_kernel_initialiazer="glorot_uniform",
        posterior_var_latent_bias_initializer="zeros",
        recon_embedding_kernel_initializer="glorot_uniform",
        recon_embedding_bias_initializer="zeros",
        recon_latent_kernel_initialiazer="glorot_uniform",
        recon_latent_bias_initializer="zeros",
        connected_weights=True,
        latent_mu_embedding_dropout=0.0,
        latent_var_embedding_dropout=0.0,
        posterior_mu_dropout=0.0,
        posterior_var_dropout=0.0,
        recon_dropouut=0.0,
        latent_fixed_var=None,
    ):
        Model.__init__(self)
        Scope.__init__(self, var_scope)

        # store the constructor arguments that are referenced below
        self.input_dimension = input_dimension
        self.embedding_dimensions = embedding_dimensions
        self.latent_dim = latent_dim
        self.kind = kind
        self.bn_before = bn_before
        self.bn_after = bn_after
        self.latent_epsilon = latent_epsilon
        self.latent_prior_epsilon = latent_prior_epsilon
        self.reconstruction_epsilon = reconstruction_epsilon

        with tf.name_scope("graph_qz_g_xy"):
            self.graphs_qz_g_xy = RandomNormalEncoder(
                latent_dimension=self.latent_dim,
                embedding_dimensions=self.embedding_dimensions,
                var_scope=self.v_name("graph_qz_g_xy"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                epsilon=self.latent_epsilon,
                embedding_mu_kernel_initializer=
                latent_mu_embedding_kernel_initializer,
                embedding_mu_bias_initializer=
                latent_mu_embedding_bias_initializer,
                latent_mu_kernel_initialiazer=
                latent_mu_latent_kernel_initialiazer,
                latent_mu_bias_initializer=latent_mu_latent_bias_initializer,
                embedding_var_kernel_initializer=
                latent_var_embedding_kernel_initializer,
                embedding_var_bias_initializer=
                latent_var_embedding_bias_initializer,
                latent_var_kernel_initialiazer=
                latent_var_latent_kernel_initialiazer,
                latent_var_bias_initializer=latent_var_latent_bias_initializer,
                connected_weights=connected_weights,
                embedding_mu_dropout=latent_mu_embedding_dropout,
                embedding_var_dropout=latent_var_embedding_dropout,
                fixed_var=latent_fixed_var,
            )

        with tf.name_scope("graph_pz_g_y"):
            self.graphs_pz_g_y = RandomNormalEncoder(
                latent_dimension=self.latent_dim,
                embedding_dimensions=[],
                var_scope=self.v_name("graph_pz_g_y"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                epsilon=self.latent_prior_epsilon,
                embedding_mu_kernel_initializer=
                posterior_mu_embedding_kernel_initializer,
                embedding_mu_bias_initializer=
                posterior_mu_embedding_bias_initializer,
                latent_mu_kernel_initialiazer=
                posterior_mu_latent_kernel_initialiazer,
                latent_mu_bias_initializer=posterior_mu_latent_bias_initializer,
                embedding_var_kernel_initializer=
                posterior_var_embedding_kernel_initializer,
                embedding_var_bias_initializer=
                posterior_var_embedding_bias_initializer,
                latent_var_kernel_initialiazer=
                posterior_var_latent_kernel_initialiazer,
                latent_var_bias_initializer=
                posterior_var_latent_bias_initializer,
                connected_weights=connected_weights,
                embedding_mu_dropout=posterior_mu_dropout,
                embedding_var_dropout=posterior_var_dropout,
                fixed_var=latent_fixed_var,
            )
        with tf.name_scope("graph_px_g_y"):
            if self.kind == "binary":
                self.graphs_px_g_zy = SigmoidEncoder(
                    latent_dimension=self.input_dimension,
                    embedding_dimensions=self.embedding_dimensions[::-1],
                    var_scope=self.v_name("graph_px_g_y"),
                    bn_before=self.bn_before,
                    bn_after=self.bn_after,
                    epsilon=self.reconstruction_epsilon,
                    embedding_kernel_initializer=
                    recon_embedding_kernel_initializer,
                    embedding_bias_initializer=recon_embedding_bias_initializer,
                    latent_kernel_initialiazer=recon_latent_kernel_initialiazer,
                    latent_bias_initializer=recon_latent_bias_initializer,
                    embedding_dropout=recon_dropouut,
                )
            else:
                self.graphs_px_g_zy = RandomNormalEncoder(
                    self.input_dimension,
                    self.embedding_dimensions[::-1],
                    bn_before=self.bn_before,
                    bn_after=self.bn_after,
                    embedding_mu_dropout=recon_dropouut,
                    embedding_var_dropout=recon_dropouut,
                    fixed_var=1.0,
                    epsilon=self.reconstruction_epsilon,
                    embedding_mu_kernel_initializer=
                    recon_embedding_kernel_initializer,
                    embedding_mu_bias_initializer=
                    recon_embedding_bias_initializer,
                    latent_mu_kernel_initialiazer=
                    recon_latent_kernel_initialiazer,
                    latent_mu_bias_initializer=recon_latent_bias_initializer,
                )
Example #9
    def __init__(
        self,
        config: RandomNormalEncoder.Config,
        **kwargs,
    ):
        """Probability Layer multivariate random normal

        Parameters:
        -----------
        latent_dimensions: int, Output dimension of the probabilistic layer
        embedding_dimensions: list, the dimension of each layer from input to
            output for the embedding layers of the encoder for mu and logvar
        embedding_activation = the tensorflow activation function to apply to
            each layer of the embedding encoder for mu and logvar
        bn_before: bool, flag whether to apply batch normalisation before
            activation in the encoder for mu and logvar
        bn_after: bool, glag whether to apply batch normalisation after
            activation in the encoder for mu and logvar
        fixed_mu: value (to be implemented) A fixed value for mu
        fixed_var: value (to be implemented) A fixed value for var
        connected_weights: bool, whether to train mu and var as a fully
            connected network.

        """
        Layer.__init__(self, **kwargs)
        Scope.__init__(self, "normal_encoder")

        self.latent_dimension = config.latent_dim
        self.embedding_dimensions = config.embedding_dimensions
        self.embedding_activation = config.embedding_activations
        self.bn_before = config.bn_before
        self.bn_after = config.bn_after
        self.epsilon = config.epsilon
        self.fixed_mu = config.fixed_mu
        self.fixed_var = config.fixed_var
        self.connected_weights = config.connected_weights

        if not self.connected_weights:
            self.mu = Encoder(
                latent_dim=self.latent_dimension,
                embedding_dimensions=self.embedding_dimensions,
                activation=self.embedding_activation,
                var_scope=self.v_name("mu_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=config.
                embedding_mu_kernel_initializer,
                embedding_bias_initializer=config.
                embedding_mu_bias_initializer,
                latent_kernel_initialiazer=config.
                latent_mu_kernel_initialiazer,
                latent_bias_initializer=config.latent_mu_bias_initializer,
                embedding_dropout=config.embedding_mu_dropout,
            )
            self.logvar = Encoder(
                latent_dim=self.latent_dimension,
                embedding_dimensions=self.embedding_dimensions,
                activation=self.embedding_activation,
                var_scope=self.v_name("logvar_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=config.
                embedding_var_kernel_initializer,
                embedding_bias_initializer=config.
                embedding_var_bias_initializer,
                latent_kernel_initialiazer=config.
                latent_var_kernel_initialiazer,
                latent_bias_initializer=config.latent_var_bias_initializer,
                embedding_dropout=config.embedding_var_dropout,
            )
        else:
            self.mu_logvar = Encoder(
                latent_dim=2 * self.latent_dimension,
                embedding_dimensions=[x for x in self.embedding_dimensions],
                activation=self.embedding_activation,
                var_scope=self.v_name("mu_encoder"),
                bn_before=self.bn_before,
                bn_after=self.bn_after,
                embedding_kernel_initializer=config.
                embedding_mu_kernel_initializer,
                embedding_bias_initializer=config.
                embedding_mu_bias_initializer,
                latent_kernel_initialiazer=config.
                latent_mu_kernel_initialiazer,
                latent_bias_initializer=config.latent_mu_bias_initializer,
                embedding_dropout=config.embedding_mu_dropout,
            )
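This variant moves every constructor argument into a nested RandomNormalEncoder.Config object, which is not shown in the excerpt. A hedged sketch of a matching config, listing the plain fields the code above reads; the defaults are assumptions, and the many kernel/bias initializer fields (embedding_mu_kernel_initializer, latent_mu_kernel_initialiazer, ...) follow the same naming as the earlier examples:

from dataclasses import dataclass
from typing import Callable, List, Optional

import tensorflow as tf

@dataclass
class Config:
    latent_dim: int
    embedding_dimensions: List[int]
    embedding_activations: Callable = tf.nn.relu
    bn_before: bool = False
    bn_after: bool = False
    epsilon: float = 0.0
    fixed_mu: Optional[float] = None
    fixed_var: Optional[float] = None
    connected_weights: bool = True
    embedding_mu_dropout: float = 0.0
    embedding_var_dropout: float = 0.0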
Example #10
    def __init__(self, var_scope="GumbleSoftmax", axis=-1):

        Layer.__init__(self)
        Scope.__init__(self, var_scope)
        self.axis = axis
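The layer above only records its softmax axis; the call method is not part of this excerpt. As a generic illustration of the Gumbel-softmax trick the name refers to (not this layer's implementation), a differentiable, approximately one-hot sample can be drawn from unnormalised log-probabilities like this:

import tensorflow as tf

def gumbel_softmax_sample(logits, temperature=1.0, axis=-1):
    # sample Gumbel(0, 1) noise and add it to the logits
    uniform = tf.random.uniform(tf.shape(logits), minval=1e-8, maxval=1.0)
    gumbel = -tf.math.log(-tf.math.log(uniform))
    # lower temperatures push the softmax output closer to a one-hot vector
    return tf.nn.softmax((logits + gumbel) / temperature, axis=axis)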