Example #1
0
    def build_graph(self, encoder_layer_sizes, decoder_layer_sizes):
        """Build the TF computation graph of a VAE with trainable GMM priors.

        Creates input/noise placeholders, trainable per-class prior means,
        variances and mixture weights, a feed-forward encoder producing the
        Gaussian posterior parameters, the reparametrized latent code ``Z``,
        cluster weights, and a feed-forward decoder.

        Args:
            encoder_layer_sizes: hidden layer sizes of the encoder network.
            decoder_layer_sizes: hidden layer sizes of the decoder network.

        Returns:
            self, for fluent chaining (consistent with the other
            ``build_graph`` implementations in this file).

        Raises:
            NotImplementedError: if ``self.input_type`` is neither
                "binary" nor "real".
        """
        with tf.variable_scope(self.name) as _:
            self.X = tf.placeholder(tf.float32,
                                    shape=(None, self.input_dim),
                                    name="X")
            self.epsilon = tf.placeholder(tf.float32,
                                          shape=(None, self.latent_dim),
                                          name="epsilon_Z")
            # Make the GMM priors trainable: per-class means, variances
            # and mixture weights.
            self.prior_means = tf.Variable(tf.random_normal(
                (self.n_classes, self.latent_dim), stddev=5.0),
                                           dtype=tf.float32,
                                           name="prior_means")
            self.prior_vars = tf.Variable(tf.ones(
                (self.n_classes, self.latent_dim)),
                                          dtype=tf.float32,
                                          name="prior_vars")
            # Mixture weights start uniform (1 / n_classes).
            self.prior_weights = tf.Variable(tf.ones(
                (self.n_classes)) / self.n_classes,
                                             dtype=tf.float32,
                                             name="prior_weights")

            self.encoder_network = FeedForwardNetwork(name="vae_encoder")
            self.mean, self.log_var = self.encoder_network.build(
                [("mean", self.latent_dim),
                 ("log_var", self.latent_dim)], encoder_layer_sizes, self.X)

            self.latent_variables = dict()
            self.latent_variables.update({
                "Z": (priors.NormalFactorial("representation",
                                             self.latent_dim), self.epsilon, {
                                                 "mean": self.mean,
                                                 "log_var": self.log_var,
                                             })
            })

            lv, eps, params = self.latent_variables["Z"]
            self.Z = lv.inverse_reparametrize(eps, params)

            self.cluster_weights = self.find_cluster_weights()

            self.decoder_network = FeedForwardNetwork(name="vae_decoder")
            self.decoded_X = self.decoder_network.build(
                [("vae_decoder", self.input_dim)], decoder_layer_sizes, self.Z)

            # Map decoder logits to data space according to the likelihood.
            if self.input_type == "binary":
                self.reconstructed_X = tf.nn.sigmoid(self.decoded_X)
            elif self.input_type == "real":
                self.reconstructed_X = self.decoded_X
            else:
                raise NotImplementedError

        # FIX: previously this method returned None, unlike every other
        # build_graph variant in this file, which all `return self` to
        # support fluent chaining.
        return self
Example #2
0
    def build_graph(self, encoder_layer_sizes, decoder_layer_sizes):
        """Build the TF computation graph of a standard VAE.

        Args:
            encoder_layer_sizes: hidden layer sizes of the encoder network.
            decoder_layer_sizes: hidden layer sizes of the decoder network.

        Returns:
            self, for fluent chaining.

        Raises:
            NotImplementedError: if ``self.input_type`` is not one of
                None, "real" or "binary".
        """
        with tf.variable_scope(self.name) as _:
            self.X = tf.placeholder(tf.float32, shape=(
                None, self.input_dim), name="X")
            self.epsilon = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim), name="reparametrization_variable"
            )

            encoder_network = FeedForwardNetwork(
                name="encoder_network",
                activation=self.activation,
                initializer=self.initializer
            )

            # BUG FIX: the log_var head was hard-coded to width 10; it must
            # match the latent dimensionality, since epsilon has shape
            # (None, latent_dim) and the reparametrization combines mean,
            # log_var and epsilon elementwise.
            self.mean, self.log_var = encoder_network.build(
                [("mean", self.latent_dim), ("log_var", self.latent_dim)],
                encoder_layer_sizes, self.X
            )

            self.latent_variables = {
                "Z": (
                    priors.NormalFactorial(
                        "latent_representation", self.latent_dim
                    ), self.epsilon,
                    {"mean": self.mean, "log_var": self.log_var}
                )
            }

            lv, eps, params = self.latent_variables["Z"]
            self.Z = lv.inverse_reparametrize(eps, params)

            self.decoder_network = FeedForwardNetwork(
                name="decoder_network",
                activation=self.activation,
                initializer=self.initializer
            )
            self.decoded_X = self.decoder_network.build(
                [("decoded_X", self.input_dim)], decoder_layer_sizes, self.Z
            )

            # None is treated the same as "real": raw decoder outputs.
            if self.input_type is None:
                self.reconstructed_X = self.decoded_X
            elif self.input_type == "real":
                self.reconstructed_X = self.decoded_X
            elif self.input_type == "binary":
                self.reconstructed_X = tf.nn.sigmoid(self.decoded_X)
            else:
                # FIX: an unrecognized input_type previously fell through
                # silently, leaving reconstructed_X undefined and causing a
                # confusing AttributeError later; fail fast instead.
                raise NotImplementedError

        return self
Example #3
0
    def build_graph(self, encoder_layer_sizes, decoder_layer_sizes):
        """Build the graph-VAE computation graph with a hybrid latent code.

        Two GCN encoders share the input: one produces Gaussian posterior
        parameters for a real-valued code ``Z_real``, the other produces
        log-ratios for a relaxed binary code ``Z_binary`` under an RBM
        prior (temperature-controlled reparametrization). The final latent
        code is their elementwise product, and link logits are inner
        products of latent codes plus a scalar bias. Link-prediction
        accuracy is computed against ``A_orig``.

        Args:
            encoder_layer_sizes: hidden layer sizes shared by both GCN
                encoders.
            decoder_layer_sizes: unused in the current decoder (see NOTE
                below).

        Returns:
            self, for fluent chaining.
        """
        with tf.variable_scope(self.name) as _:
            # Node features; shape deliberately left unspecified.
            self.X = tf.placeholder(tf.float32, name="X")

            # A: adjacency fed to the GCN encoders; A_orig: the adjacency
            # used as reconstruction labels.
            self.A = tf.placeholder(
                tf.float32, shape=(None, None),
                name="adjacency_matrix"
            )
            self.A_orig = tf.placeholder(
                tf.float32, shape=(None, None),
                name="adjacency_matrix_orig"
            )

            # Reparametrization noise for the Gaussian and binary codes.
            self.epsilon_real = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim),
                name="real_reparametrization_variable"
            )
            self.epsilon_binary = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim),
                name="binary_reparametrization_variable"
            )
            # Samples drawn from the RBM prior, supplied by the caller.
            self.rbm_prior_samples = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim), name="rbm_prior_samples"
            )

            # Relaxation temperature for the binary code (default 0.2) and
            # GCN dropout rate (default 0.0, i.e. no dropout).
            self.temperature = tf.placeholder_with_default(
                0.2, shape=(), name="temperature"
            )
            self.dropout = tf.placeholder_with_default(
                0.0, shape=(), name="dropout"
            )

            # Scalar bias added to every link logit.
            self.bias = tf.get_variable(
                "bias", shape=(1,), dtype=tf.float32,
                initializer=tf.initializers.zeros
            )

            # Encoder for the real-valued (Gaussian) latent code.
            real_encoder_network = GraphConvolutionalNetwork(
                name="real_encoder_network",
                dropout=self.dropout,
                activation=self.activation,
                initializer=self.initializer
            )
            self.mean, self.log_var = real_encoder_network.build(
                self.input_dim,
                [("mean", self.latent_dim), ("log_var", self.latent_dim)],
                encoder_layer_sizes, self.A, self.X
            )

            # Encoder for the binary latent code (log odds per unit).
            binary_encoder_network = GraphConvolutionalNetwork(
                name="binary_encoder_network",
                dropout=self.dropout,
                activation=self.activation,
                initializer=self.initializer
            )
            self.log_ratios = binary_encoder_network.build(
                self.input_dim,
                [("log_ratios", self.latent_dim)],
                encoder_layer_sizes, self.A, self.X
            )

            # Each entry: (prior, reparametrization noise, posterior params).
            self.latent_variables = {
                "Z_real": (
                    priors.NormalFactorial(
                        "latent_representation", self.latent_dim
                    ), self.epsilon_real,
                    {"mean": self.mean, "log_var": self.log_var}
                ),
                "Z_binary": (
                    priors.RBMPrior(
                        "rbm_prior", self.visible_dim, self.hidden_dim, beta=10.0, trainable=True
                    ), self.epsilon_binary,
                    {"log_ratios": self.log_ratios, "temperature": self.temperature,
                        "samples": self.rbm_prior_samples}
                )
            }

            lv, eps, params = self.latent_variables["Z_real"]
            self.Z_real = lv.inverse_reparametrize(eps, params)

            lv, eps, params = self.latent_variables["Z_binary"]
            self.Z_binary = lv.inverse_reparametrize(eps, params)
            # The relaxed sample is stored back into the params dict under
            # "zeta" — presumably consumed later (e.g. by the loss); the
            # mutation must happen after inverse_reparametrize.
            self.latent_variables["Z_binary"][2]["zeta"] = self.Z_binary

            # Hybrid code: binary mask gating the real-valued code.
            self.Z = self.Z_binary * self.Z_real

            # NOTE: an earlier (removed) variant decoded Z through a
            # feed-forward network into node features before taking inner
            # products; the current decoder uses Z directly, which is why
            # decoder_layer_sizes is unused in this method.
            self.link_weights = tf.matmul(
                self.Z, self.Z, transpose_b=True
            ) + self.bias

            self.preds = tf.reshape(self.link_weights, (-1,))
            self.labels = tf.reshape(self.A_orig, (-1,))

            # Threshold sigmoid(logits) at 0.5 and compare with the labels.
            correct_prediction = tf.equal(
                tf.cast(tf.greater_equal(
                    tf.sigmoid(self.preds), 0.5), tf.int32),
                tf.cast(self.labels, tf.int32)
            )
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))

        return self
Example #4
0
    def build_graph(self, encoder_layer_sizes, decoder_layer_sizes):
        """Assemble the computation graph of a Gaussian graph autoencoder.

        A GCN encoder maps node features to Gaussian posterior parameters,
        the latent code is obtained by reparametrization, and a
        feed-forward decoder produces node features whose inner products
        (plus a scalar bias) are the link logits. Link-prediction accuracy
        is tracked against ``A_orig``.

        Args:
            encoder_layer_sizes: hidden layer sizes of the GCN encoder.
            decoder_layer_sizes: decoder sizes; the last entry is the
                node-feature dimensionality, the rest are hidden layers.

        Returns:
            self, for fluent chaining.
        """
        with tf.variable_scope(self.name) as _:
            # Inputs: node features plus normalized and original adjacency.
            self.X = tf.placeholder(tf.float32, name="X")
            self.A = tf.placeholder(
                tf.float32, shape=(None, None), name="adjacency_matrix")
            self.A_orig = tf.placeholder(
                tf.float32, shape=(None, None), name="adjacency_matrix_orig")

            # Scalar bias added to every link logit.
            self.bias = tf.get_variable(
                "bias", shape=(1,), dtype=tf.float32,
                initializer=tf.initializers.zeros)

            self.epsilon = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim),
                name="reparametrization_variable")

            # Dropout defaults to 0.0 (disabled) unless fed explicitly.
            self.dropout = tf.placeholder_with_default(
                0.0, shape=(), name="dropout")

            gcn_encoder = GraphConvolutionalNetwork(
                name="encoder_network",
                dropout=self.dropout,
                activation=self.activation,
                initializer=self.initializer)
            encoder_heads = [
                ("mean", self.latent_dim),
                ("log_var", self.latent_dim),
            ]
            self.mean, self.log_var = gcn_encoder.build(
                self.input_dim, encoder_heads,
                encoder_layer_sizes, self.A, self.X)

            # Entry layout: (prior, reparametrization noise, posterior params).
            z_prior = priors.NormalFactorial(
                "latent_representation", self.latent_dim)
            z_params = {"mean": self.mean, "log_var": self.log_var}
            self.latent_variables = {"Z": (z_prior, self.epsilon, z_params)}
            self.Z = z_prior.inverse_reparametrize(self.epsilon, z_params)

            # Last decoder size is the node-feature width; the remainder
            # are the decoder's hidden layers.
            features_dim = decoder_layer_sizes[-1]
            decoder_layer_sizes = decoder_layer_sizes[:-1]

            self.decoder_network = FeedForwardNetwork(
                name="decoder_network",
                activation=self.activation,
                initializer=self.initializer)
            self.node_features = self.decoder_network.build(
                [("node_features", features_dim)],
                decoder_layer_sizes, self.Z)

            # Link logits: inner products of node features plus the bias.
            gram = tf.matmul(
                self.node_features, self.node_features, transpose_b=True)
            self.link_weights = gram + self.bias

            self.preds = tf.reshape(self.link_weights, (-1,))
            self.labels = tf.reshape(self.A_orig, (-1,))

            # Threshold sigmoid(logits) at 0.5 and compare with the labels.
            predicted = tf.cast(
                tf.greater_equal(tf.sigmoid(self.preds), 0.5), tf.int32)
            hits = tf.equal(predicted, tf.cast(self.labels, tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

        return self
Example #5
0
    def build_graph(self, encoder_layer_sizes, decoder_layer_sizes):
        """Build the computation graph of a mixture-of-encoders VAE.

        A categorical variable ``C`` (relaxed via a temperature
        placeholder) selects among ``n_classes`` Gaussian encoders; their
        means and variances are mixed with ``C``'s weights to parametrize
        the latent code ``Z``, which is then decoded back to data space.

        Args:
            encoder_layer_sizes: dict with keys "C" and "Z" giving the
                hidden layer sizes of the cluster and latent encoders.
            decoder_layer_sizes: hidden layer sizes of the decoder.

        Returns:
            self, for fluent chaining.

        Raises:
            NotImplementedError: if ``self.input_type`` is neither
                "binary" nor "real".
        """
        with tf.variable_scope(self.name) as _:
            # Inputs and reparametrization noise for Z and C.
            self.X = tf.placeholder(
                tf.float32, shape=(None, self.input_dim), name="X")
            self.epsilon = tf.placeholder(
                tf.float32, shape=(None, self.latent_dim), name="epsilon_Z")
            self.cluster = tf.placeholder(
                tf.float32, shape=(None, 1, self.n_classes), name="epsilon_C")
            # Relaxation temperature for C; defaults to 1.0.
            self.temperature = tf.placeholder_with_default(
                1.0, shape=None, name="temperature")

            self.latent_variables = dict()

            # Categorical cluster assignment C.
            self.c_encoder_network = FeedForwardNetwork(
                name="c/encoder_network")
            self.logits = self.c_encoder_network.build(
                [("logits", self.n_classes)], encoder_layer_sizes["C"], self.X)

            c_prior = priors.DiscreteFactorial("cluster", 1, self.n_classes)
            c_params = {
                "logits": self.logits,
                "temperature": self.temperature,
            }
            self.latent_variables["C"] = (c_prior, self.cluster, c_params)
            self.C = c_prior.inverse_reparametrize(self.cluster, c_params)

            # One Gaussian encoder per cluster; collect their heads.
            self.means = list()
            self.log_vars = list()
            self.z_encoder_networks = [
                FeedForwardNetwork(name="z/encoder_network_%d" % k)
                for k in range(self.n_classes)
            ]
            for network in self.z_encoder_networks:
                component_mean, component_log_var = network.build(
                    [("mean", self.latent_dim), ("log_var", self.latent_dim)],
                    encoder_layer_sizes["Z"], self.X)
                self.means.append(component_mean)
                self.log_vars.append(component_log_var)

            # Mix per-cluster statistics with the (soft) assignment C.
            self.mean = tf.add_n([
                self.means[k] * self.C[:, :, k] for k in range(self.n_classes)
            ])
            # Variances are mixed in variance space, then mapped back to
            # log space.
            self.log_var = tf.log(tf.add_n([
                tf.exp(self.log_vars[k]) * self.C[:, :, k]
                for k in range(self.n_classes)
            ]))

            z_prior = priors.NormalFactorial("representation", self.latent_dim)
            z_params = {"mean": self.mean, "log_var": self.log_var}
            self.latent_variables["Z"] = (z_prior, self.epsilon, z_params)
            self.Z = z_prior.inverse_reparametrize(self.epsilon, z_params)

            self.decoder_network = FeedForwardNetwork(name="decoder_network")
            self.decoded_X = self.decoder_network.build(
                [("decoded_X", self.input_dim)], decoder_layer_sizes, self.Z)

            # Map decoder logits to data space according to the likelihood.
            if self.input_type == "binary":
                self.reconstructed_X = tf.nn.sigmoid(self.decoded_X)
            elif self.input_type == "real":
                self.reconstructed_X = self.decoded_X
            else:
                raise NotImplementedError

        return self