Example #1
    def D_x(self, inputs, units):
        outputs0 = self.discriminator(inputs,
                                      units=units[:-1],
                                      training=self.training,
                                      dropout_rate=self.dropout_rate)

        outputs1 = multi_dense_layers(outputs0,
                                      units=units[-1],
                                      activation=tf.nn.tanh,
                                      training=self.training,
                                      dropout_rate=self.dropout_rate)

        if self.batch_discriminator:
            outputs_batch = keras.layers.Dense(units[-2] // 8,
                                               activation=tf.nn.tanh)(outputs0)
            outputs_batch = keras.layers.Dense(
                units[-2] // 8,
                activation=tf.nn.tanh)(tf.reduce_mean(outputs_batch,
                                                      0,
                                                      keepdims=True))
            outputs_batch = tf.tile(outputs_batch, (tf.shape(outputs0)[0], 1))

            outputs1 = tf.concat((outputs1, outputs_batch), -1)

        outputs = keras.layers.Dense(units=1)(outputs1)

        return outputs, outputs1
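Every example on this page calls a multi_dense_layers helper that is not shown here. The sketch below is an assumption inferred from the call sites (the name and signature come from the examples above; the body is a guess): a stack of dense layers sharing one activation, each followed by dropout.

import tensorflow as tf


def multi_dense_layers(inputs, units, activation=None, training=False,
                       dropout_rate=0.):
    # Assumed behavior: dense -> dropout per width. `units` may be a
    # single width or a sequence of widths; both usages appear above.
    hidden = inputs
    for width in ([units] if isinstance(units, int) else list(units)):
        hidden = tf.layers.dense(hidden, width, activation=activation)
        hidden = tf.layers.dropout(hidden, dropout_rate, training=training)
    return hidden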
Example #2
    def D_x(self, inputs, units):
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            graph_readouts = self.discriminator(  # units: (GCN units, readout unit)
                inputs,
                units=units[:-1],
                training=self.training,
                dropout_rate=self.dropout_rate)

            graph_features = multi_dense_layers(graph_readouts,
                                                units=units[-1],
                                                activation=tf.nn.tanh,
                                                training=self.training,
                                                dropout_rate=self.dropout_rate)

            adj_tensor, _, node_tensor = inputs
            flat_adj_tensor = tf.squeeze(tf.layers.dense(adj_tensor, 1),
                                         axis=-1)
            batch_adj_dev = reduce_dev(flat_adj_tensor, axis=0, lam1=0.6)
            batch_node_dev = reduce_dev(node_tensor, axis=0, lam1=0.6)
            batch_features = tf.concat(
                (flatten(batch_adj_dev), flatten(batch_node_dev)), -1)
            batch_features = tf.layers.dense(batch_features,
                                             units=units[-2] // 8,
                                             activation=tf.nn.relu)

            batch_features = tf.tile(batch_features,
                                     (tf.shape(graph_readouts)[0], 1))
            final_features = tf.concat((graph_features, batch_features),
                                       axis=-1)

            logits = tf.layers.dense(final_features, units=1)

        return logits, graph_features
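reduce_dev and flatten are project-specific helpers not shown on this page. Judging from how their outputs are flattened, projected, and tiled back over the batch, reduce_dev looks like a minibatch deviation statistic in the spirit of minibatch discrimination. The following is a hedged sketch of that reading, not the original implementation:

def reduce_dev(inputs, axis=0, lam1=1.0):
    # Assumption: mean absolute deviation from the batch mean along
    # `axis`, scaled by `lam1`, with the reduced axis kept so the
    # result can be tiled back over the batch.
    mean = tf.reduce_mean(inputs, axis=axis, keepdims=True)
    return lam1 * tf.reduce_mean(tf.abs(inputs - mean), axis=axis,
                                 keepdims=True)


def flatten(inputs):
    # Assumption: collapse every axis after the first.
    return tf.reshape(inputs, (tf.shape(inputs)[0], -1))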
Example #3
    def D_x(self, inputs, units):
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            outputs0 = self.discriminator(inputs,
                                          units=units[:-1],
                                          training=self.training,
                                          dropout_rate=self.dropout_rate)

            outputs1 = multi_dense_layers(outputs0,
                                          units=units[-1],
                                          activation=tf.nn.tanh,
                                          training=self.training,
                                          dropout_rate=self.dropout_rate)

            if self.batch_discriminator:
                outputs_batch = tf.layers.dense(outputs0,
                                                units[-2] // 8,
                                                activation=tf.tanh)
                outputs_batch = tf.layers.dense(tf.reduce_mean(outputs_batch,
                                                               0,
                                                               keepdims=True),
                                                units[-2] // 8,
                                                activation=tf.nn.tanh)
                outputs_batch = tf.tile(outputs_batch,
                                        (tf.shape(outputs0)[0], 1))

                outputs1 = tf.concat((outputs1, outputs_batch), -1)

            outputs = tf.layers.dense(outputs1, units=1)

        return outputs, outputs1
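The batch_discriminator branch in Examples #1 and #3 is a form of minibatch discrimination: tf.reduce_mean(..., 0, keepdims=True) computes a statistic over the whole minibatch, which is then tiled back onto every sample, so the per-sample logit can react to batch-level signals such as mode collapse.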
Example #4
def decoder_dot(inputs,
                units,
                vertexes,
                edges,
                nodes,
                training,
                dropout_rate=0.):
    output = multi_dense_layers(inputs,
                                units[:-1],
                                activation=tf.nn.tanh,
                                dropout_rate=dropout_rate,
                                training=training)
    with tf.variable_scope('edges_logits'):
        edges_logits = tf.reshape(
            tf.layers.dense(inputs=output,
                            units=edges * vertexes * units[-1],
                            activation=None), (-1, edges, vertexes, units[-1]))
        edges_logits = tf.transpose(
            tf.matmul(edges_logits, tf.matrix_transpose(edges_logits)),
            (0, 2, 3, 1))
        edges_logits = tf.layers.dropout(edges_logits,
                                         dropout_rate,
                                         training=training)
    with tf.variable_scope('nodes_logits'):
        nodes_logits = tf.layers.dense(inputs=output,
                                       units=vertexes * nodes,
                                       activation=None)
        nodes_logits = tf.reshape(nodes_logits, (-1, vertexes, nodes))
        nodes_logits = tf.layers.dropout(nodes_logits,
                                         dropout_rate,
                                         training=training)
    return edges_logits, nodes_logits
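decoder_dot builds the edge logits as inner products between per-edge-type vertex embeddings, so the adjacency logits are symmetric in the two vertex axes by construction. A quick illustrative check (shapes are stand-ins, not values from the model):

# h: (batch, edges, vertexes, dim), as inside decoder_dot above.
h = tf.random_normal((2, 4, 9, 16))
logits = tf.transpose(tf.matmul(h, tf.matrix_transpose(h)), (0, 2, 3, 1))
# logits: (batch, vertexes, vertexes, edges), with
# logits[b, i, j, e] == logits[b, j, i, e] for every i, j.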
Example #5
    def D_x(self, inputs, units):
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            graph_readouts = self.discriminator(  # units: (GCN units, readout unit)
                inputs,
                units=units[:-1],
                training=self.training,
                dropout_rate=self.dropout_rate)

            graph_features = multi_dense_layers(graph_readouts,
                                                units=units[-1],
                                                activation=tf.nn.tanh,
                                                training=self.training,
                                                dropout_rate=self.dropout_rate)

            if self.batch_discriminator:
                batch_features = tf.layers.dense(graph_readouts,
                                                 units[-2] // 8,
                                                 activation=tf.tanh)
                batch_features = tf.layers.dense(tf.reduce_mean(
                    batch_features, 0, keepdims=True),
                                                 units[-2] // 8,
                                                 activation=tf.nn.tanh)

                batch_features = tf.tile(batch_features,
                                         (tf.shape(graph_readouts)[0], 1))
                final_features = tf.concat((graph_features, batch_features),
                                           -1)
            else:
                final_features = graph_features

            logits = tf.layers.dense(final_features, units=1)

        return logits, graph_features
Example #6
    def D_x(self, inputs, units):
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            graph_readouts = self.discriminator(  # units: (GCN units, readout unit)
                inputs,
                units=units[:-1],
                training=self.training,
                dropout_rate=self.dropout_rate)

            batch_dev = reduce_dev(inputs=graph_readouts, axis=0, lam1=0.6)
            batch_mean = tf.reduce_mean(graph_readouts, axis=0, keepdims=True)
            batch_features = tf.concat([batch_mean, batch_dev], axis=-1)
            batch_features = tf.layers.dense(batch_features,
                                             units=units[-2] // 8,
                                             activation=tf.nn.relu)

            batch_features = tf.tile(batch_features,
                                     (tf.shape(graph_readouts)[0], 1))
            final_features = tf.concat((graph_readouts, batch_features),
                                       axis=-1)

            final_features = multi_dense_layers(final_features,
                                                units=units[-1],
                                                activation=tf.nn.tanh,
                                                training=self.training,
                                                dropout_rate=self.dropout_rate)

            logits = tf.layers.dense(final_features, units=1)

        return logits, graph_readouts
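Unlike the flag-guarded branch in Examples #3 and #5, this variant computes the minibatch statistics unconditionally and from two signals, the batch mean and a reduce_dev deviation of the graph readouts, and concatenates them onto the readouts before the final dense stack rather than after it.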
Example #7
    def V_x(self, inputs, units):
        with tf.variable_scope('value', reuse=tf.AUTO_REUSE):
            outputs = self.discriminator(inputs, units=units[:-1], training=self.training,
                                         dropout_rate=self.dropout_rate)

            outputs = multi_dense_layers(outputs, units=units[-1], activation=tf.nn.tanh, training=self.training,
                                         dropout_rate=self.dropout_rate)

            outputs = tf.layers.dense(outputs, units=1, activation=tf.nn.sigmoid)

        return outputs
Example #8
def decoder_adj(inputs, units, vertexes, edges, nodes, training, dropout_rate=0.):
    output = multi_dense_layers(inputs, units, activation=tf.nn.tanh, dropout_rate=dropout_rate, training=training)

    edges_logits = tf.reshape(keras.layers.Dense(units=edges * vertexes * vertexes,
                                                 activation=None)(output),
                              (-1, edges, vertexes, vertexes))

    edges_logits = tf.transpose((edges_logits + tf.transpose(edges_logits, (0, 1, 3, 2))) / 2, (0, 2, 3, 1))

    edges_logits = keras.layers.Dropout(dropout_rate)(edges_logits, training=training)

    nodes_logits = keras.layers.Dense(units=vertexes * nodes, activation=None)(output)
    nodes_logits = tf.reshape(nodes_logits, (-1, vertexes, nodes))
    nodes_logits = keras.layers.Dropout(dropout_rate)(nodes_logits, training=training)

    return edges_logits, nodes_logits
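decoder_adj predicts a dense vertexes x vertexes block of logits per edge type and enforces symmetry explicitly by averaging the logits with their transpose over the two vertex axes, whereas decoder_dot (Example #4) is symmetric by construction.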
Example #9
    def D_x(self, inputs, units):
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            graph_readouts = self.discriminator(  # units: (GCN units, readout unit)
                inputs,
                units=units[:-1],
                training=self.training,
                dropout_rate=self.dropout_rate)

            graph_features = multi_dense_layers(graph_readouts,
                                                units=units[-1],
                                                activation=tf.nn.tanh,
                                                training=self.training,
                                                dropout_rate=self.dropout_rate)

            pac_features = tf.reduce_max(graph_features, axis=0, keepdims=True)
            logits = tf.layers.dense(pac_features, units=1)

        return logits, graph_features
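Here tf.reduce_max over axis 0 pools the per-graph features into a single vector for the whole minibatch, so the discriminator emits one logit per pack of samples rather than one per sample, in the spirit of a PacGAN-style packed discriminator (the pac_features name suggests as much).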
Example #10
    def V_x(self, inputs, units):  # outputs reward estimates

        with tf.variable_scope('value', reuse=tf.AUTO_REUSE):
            graph_readouts = self.discriminator(  # units: (GCN units, readout unit)
                inputs,
                units=units[:-1],
                training=self.training,
                dropout_rate=self.dropout_rate)

            graph_readouts = multi_dense_layers(graph_readouts,
                                                units=units[-1],
                                                activation=tf.nn.tanh,
                                                training=self.training,
                                                dropout_rate=self.dropout_rate)

            logits = tf.layers.dense(graph_readouts,
                                     units=1,
                                     activation=tf.nn.sigmoid)

        return logits
Example #11
def decoder_rnn(inputs,
                units,
                vertexes,
                edges,
                nodes,
                training,
                dropout_rate=0.):
    output = multi_dense_layers(inputs,
                                units[:-1],
                                activation=tf.nn.tanh,
                                dropout_rate=dropout_rate,
                                training=training)

    with tf.variable_scope('edges_logits'):
        edges_logits, _ = tf.nn.dynamic_rnn(
            cell=tf.nn.rnn_cell.LSTMCell(units[-1] * 4),
            inputs=tf.tile(tf.expand_dims(output, axis=1), (1, vertexes, 1)),
            dtype=output.dtype)

        edges_logits = tf.layers.dense(edges_logits, edges * units[-1])
        edges_logits = tf.transpose(
            tf.reshape(edges_logits, (-1, vertexes, edges, units[-1])),
            (0, 2, 1, 3))
        edges_logits = tf.transpose(
            tf.matmul(edges_logits, tf.matrix_transpose(edges_logits)),
            (0, 2, 3, 1))
        edges_logits = tf.layers.dropout(edges_logits,
                                         dropout_rate,
                                         training=training)

    with tf.variable_scope('nodes_logits'):
        nodes_logits, _ = tf.nn.dynamic_rnn(
            cell=tf.nn.rnn_cell.LSTMCell(units[-1] * 4),
            inputs=tf.tile(tf.expand_dims(output, axis=1), (1, vertexes, 1)),
            dtype=output.dtype)
        nodes_logits = tf.layers.dense(nodes_logits, nodes)
        nodes_logits = tf.layers.dropout(nodes_logits,
                                         dropout_rate,
                                         training=training)

    return edges_logits, nodes_logits
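decoder_rnn tiles the latent code across the vertexes axis and runs an LSTM over it, giving each vertex position its own context-dependent representation; the edge logits are then symmetrized with the same inner-product trick as decoder_dot (Example #4).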
Example #12
    def __init__(self,
                 vertexes,
                 edges,
                 nodes,
                 features,
                 embedding_dim,
                 encoder_units,
                 decoder_units,
                 variational,
                 encoder,
                 decoder,
                 soft_gumbel_softmax=False,
                 hard_gumbel_softmax=False,
                 with_features=True):
        """

        :param vertexes:  the atoms num of molecular
        :param edges: the bond num
        :param nodes: the atom num type
        :param features:
        :param embedding_dim:
        :param encoder_units:
        :param decoder_units:
        :param variational:
        :param encoder:
        :param decoder:
        :param soft_gumbel_softmax:
        :param hard_gumbel_softmax:
        :param with_features:
        """
        self.vertexes, self.nodes, self.edges, self.embedding_dim, self.encoder, self.decoder = \
            vertexes, nodes, edges, embedding_dim, encoder, decoder

        self.training = tf.placeholder_with_default(False, shape=())
        self.variational = tf.placeholder_with_default(variational, shape=())
        self.soft_gumbel_softmax = tf.placeholder_with_default(
            soft_gumbel_softmax, shape=())
        self.hard_gumbel_softmax = tf.placeholder_with_default(
            hard_gumbel_softmax, shape=())
        self.temperature = tf.placeholder_with_default(1., shape=())

        self.edges_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes, vertexes))
        self.nodes_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes))
        self.node_features = tf.placeholder(dtype=tf.float32,
                                            shape=(None, vertexes, features))

        self.rewardR = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.rewardF = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.adjacency_tensor = tf.one_hot(self.edges_labels,
                                           depth=edges,
                                           dtype=tf.float32)
        self.node_tensor = tf.one_hot(self.nodes_labels,
                                      depth=nodes,
                                      dtype=tf.float32)

        with tf.variable_scope('encoder'):
            outputs = self.encoder(
                (self.adjacency_tensor,
                 self.node_features if with_features else None,
                 self.node_tensor),
                units=encoder_units[:-1],
                training=self.training,
                dropout_rate=0.)

            outputs = multi_dense_layers(outputs,
                                         units=encoder_units[-1],
                                         activation=tf.nn.tanh,
                                         training=self.training,
                                         dropout_rate=0.)

            self.embeddings_mean = tf.layers.dense(outputs,
                                                   embedding_dim,
                                                   activation=None)
            self.embeddings_std = tf.layers.dense(outputs,
                                                  embedding_dim,
                                                  activation=tf.nn.softplus)
            self.q_z = tf.distributions.Normal(self.embeddings_mean,
                                               self.embeddings_std)

            self.embeddings = tf.cond(self.variational,
                                      lambda: self.q_z.sample(),
                                      lambda: self.embeddings_mean)

        with tf.variable_scope('decoder'):
            self.edges_logits, self.nodes_logits = self.decoder(
                self.embeddings,
                decoder_units,
                vertexes,
                edges,
                nodes,
                training=self.training,
                dropout_rate=0.)

        with tf.name_scope('outputs'):
            (self.edges_softmax, self.nodes_softmax), \
            (self.edges_argmax, self.nodes_argmax), \
            (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
            (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
            (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = postprocess_logits(
                (self.edges_logits, self.nodes_logits), temperature=self.temperature)

            self.edges_hat = tf.case(
                {
                    self.soft_gumbel_softmax:
                    lambda: self.edges_gumbel_softmax,
                    self.hard_gumbel_softmax:
                    lambda: tf.stop_gradient(self.edges_gumbel_argmax - self.
                                             edges_gumbel_softmax) + self.
                    edges_gumbel_softmax
                },
                default=lambda: self.edges_softmax,
                exclusive=True)

            self.nodes_hat = tf.case(
                {
                    self.soft_gumbel_softmax:
                    lambda: self.nodes_gumbel_softmax,
                    self.hard_gumbel_softmax:
                    lambda: tf.stop_gradient(self.nodes_gumbel_argmax - self.
                                             nodes_gumbel_softmax) + self.
                    nodes_gumbel_softmax
                },
                default=lambda: self.nodes_softmax,
                exclusive=True)

        with tf.name_scope('V_x_real'):
            self.value_logits_real = self.V_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=encoder_units)
        with tf.name_scope('V_x_fake'):
            self.value_logits_fake = self.V_x(
                (self.edges_hat, None, self.nodes_hat), units=encoder_units)
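The tf.stop_gradient(hard - soft) + soft pattern in the edges_hat and nodes_hat branches is the straight-through Gumbel-Softmax estimator: the forward pass emits the discrete (argmax) sample while gradients flow through the soft sample. A minimal self-contained sketch of the trick (logits and temperature are stand-ins, not names from this model):

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, 9, 5))  # stand-in
temperature = tf.placeholder_with_default(1., shape=())

u = tf.random_uniform(tf.shape(logits), minval=1e-8, maxval=1.0)
g = -tf.log(-tf.log(u))  # Gumbel(0, 1) noise
y_soft = tf.nn.softmax((logits + g) / temperature)
y_hard = tf.one_hot(tf.argmax(y_soft, axis=-1),
                    depth=tf.shape(y_soft)[-1])
# Forward value equals y_hard; the gradient is that of y_soft.
y_st = tf.stop_gradient(y_hard - y_soft) + y_soft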