Example #1
    def call(self, inputs):
        data_a, data_x, data = inputs
        self.embeddings = self.sample_z(self.batch)

        # generate edge and node logits from the sampled latent embeddings
        self.edges_logits, self.nodes_logits = self.decoder(
            self.embeddings,
            self.decoder_units,
            self.vertexes,
            self.edges,
            self.nodes,
            training=False,
            dropout_rate=self.dropout_rate)
        (self.edges_softmax, self.nodes_softmax), \
        (self.edges_argmax, self.nodes_argmax), \
        (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
        (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
        (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = postprocess_logits(
            (self.edges_logits, self.nodes_logits), temperature=self.temperature)

        self.adjacency_tensor = tf.one_hot(data_a,
                                           depth=self.edges,
                                           dtype=tf.float32)
        self.node_tensor = tf.one_hot(data_x,
                                      depth=self.nodes,
                                      dtype=tf.float32)

        # self.rewardR = reward(mols)  # sampled (real) molecules

        # Recover integer node/edge labels from the one-hot Gumbel argmax.
        n, e = self.nodes_gumbel_argmax, self.edges_gumbel_argmax
        n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
        gen_mols = [
            data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)
        ]
        self.rewardF = reward(gen_mols)  # generated molecules

        # Choose the edge representation: soft Gumbel-softmax, straight-through
        # (hard) Gumbel-softmax, or plain softmax by default.
        self.edges_hat = tf.case(
            [(self.soft_gumbel_softmax, lambda: self.edges_gumbel_softmax),
             (self.hard_gumbel_softmax,
              lambda: tf.stop_gradient(self.edges_gumbel_argmax -
                                       self.edges_gumbel_softmax) +
              self.edges_gumbel_softmax)],
            default=lambda: self.edges_softmax,
            exclusive=True)

        self.nodes_hat = tf.case(
            [(self.soft_gumbel_softmax, lambda: self.nodes_gumbel_softmax),
             (self.hard_gumbel_softmax,
              lambda: tf.stop_gradient(self.nodes_gumbel_argmax -
                                       self.nodes_gumbel_softmax) +
              self.nodes_gumbel_softmax)],
            default=lambda: self.nodes_softmax,
            exclusive=True)

        self.logits_real, self.features_real = self.D_x(
            (self.adjacency_tensor, None, self.node_tensor),
            units=self.discriminator_units)

        self.logits_fake, self.features_fake = self.D_x(
            (self.edges_hat, None, self.nodes_hat),
            units=self.discriminator_units)

        self.value_logits_real = self.V_x(
            (self.adjacency_tensor, None, self.node_tensor),
            units=self.discriminator_units)

        self.value_logits_fake = self.V_x(
            (self.edges_hat, None, self.nodes_hat),
            units=self.discriminator_units)
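
The hard branch above implements the straight-through Gumbel-softmax estimator: tf.stop_gradient(argmax - softmax) + softmax evaluates to the discrete one-hot argmax in the forward pass while gradients flow through the continuous softmax sample. A minimal standalone sketch (TensorFlow 2 eager style; the helper name is illustrative):

    import tensorflow as tf

    def straight_through(soft_sample):
        # Discrete one-hot of the soft (Gumbel-softmax) sample.
        hard = tf.one_hot(tf.argmax(soft_sample, axis=-1),
                          depth=tf.shape(soft_sample)[-1],
                          dtype=soft_sample.dtype)
        # (hard - soft) is treated as a constant by stop_gradient, so the
        # gradient of the result w.r.t. the logits equals the gradient of
        # the soft sample.
        return tf.stop_gradient(hard - soft_sample) + soft_sample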
Example #2
    def __init__(self,
                 vertexes,
                 edges,
                 nodes,
                 embedding_dim,
                 decoder_units,
                 discriminator_units,
                 decoder,
                 discriminator,
                 soft_gumbel_softmax=False,
                 hard_gumbel_softmax=False,
                 batch_discriminator=True):
        self.vertexes, self.edges, self.nodes = vertexes, edges, nodes
        self.embedding_dim = embedding_dim
        self.decoder_units = decoder_units
        self.discriminator_units = discriminator_units
        self.decoder, self.discriminator = decoder, discriminator
        self.batch_discriminator = batch_discriminator

        self.training = tf.placeholder_with_default(False, shape=())
        self.dropout_rate = tf.placeholder_with_default(0., shape=())
        self.soft_gumbel_softmax = tf.placeholder_with_default(
            soft_gumbel_softmax, shape=())
        self.hard_gumbel_softmax = tf.placeholder_with_default(
            hard_gumbel_softmax, shape=())
        self.temperature = tf.placeholder_with_default(1., shape=())

        self.edges_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes, vertexes))
        self.nodes_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes))
        self.embeddings = tf.placeholder(dtype=tf.float32,
                                         shape=(None, embedding_dim))

        #self.rewardR = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        #self.rewardF = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.adjacency_tensor = tf.one_hot(self.edges_labels,
                                           depth=edges,
                                           dtype=tf.float32)
        self.node_tensor = tf.one_hot(self.nodes_labels,
                                      depth=nodes,
                                      dtype=tf.float32)

        with tf.variable_scope('generator'):
            self.edges_logits, self.nodes_logits = self.decoder(
                self.embeddings,
                decoder_units,
                vertexes,
                edges,
                nodes,
                training=self.training,
                dropout_rate=self.dropout_rate)

        with tf.name_scope('outputs'):
            (self.edges_softmax, self.nodes_softmax), \
            (self.edges_argmax, self.nodes_argmax), \
            (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
            (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
            (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = postprocess_logits(
                (self.edges_logits, self.nodes_logits), temperature=self.temperature)

            self.edges_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.edges_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.edges_gumbel_argmax - self.edges_gumbel_softmax
                    ) + self.edges_gumbel_softmax,
                },
                default=lambda: self.edges_softmax,
                exclusive=True)

            self.nodes_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.nodes_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.nodes_gumbel_argmax - self.nodes_gumbel_softmax
                    ) + self.nodes_gumbel_softmax,
                },
                default=lambda: self.nodes_softmax,
                exclusive=True)

        with tf.name_scope('D_x_real'):
            self.logits_real, self.features_real = self.D_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=discriminator_units)
        with tf.name_scope('D_x_fake'):
            self.logits_fake, self.features_fake = self.D_x(
                (self.edges_hat, None, self.nodes_hat),
                units=discriminator_units)
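
postprocess_logits itself is not shown on this page. Judging only from how its return value is unpacked in these examples, a plausible reconstruction in the same TF1 style (the body is an assumption, not the repository's code):

    def postprocess_logits(inputs, temperature=1.):
        # inputs is the pair (edges_logits, nodes_logits); each output below
        # is therefore a pair (edges_x, nodes_x).
        softmax = [tf.nn.softmax(e / temperature) for e in inputs]
        argmax = [tf.one_hot(tf.argmax(e, axis=-1), depth=tf.shape(e)[-1],
                             dtype=e.dtype) for e in inputs]
        # Gumbel perturbation: logits - log(-log(U)), U ~ Uniform(0, 1).
        gumbel = [e - tf.log(-tf.log(tf.random_uniform(tf.shape(e),
                                                       dtype=e.dtype)))
                  for e in inputs]
        gumbel_softmax = [tf.nn.softmax(g / temperature) for g in gumbel]
        gumbel_argmax = [tf.one_hot(tf.argmax(g, axis=-1),
                                    depth=tf.shape(g)[-1], dtype=g.dtype)
                         for g in gumbel]
        return softmax, argmax, gumbel, gumbel_softmax, gumbel_argmax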
Example #3
    def __init__(self,
                 vertexes,
                 edges,
                 nodes,
                 features,
                 embedding_dim,
                 encoder_units,
                 decoder_units,
                 variational,
                 encoder,
                 decoder,
                 soft_gumbel_softmax=False,
                 hard_gumbel_softmax=False,
                 with_features=True):
        """

        :param vertexes:  the atoms num of molecular
        :param edges: the bond num
        :param nodes: the atom num type
        :param features:
        :param embedding_dim:
        :param encoder_units:
        :param decoder_units:
        :param variational:
        :param encoder:
        :param decoder:
        :param soft_gumbel_softmax:
        :param hard_gumbel_softmax:
        :param with_features:
        """
        self.vertexes, self.nodes, self.edges, self.embedding_dim, self.encoder, self.decoder = \
            vertexes, nodes, edges, embedding_dim, encoder, decoder

        self.training = tf.placeholder_with_default(False, shape=())
        self.variational = tf.placeholder_with_default(variational, shape=())
        self.soft_gumbel_softmax = tf.placeholder_with_default(
            soft_gumbel_softmax, shape=())
        self.hard_gumbel_softmax = tf.placeholder_with_default(
            hard_gumbel_softmax, shape=())
        self.temperature = tf.placeholder_with_default(1., shape=())

        self.edges_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes, vertexes))
        self.nodes_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes))
        self.node_features = tf.placeholder(dtype=tf.float32,
                                            shape=(None, vertexes, features))

        self.rewardR = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.rewardF = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.adjacency_tensor = tf.one_hot(self.edges_labels,
                                           depth=edges,
                                           dtype=tf.float32)
        self.node_tensor = tf.one_hot(self.nodes_labels,
                                      depth=nodes,
                                      dtype=tf.float32)

        with tf.variable_scope('encoder'):
            outputs = self.encoder(
                (self.adjacency_tensor,
                 self.node_features if with_features else None,
                 self.node_tensor),
                units=encoder_units[:-1],
                training=self.training,
                dropout_rate=0.)

            outputs = multi_dense_layers(outputs,
                                         units=encoder_units[-1],
                                         activation=tf.nn.tanh,
                                         training=self.training,
                                         dropout_rate=0.)

            self.embeddings_mean = tf.layers.dense(outputs,
                                                   embedding_dim,
                                                   activation=None)
            self.embeddings_std = tf.layers.dense(outputs,
                                                  embedding_dim,
                                                  activation=tf.nn.softplus)
            self.q_z = tf.distributions.Normal(self.embeddings_mean,
                                               self.embeddings_std)

            self.embeddings = tf.cond(self.variational,
                                      lambda: self.q_z.sample(),
                                      lambda: self.embeddings_mean)

        with tf.variable_scope('decoder'):
            self.edges_logits, self.nodes_logits = self.decoder(
                self.embeddings,
                decoder_units,
                vertexes,
                edges,
                nodes,
                training=self.training,
                dropout_rate=0.)

        with tf.name_scope('outputs'):
            (self.edges_softmax, self.nodes_softmax), \
            (self.edges_argmax, self.nodes_argmax), \
            (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
            (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
            (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = postprocess_logits(
                (self.edges_logits, self.nodes_logits), temperature=self.temperature)

            self.edges_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.edges_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.edges_gumbel_argmax - self.edges_gumbel_softmax
                    ) + self.edges_gumbel_softmax,
                },
                default=lambda: self.edges_softmax,
                exclusive=True)

            self.nodes_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.nodes_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.nodes_gumbel_argmax - self.nodes_gumbel_softmax
                    ) + self.nodes_gumbel_softmax,
                },
                default=lambda: self.nodes_softmax,
                exclusive=True)

        with tf.name_scope('V_x_real'):
            self.value_logits_real = self.V_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=encoder_units)
        with tf.name_scope('V_x_fake'):
            self.value_logits_fake = self.V_x(
                (self.edges_hat, None, self.nodes_hat), units=encoder_units)
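
A note on the encoder block: tf.distributions.Normal(...).sample() draws a reparameterized sample, which is what lets gradients reach embeddings_mean and embeddings_std through the sampled z. A minimal equivalent written out by hand (a sketch; eps and z are illustrative names):

    # z = mean + std * eps with eps ~ N(0, I); the randomness is isolated in
    # eps, so z stays differentiable w.r.t. mean and std.
    eps = tf.random_normal(tf.shape(self.embeddings_mean))
    z = self.embeddings_mean + self.embeddings_std * eps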
Example #4
    def call(self, inputs=None, training=False):

        if inputs is not None:
            assert len(inputs) == 2, \
                "inputs must contain the adjacency and node feature matrices."
            data_a, data_x = inputs

        # decoder_units, vertexes, edges, nodes = inputs
        sample_encoding = self.sample_z(self.batch)
        edges_logits, nodes_logits = self.G_x(sample_encoding)

        (edges_softmax, nodes_softmax), \
        (edges_argmax, nodes_argmax), \
        (edges_gumbel_logits, nodes_gumbel_logits), \
        (edges_gumbel_softmax, nodes_gumbel_softmax), \
        (edges_gumbel_argmax, nodes_gumbel_argmax) = postprocess_logits((edges_logits, nodes_logits),
                                                                        temperature=self.temperature)

        edges_hat = tf.case(
            [(self.soft_gumbel_softmax, lambda: edges_gumbel_softmax),
             (self.hard_gumbel_softmax, lambda: tf.stop_gradient(
                 edges_gumbel_argmax - edges_gumbel_softmax) +
              edges_gumbel_softmax)],
            default=lambda: edges_softmax,
            exclusive=True)

        nodes_hat = tf.case(
            [(self.soft_gumbel_softmax, lambda: nodes_gumbel_softmax),
             (self.hard_gumbel_softmax, lambda: tf.stop_gradient(
                 nodes_gumbel_argmax - nodes_gumbel_softmax) +
              nodes_gumbel_softmax)],
            default=lambda: nodes_softmax,
            exclusive=True)

        if not training:
            return nodes_gumbel_argmax, edges_gumbel_argmax

        # sample real
        if inputs is None:
            raise ValueError(
                "inputs (data_a, data_x) are required when training=True.")
        adjacency_tensor = tf.one_hot(data_a,
                                      depth=self.edges,
                                      dtype=tf.float32)
        node_tensor = tf.one_hot(data_x, depth=self.nodes, dtype=tf.float32)

        logits_real, features_real = self.D_x(
            (adjacency_tensor, None, node_tensor), training=training)
        value_logits_real, _ = self.V_x((adjacency_tensor, None, node_tensor),
                                        training=training)

        # generate
        logits_fake, features_fake = self.D_x((edges_hat, None, nodes_hat),
                                              training=training)
        value_logits_fake, _ = self.V_x((edges_hat, None, nodes_hat),
                                        training=training)

        # print("D_x: ", self.D_x.trainable_variables)
        real_sample = (adjacency_tensor, node_tensor, logits_real,
                       features_real, value_logits_real)
        gen_sample = (edges_hat, nodes_hat, logits_fake, features_fake,
                      value_logits_fake)
        gen_raw = (edges_softmax, nodes_softmax, edges_argmax, nodes_argmax,
                   edges_gumbel_logits, nodes_gumbel_logits,
                   edges_gumbel_softmax, nodes_gumbel_softmax,
                   edges_gumbel_argmax, nodes_gumbel_argmax)
        return real_sample, gen_sample, gen_raw
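
A hedged usage sketch for the two call modes (the model instance and the data tensors are assumptions; only the call signature comes from the example):

    # Inference: no inputs needed, returns discrete node/edge samples.
    nodes, edges = model(training=False)

    # Training: requires real data, returns real/fake logits plus the raw
    # generator outputs.
    real_sample, gen_sample, gen_raw = model((data_a, data_x), training=True)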
Example #5
    def __init__(
            self,
            vertexes,  # maximum number of atoms (graph vertices)
            edges,  # number of bond types
            nodes,  # number of atom types
            embedding_dim,  # dimensionality of the latent space z
            decoder_units,  # tuple, decoder setup (z = Dense(z, dim=units_k)^{(k)})
            discriminator_units,  # tuple, discriminator setup (GCN units, readout units, MLP units)
            decoder,  # callable defined in models.__init__
            discriminator,  # callable defined in models.__init__
            soft_gumbel_softmax=False,
            hard_gumbel_softmax=False,
            batch_discriminator=True):

        self.vertexes, self.edges, self.nodes, \
        self.embedding_dim, self.decoder_units, self.discriminator_units, \
        self.decoder, self.discriminator, self.batch_discriminator = \
            vertexes, edges, nodes, embedding_dim, decoder_units, \
            discriminator_units, decoder, discriminator, batch_discriminator

        self.training = tf.placeholder_with_default(False, shape=())
        self.dropout_rate = tf.placeholder_with_default(0., shape=())
        self.soft_gumbel_softmax = tf.placeholder_with_default(
            soft_gumbel_softmax, shape=())
        self.hard_gumbel_softmax = tf.placeholder_with_default(
            hard_gumbel_softmax, shape=())
        self.temperature = tf.placeholder_with_default(
            1., shape=())  # temperature for softmax

        self.edges_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes, vertexes))
        self.nodes_labels = tf.placeholder(dtype=tf.int64,
                                           shape=(None, vertexes))
        self.embeddings = tf.placeholder(dtype=tf.float32,
                                         shape=(None, embedding_dim))

        self.rewardR = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.rewardF = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.adjacency_tensor = tf.one_hot(self.edges_labels,
                                           depth=edges,
                                           dtype=tf.float32)
        self.node_tensor = tf.one_hot(self.nodes_labels,
                                      depth=nodes,
                                      dtype=tf.float32)

        with tf.variable_scope('generator'):
            self.edges_logits, self.nodes_logits = \
                self.decoder(self.embeddings, decoder_units, vertexes, edges, nodes,
                             training=self.training, dropout_rate=self.dropout_rate)

        with tf.name_scope('outputs'):
            (self.edges_softmax, self.nodes_softmax), \
            (self.edges_argmax, self.nodes_argmax), \
            (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
            (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
            (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = \
                postprocess_logits((self.edges_logits, self.nodes_logits), temperature=self.temperature)

            self.edges_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.edges_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.edges_gumbel_argmax - self.edges_gumbel_softmax
                    ) + self.edges_gumbel_softmax,
                },
                default=lambda: self.edges_softmax,
                exclusive=True)

            self.nodes_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.nodes_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.nodes_gumbel_argmax - self.nodes_gumbel_softmax
                    ) + self.nodes_gumbel_softmax,
                },
                default=lambda: self.nodes_softmax,
                exclusive=True)

        with tf.name_scope('D_x_real'):
            self.logits_real, self.features_real = self.D_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=discriminator_units)
        with tf.name_scope('D_x_fake'):
            self.logits_fake, self.features_fake = self.D_x(
                (self.edges_hat, None, self.nodes_hat),
                units=discriminator_units)

        with tf.name_scope('V_x_real'):
            self.value_logits_real = self.V_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=discriminator_units)
        with tf.name_scope('V_x_fake'):
            self.value_logits_fake = self.V_x(
                (self.edges_hat, None, self.nodes_hat),
                units=discriminator_units)
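
The decoder_units comment above describes a plain stack of dense layers applied to z. A plausible minimal decoder with matching output shapes (a sketch under that reading, not the repository's implementation; simple_decoder is an illustrative name):

    def simple_decoder(z, units, vertexes, edges, nodes, training,
                       dropout_rate=0.):
        h = z
        for u in units:
            h = tf.layers.dense(h, u, activation=tf.nn.tanh)
            h = tf.layers.dropout(h, rate=dropout_rate, training=training)
        # Edge logits: (batch, vertexes, vertexes, edges), symmetrized so the
        # predicted adjacency tensor is undirected.
        e = tf.layers.dense(h, vertexes * vertexes * edges)
        e = tf.reshape(e, (-1, vertexes, vertexes, edges))
        e = (e + tf.transpose(e, (0, 2, 1, 3))) / 2.
        # Node logits: (batch, vertexes, nodes).
        n = tf.layers.dense(h, vertexes * nodes)
        n = tf.reshape(n, (-1, vertexes, nodes))
        return e, n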
Example #6
    def __init__(self, vertexes, edges, nodes, embedding_dim, decoder_units,
                 discriminator_units, decoder, discriminator,
                 soft_gumbel_softmax=False, hard_gumbel_softmax=False,
                 batch_discriminator=True, unrolling_steps=1, batch_dim=32,
                 latent_opt=False, noise_sigma=0.0, test_epoch=0):
        self.vertexes, self.edges, self.nodes = vertexes, edges, nodes
        self.embedding_dim = embedding_dim
        self.decoder_units = decoder_units
        self.discriminator_units = discriminator_units
        self.decoder, self.discriminator = decoder, discriminator
        self.batch_discriminator = batch_discriminator
        self.unrolling_steps = unrolling_steps
        self.latent_opt = latent_opt
        self.noise_sigma = noise_sigma
        self.test_epoch = test_epoch

        self.training = tf.placeholder_with_default(False, shape=())
        self.dropout_rate = tf.placeholder_with_default(0., shape=())
        self.soft_gumbel_softmax = tf.placeholder_with_default(soft_gumbel_softmax, shape=())
        self.hard_gumbel_softmax = tf.placeholder_with_default(hard_gumbel_softmax, shape=())
        self.temperature = tf.placeholder_with_default(1., shape=())

        self.edges_labels = tf.placeholder(dtype=tf.int64, shape=(None, vertexes, vertexes))
        self.nodes_labels = tf.placeholder(dtype=tf.int64, shape=(None, vertexes))
        self.embeddings = tf.placeholder(dtype=tf.float32, shape=(None, embedding_dim))
        self.input_len = tf.placeholder(dtype=tf.int64, shape=[])
        self.is_training = True

        self.rewardR = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.rewardF = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.adjacency_tensor = tf.one_hot(self.edges_labels, depth=edges, dtype=tf.float32)
        self.node_tensor = tf.one_hot(self.nodes_labels, depth=nodes, dtype=tf.float32)

        print(f"self.is_training: {self.is_training}")

        with tf.variable_scope("input_LO"):
            # self.embeddings_LO = tf.Variable(tf.zeros(shape=(tf.shape(self.embeddings)[0], embedding_dim), dtype=tf.float32))
            if self.is_training:
                self.embeddings_LO = tf.Variable(tf.zeros(shape=[batch_dim, embedding_dim]), name="latent_z")
            else:
                self.embeddings_LO = self.embeddings

        with tf.variable_scope('generator'):
            self.edges_logits, self.nodes_logits = self.decoder(self.embeddings_LO, decoder_units, vertexes, edges, nodes, training=self.training, dropout_rate=self.dropout_rate)

        with tf.name_scope('outputs'):
            (self.edges_softmax, self.nodes_softmax), \
            (self.edges_argmax, self.nodes_argmax), \
            (self.edges_gumbel_logits, self.nodes_gumbel_logits), \
            (self.edges_gumbel_softmax, self.nodes_gumbel_softmax), \
            (self.edges_gumbel_argmax, self.nodes_gumbel_argmax) = postprocess_logits(
                (self.edges_logits, self.nodes_logits), temperature=self.temperature)

            self.edges_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.edges_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.edges_gumbel_argmax - self.edges_gumbel_softmax
                    ) + self.edges_gumbel_softmax,
                },
                default=lambda: self.edges_softmax,
                exclusive=True)

            self.nodes_hat = tf.case(
                {
                    self.soft_gumbel_softmax: lambda: self.nodes_gumbel_softmax,
                    self.hard_gumbel_softmax: lambda: tf.stop_gradient(
                        self.nodes_gumbel_argmax - self.nodes_gumbel_softmax
                    ) + self.nodes_gumbel_softmax,
                },
                default=lambda: self.nodes_softmax,
                exclusive=True)

        def add_gaussian_noise(x, noise_sigma=0.0):
            # Instance noise: perturb the discriminator's inputs with Gaussian
            # noise to stabilize adversarial training.
            noise = tf.random_normal(shape=tf.shape(x), mean=0.0,
                                     stddev=noise_sigma, dtype=tf.float32)
            return x + noise

        with tf.name_scope('D_x_real'):
            self.logits_real, self.features_real = self.D_x(
                (add_gaussian_noise(self.adjacency_tensor, self.noise_sigma),
                 None,
                 add_gaussian_noise(self.node_tensor, self.noise_sigma)),
                units=discriminator_units)
        with tf.name_scope('D_x_fake'):
            self.logits_fake, self.features_fake = self.D_x(
                (add_gaussian_noise(self.edges_hat, self.noise_sigma),
                 None,
                 add_gaussian_noise(self.nodes_hat, self.noise_sigma)),
                units=discriminator_units)

        with tf.name_scope('V_x_real'):
            self.value_logits_real = self.V_x(
                (self.adjacency_tensor, None, self.node_tensor),
                units=discriminator_units)
        with tf.name_scope('V_x_fake'):
            self.value_logits_fake = self.V_x(
                (self.edges_hat, None, self.nodes_hat),
                units=discriminator_units)