Example #1
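The build_graph method below (apparently a TensorGraph-era DeepChem MPNN model) wires up the full computation graph: placeholders for atom and pair features, T rounds of message passing, a set2set-style readout that pools atom embeddings into molecule embeddings, and one output head plus one loss term per task.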
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        # Index of the molecule each atom belongs to, used by the readout
        self.atom_split = Feature(shape=(None,), dtype=tf.int32)
        # (i, j) atom-index pairs, one row per bond/edge
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        # T rounds of message passing: edge-network ('enn') messages,
        # GRU state updates
        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        # Per-atom embeddings after message passing
        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        # set2set readout: M steps of attention-based pooling of atom
        # embeddings into one embedding per molecule
        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])
        # One output head and one loss term per task
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dense1])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            elif self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dense1])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        # Per-task example weights down-weight or mask missing labels
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
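For context, a minimal usage sketch. It assumes DeepChem 2.x, where this graph was exposed as dc.models.MPNNModel and consumed Weave-featurized inputs; the hyperparameter values here are illustrative, not taken from the example above.

import deepchem as dc

# WeaveFeaturizer produces the atom (75-dim) and pair (14-dim) features
# that the placeholders above expect.
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='Weave')
train_dataset, valid_dataset, test_dataset = datasets

model = dc.models.MPNNModel(
    n_tasks=len(tasks),
    n_atom_feat=75,
    n_pair_feat=14,
    T=3,
    M=5,
    batch_size=32,
    mode='regression')
model.fit(train_dataset, nb_epoch=10)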
Example #2
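A pickling smoke test for the MessagePassing layer: it builds the smallest graph that contains the layer, then calls build() and save(), which pickles every layer to the model directory.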
import tensorflow as tf

from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, MessagePassing


def test_MP_pickle():
  # Smoke test: a MessagePassing layer can be built into a graph and pickled
  tg = TensorGraph()
  atom_feature = Feature(shape=(None, 75))
  pair_feature = Feature(shape=(None, 14))
  atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
  MP = MessagePassing(5, in_layers=[atom_feature, pair_feature, atom_to_pair])
  tg.add_output(MP)
  tg.set_loss(MP)
  tg.build()
  tg.save()
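If save() succeeds, the layer held no unpicklable state. A short round-trip sketch, assuming the TensorGraph.load_from_dir helper from DeepChem's TensorGraph era:

# Hypothetical round trip; assumes TensorGraph.load_from_dir from DeepChem 2.x.
tg2 = TensorGraph.load_from_dir(tg.model_dir)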
Example #3
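A later variant of the same build_graph. Instead of one head per task, it reshapes a single Dense layer into a per-task output tensor, uses TrimGraphOutput to drop padding rows from the final batch, and in regression mode can optionally predict a per-task log-variance for uncertainty estimates.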
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None,), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            # Single Dense head reshaped into per-task logits
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dense1,
                                       out_channels=n_tasks * n_classes)
                             ])
            # Drop rows added to pad the final, partial batch
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                # Second head predicts a per-task log-variance
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                # Heteroscedastic loss: squared error scaled by the predicted
                # variance, plus a log-variance penalty
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
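The uncertainty branch is a heteroscedastic regression loss: the network predicts a log-variance alongside each output, large predicted variance down-weights the squared error, and the log-variance term keeps the model from inflating variance without limit. A minimal NumPy sketch of the same arithmetic (function and argument names are illustrative):

import numpy as np

def heteroscedastic_loss(labels, preds, log_var, weights):
    # All arguments are (batch, n_tasks) arrays.
    var = np.exp(log_var)
    per_element = weights * ((labels - preds) ** 2 / var + log_var)
    # Mirrors ReduceSum(ReduceMean(..., axis=[1])): mean over tasks,
    # then sum over the batch.
    return per_element.mean(axis=1).sum()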