def build_graph(self):
    self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
    self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
    previous_layer = self.atom_feats

    Hiddens = []
    for n_hidden in self.layer_structures:
      Hidden = Dense(
          out_channels=n_hidden,
          activation_fn=tf.nn.tanh,
          in_layers=[previous_layer])
      Hiddens.append(Hidden)
      previous_layer = Hiddens[-1]

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
      output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
      self.add_output(output)

      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, output])
      costs.append(cost)

    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
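
The per-task pattern above (a one-unit Dense head, a Label, and an L2Loss per task, then Stack and WeightedError) boils down to a weight-masked reduction of per-task squared errors. A minimal NumPy sketch of that idea follows; the squared-error form and the plain sum are illustrative assumptions, since the exact scaling and reductions inside DeepChem's L2Loss and WeightedError layers may differ between versions.

import numpy as np

def weighted_multitask_l2(preds, labels, weights):
  """Illustrative analogue of Stack(per-task L2 costs) followed by WeightedError."""
  per_task_cost = (preds - labels) ** 2     # one squared-error cost per task (assumed form)
  return np.sum(per_task_cost * weights)    # weights of shape (batch, n_tasks) mask missing labels

preds = np.random.rand(4, 3)                # (batch, n_tasks)
labels = np.random.rand(4, 3)
weights = np.ones((4, 3))
print(weighted_multitask_l2(preds, labels, weights))
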
Example #2
    def _build_graph(self):
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * self.n_feat))

        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, self.n_feat))
        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))

        previous_layer = reshaped_atom_feats

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = Dense(out_channels=n_hidden,
                           activation_fn=tf.nn.tanh,
                           in_layers=[previous_layer])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Example #3
    def _build_graph(self):

        self.one_hot_seq = Feature(shape=(None, self.pad_length,
                                          self.num_amino_acids),
                                   dtype=tf.float32)

        conv1 = Conv1D(kernel_size=2,
                       filters=512,
                       in_layers=[self.one_hot_seq])

        maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
        conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
        flattened = Flatten(in_layers=[conv2])
        dense1 = Dense(out_channels=400,
                       in_layers=[flattened],
                       activation_fn=tf.nn.tanh)
        dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
        output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
        self.add_output(output)

        if self.mode == "regression":
            label = Label(shape=(None, 1))
            loss = L2Loss(in_layers=[label, output])
        else:
            raise NotImplementedError(
                "Classification support not added yet. Missing details in paper."
            )
        weights = Weights(shape=(None, ))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
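
The one_hot_seq Feature above expects a (batch, pad_length, num_amino_acids) one-hot encoding of each padded protein sequence. A small sketch of building such an input, assuming the standard 20-letter amino-acid alphabet and zero rows for padding positions; the alphabet constant, helper name, and pad length are hypothetical.

import numpy as np

AMINO_ACIDS = "ACDEFGHIKLMNPQRSTVWY"          # assumed 20-letter alphabet
AA_INDEX = {aa: i for i, aa in enumerate(AMINO_ACIDS)}

def one_hot_protein(seq, pad_length):
    """Encode a protein string as a (pad_length, 20) one-hot matrix; padding rows stay zero."""
    mat = np.zeros((pad_length, len(AMINO_ACIDS)), dtype=np.float32)
    for pos, aa in enumerate(seq[:pad_length]):
        mat[pos, AA_INDEX[aa]] = 1.0
    return mat

batch = np.stack([one_hot_protein("MKT", 8), one_hot_protein("ACDE", 8)])
print(batch.shape)                            # (2, 8, 20), the layout fed to one_hot_seq
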
Example #4
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dense1])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dense1])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Example #5
def test_L2loss_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = L2Loss(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #6
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               in_layers=[dag_layer1, self.membership])

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dag_gather])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dag_gather])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Example #7
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concat features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Stack(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
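
The smiles_seqs Feature in this TextCNN-style model takes integer-encoded SMILES strings, and the embedding table is sized len(char_dict) + 1 so that index 0 can act as padding. A sketch of a character dictionary and encoder, assuming that padding convention; the char_dict contents and the helper name are hypothetical.

import numpy as np

smiles_list = ["CCO", "c1ccccc1", "CC(=O)O"]
chars = sorted(set("".join(smiles_list)))
char_dict = {c: i + 1 for i, c in enumerate(chars)}   # 0 is reserved for padding

def encode_smiles(s, seq_length):
    """Map characters to integer ids and right-pad with 0 up to seq_length."""
    ids = [char_dict[c] for c in s][:seq_length]
    return np.array(ids + [0] * (seq_length - len(ids)), dtype=np.int32)

seqs = np.stack([encode_smiles(s, 12) for s in smiles_list])
print(seqs.shape)                                     # (3, 12), the layout fed to smiles_seqs
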
Example #8
  def build_graph(self):
    self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
    self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
    self.mask = Feature(shape=(None, self.max_atoms, 1))

    gcnn1 = BatchNorm(
        GraphCNN(
            num_filters=64,
            in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
    gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
    gcnn2 = BatchNorm(
        GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
    gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
    gc_pool, adj_matrix = GraphCNNPool(
        num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
    gc_pool = BatchNorm(gc_pool)
    gc_pool = Dropout(self.dropout, in_layers=gc_pool)
    gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
    gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
    gc_pool2, adj_matrix2 = GraphCNNPool(
        num_vertices=8, in_layers=[gcnn3, adj_matrix])
    gc_pool2 = BatchNorm(gc_pool2)
    gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
    flattened = Flatten(in_layers=gc_pool2)
    readout = Dense(
        out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Stack(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Example #9
    def _build(self):
        self.A_tilda_k = list()
        for k in range(1, self.k_max + 1):
            self.A_tilda_k.append(
                Feature(name="graph_adjacency_{}".format(k),
                        dtype=tf.float32,
                        shape=[None, self.max_nodes, self.max_nodes]))
        self.X = Feature(name='atom_features',
                         dtype=tf.float32,
                         shape=[None, self.max_nodes, self.num_node_features])

        graph_layers = list()
        adaptive_filters = list()

        for index, k in enumerate(range(1, self.k_max + 1)):

            in_layers = [self.A_tilda_k[index], self.X]

            adaptive_filters.append(
                AdaptiveFilter(batch_size=self.batch_size,
                               in_layers=in_layers,
                               num_nodes=self.max_nodes,
                               num_node_features=self.num_node_features,
                               combine_method=self.combine_method))

            graph_layers.append(
                KOrderGraphConv(batch_size=self.batch_size,
                                in_layers=in_layers +
                                [adaptive_filters[index]],
                                num_nodes=self.max_nodes,
                                num_node_features=self.num_node_features,
                                init='glorot_uniform'))

        graph_features = Concat(in_layers=graph_layers, axis=2)
        graph_features = ReLU(in_layers=[graph_features])
        flattened = Flatten(in_layers=[graph_features])

        dense1 = Dense(in_layers=[flattened],
                       out_channels=64,
                       activation_fn=tf.nn.relu)
        dense2 = Dense(in_layers=[dense1],
                       out_channels=16,
                       activation_fn=tf.nn.relu)
        dense3 = Dense(in_layers=[dense2],
                       out_channels=1 * self.n_tasks,
                       activation_fn=None)
        output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        weights = Weights(shape=(None, self.n_tasks))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
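
The A_tilda_k Features above hold one k-th order graph adjacency per k. One common way to build such inputs is to take powers of the adjacency matrix with self-loops added; this sketch only illustrates that assumption, since the featurizer actually paired with this model is not shown here.

import numpy as np

def k_order_adjacencies(adj, k_max):
    """Return [A~^1, ..., A~^k_max] with A~ = A + I (an assumed preprocessing, not DeepChem's)."""
    a_tilde = adj + np.eye(adj.shape[0], dtype=adj.dtype)
    powers, current = [], np.eye(adj.shape[0], dtype=adj.dtype)
    for _ in range(k_max):
        current = current @ a_tilde
        powers.append(current)
    return powers

adj = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.float32)
for k, a_k in enumerate(k_order_adjacencies(adj, k_max=2), start=1):
    print(k, a_k.shape)   # each matrix would be padded to (max_nodes, max_nodes) before feeding
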
Example #10
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concat features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Example #11
  def test_l2_loss(self):
    """Test that L2Loss can be invoked."""
    batch_size = 10
    n_features = 5
    guess_tensor = np.random.rand(batch_size, n_features)
    label_tensor = np.random.rand(batch_size, n_features)
    with self.session() as sess:
      guess_tensor = tf.convert_to_tensor(guess_tensor, dtype=tf.float32)
      label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.float32)
      out_tensor = L2Loss()(guess_tensor, label_tensor)
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size,)
Example #12
    def build_graph(self):
        """Building graph structures:
            Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
            """
        self.atom_number = Feature(shape=(None, ), dtype=tf.int32)
        self.distance = Feature(shape=(None, self.n_distance))
        self.atom_membership = Feature(shape=(None, ), dtype=tf.int32)
        self.distance_membership_i = Feature(shape=(None, ), dtype=tf.int32)
        self.distance_membership_j = Feature(shape=(None, ), dtype=tf.int32)

        dtnn_embedding = DTNNEmbedding(n_embedding=self.n_embedding,
                                       in_layers=[self.atom_number])
        if self.dropout > 0.0:
            dtnn_embedding = Dropout(self.dropout, in_layers=dtnn_embedding)
        dtnn_layer1 = DTNNStep(n_embedding=self.n_embedding,
                               n_distance=self.n_distance,
                               in_layers=[
                                   dtnn_embedding, self.distance,
                                   self.distance_membership_i,
                                   self.distance_membership_j
                               ])
        if self.dropout > 0.0:
            dtnn_layer1 = Dropout(self.dropout, in_layers=dtnn_layer1)
        dtnn_layer2 = DTNNStep(n_embedding=self.n_embedding,
                               n_distance=self.n_distance,
                               in_layers=[
                                   dtnn_layer1, self.distance,
                                   self.distance_membership_i,
                                   self.distance_membership_j
                               ])
        if self.dropout > 0.0:
            dtnn_layer2 = Dropout(self.dropout, in_layers=dtnn_layer2)
        dtnn_gather = DTNNGather(n_embedding=self.n_embedding,
                                 layer_sizes=[self.n_hidden],
                                 n_outputs=self.n_tasks,
                                 output_activation=self.output_activation,
                                 in_layers=[dtnn_layer2, self.atom_membership])
        if self.dropout > 0.0:
            dtnn_gather = Dropout(self.dropout, in_layers=dtnn_gather)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        labels = Label(shape=(None, n_tasks))
        output = Reshape(
            shape=(None, n_tasks),
            in_layers=[Dense(in_layers=dtnn_gather, out_channels=n_tasks)])
        self.add_output(output)
        weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
        self.set_loss(weighted_loss)
Example #13
  def build_graph(self):
    """Building graph structures:
    Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
    """
    self.atom_number = Feature(shape=(None,), dtype=tf.int32)
    self.distance = Feature(shape=(None, self.n_distance))
    self.atom_membership = Feature(shape=(None,), dtype=tf.int32)
    self.distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
    self.distance_membership_j = Feature(shape=(None,), dtype=tf.int32)

    dtnn_embedding = DTNNEmbedding(
        n_embedding=self.n_embedding, in_layers=[self.atom_number])
    dtnn_layer1 = DTNNStep(
        n_embedding=self.n_embedding,
        n_distance=self.n_distance,
        in_layers=[
            dtnn_embedding, self.distance, self.distance_membership_i,
            self.distance_membership_j
        ])
    dtnn_layer2 = DTNNStep(
        n_embedding=self.n_embedding,
        n_distance=self.n_distance,
        in_layers=[
            dtnn_layer1, self.distance, self.distance_membership_i,
            self.distance_membership_j
        ])
    dtnn_gather = DTNNGather(
        n_embedding=self.n_embedding,
        layer_sizes=[self.n_hidden],
        n_outputs=self.n_tasks,
        output_activation=self.output_activation,
        in_layers=[dtnn_layer2, self.atom_membership])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = DTNNExtract(task, in_layers=[dtnn_gather])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)

    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
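
Several of these models (DTNNGather here, DAGGather and WeaveGather below) pool per-atom outputs into per-molecule outputs via a membership vector that assigns every atom in the flattened batch to its molecule index. A NumPy sketch of that bookkeeping and of a simple sum-pool analogue; the molecule sizes are made up, and the actual gather layers may use a different pooling.

import numpy as np

n_atoms_per_mol = [3, 5, 2]                    # hypothetical molecule sizes in one batch
atom_membership = np.concatenate(
    [np.full(n, i, dtype=np.int32) for i, n in enumerate(n_atoms_per_mol)])
print(atom_membership)                         # [0 0 0 1 1 1 1 1 2 2]

atom_outputs = np.random.rand(atom_membership.size, 4)
mol_outputs = np.stack([atom_outputs[atom_membership == i].sum(axis=0)
                        for i in range(len(n_atoms_per_mol))])
print(mol_outputs.shape)                       # (3, 4): one pooled vector per molecule
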
Example #14
  def test_change_loss_function(self):
    tasks, dataset, transformers, metric = self.get_dataset(
        'regression', 'GraphConv', num_tasks=1)

    batch_size = 50
    model = GraphConvModel(len(tasks), batch_size=batch_size, mode='regression')

    model.fit(dataset, nb_epoch=1)
    model.save()

    model2 = TensorGraph.load_from_dir(model.model_dir, restore=False)
    dummy_label = model2.labels[-1]
    dummy_output = model2.outputs[-1]
    loss = ReduceSum(L2Loss(in_layers=[dummy_label, dummy_output]))
    module = model2.create_submodel(loss=loss)
    model2.restore()
    model2.fit(dataset, nb_epoch=1, submodel=module)
Example #15
  def __init__(self):
    self.batch_size = 10
    self.tg = dc.models.TensorGraph(use_queue=False)
    self.features = Feature(shape=(None, 1))
    self.labels = Label(shape=(None, 1))
    hidden1 = Dense(in_layers=self.features,
                    out_channels=40,
                    activation_fn=tf.nn.relu)
    hidden2 = Dense(in_layers=hidden1,
                    out_channels=40,
                    activation_fn=tf.nn.relu)
    output = Dense(in_layers=hidden2, out_channels=1)
    loss = L2Loss(in_layers=[output, self.labels])
    self.tg.add_output(output)
    self.tg.set_loss(loss)
    with self.tg._get_tf("Graph").as_default():
      self.tg.build()
Example #16
    def build_graph(self):

        self.atom_numbers = Feature(shape=(None, self.max_atoms),
                                    dtype=tf.int32)
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * 4))

        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))
        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, 4))

        previous_layer = ANIFeat(in_layers=reshaped_atom_feats,
                                 max_atoms=self.max_atoms)

        self.featurized = previous_layer

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = AtomicDifferentiatedDense(
                self.max_atoms,
                n_hidden,
                self.atom_number_cases,
                activation=self.activation_fn,
                in_layers=[previous_layer, self.atom_numbers])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        if self.exp_loss:
            weighted_loss = Exp(in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
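
The atom_flags Feature in these Behler-Parrinello style models is a per-molecule pairwise mask over the padded atom axis, which BPGather uses to ignore padding atoms. A sketch of one plausible construction, assuming flags[i, j] = 1 exactly when atoms i and j are both real; this is an assumption about the expected input, not code taken from DeepChem.

import numpy as np

def atom_flags(n_real_atoms, max_atoms):
    """Assumed mask: 1 where both row and column index a real atom, 0 over padding."""
    present = np.zeros(max_atoms, dtype=np.float32)
    present[:n_real_atoms] = 1.0
    return np.outer(present, present)

flags = atom_flags(n_real_atoms=3, max_atoms=5)
print(flags.shape)            # (5, 5)
print(flags.reshape(-1).shape)  # flattened to max_atoms * max_atoms for the Feature above
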
Example #17
    def build_graph(self):

        self.atom_numbers = Feature(shape=(None, self.max_atoms),
                                    dtype=tf.int32)
        self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms, 4))

        previous_layer = ANIFeat(in_layers=self.atom_feats,
                                 max_atoms=self.max_atoms)

        self.featurized = previous_layer

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = AtomicDifferentiatedDense(
                self.max_atoms,
                n_hidden,
                self.atom_number_cases,
                activation=self.activation_fn,
                in_layers=[previous_layer, self.atom_numbers])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            regression = Dense(out_channels=1,
                               activation_fn=None,
                               in_layers=[Hiddens[-1]])
            output = BPGather(self.max_atoms,
                              in_layers=[regression, self.atom_flags])
            self.add_output(output)

            label = Label(shape=(None, 1))
            self.labels_fd.append(label)
            cost = L2Loss(in_layers=[label, output])
            costs.append(cost)

        all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        if self.exp_loss:
            loss = Exp(in_layers=[loss])
        self.set_loss(loss)
Example #18
    def build_graph(self):
        print("building")
        features = Feature(shape=(None, self.n_features))
        last_layer = features
        for layer_size in self.encoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.mean = Dense(in_layers=last_layer,
                          activation_fn=None,
                          out_channels=1)
        self.std = Dense(in_layers=last_layer,
                         activation_fn=None,
                         out_channels=1)

        readout = CombineMeanStd([self.mean, self.std], training_only=True)
        last_layer = readout
        for layer_size in self.decoder_layers:
            last_layer = Dense(in_layers=readout,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.reconstruction = Dense(in_layers=last_layer,
                                    activation_fn=None,
                                    out_channels=self.n_features)
        weights = Weights(shape=(None, self.n_features))
        reproduction_loss = L2Loss(
            in_layers=[features, self.reconstruction, weights])
        reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0)
        global_step = TensorWrapper(self._get_tf("GlobalStep"))
        kl_loss = KLDivergenceLoss(
            in_layers=[self.mean, self.std, global_step],
            annealing_start_step=self.kl_annealing_start_step,
            annealing_stop_step=self.kl_annealing_stop_step)
        loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1])

        self.add_output(self.mean)
        self.add_output(self.reconstruction)
        self.set_loss(loss)
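
CombineMeanStd in this variational autoencoder plays the role of the reparameterization trick: during training the latent readout is a sample mean + std * eps with eps drawn from a standard normal, while at inference the mean alone can be used. A NumPy sketch of that sampling step; the training_only behaviour is assumed from the flag name, so check the layer's documentation for the exact semantics.

import numpy as np

def combine_mean_std(mean, std, training=True):
    """Reparameterization-style sampling: mean + std * eps while training, mean otherwise."""
    if not training:
        return mean
    eps = np.random.randn(*mean.shape)
    return mean + std * eps

mean = np.zeros((4, 1))
std = np.ones((4, 1))
print(combine_mean_std(mean, std).shape)   # (4, 1), the latent value fed to the decoder stack
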
Example #19
  def build_graph(self):
    """
    Building graph structures:
    """
    self.atom_features = Feature(shape=(None, 75))
    self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    self.membership = Feature(shape=(None,), dtype=tf.int32)

    self.deg_adjs = []
    for i in range(0, 10 + 1):
      deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
      self.deg_adjs.append(deg_adj)
    gc1 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[self.atom_features, self.degree_slice, self.membership] +
        self.deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership]
                    + self.deg_adjs)
    gc2 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership]
                    + self.deg_adjs)
    dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(
        batch_size=self.batch_size,
        activation_fn=tf.nn.tanh,
        in_layers=[batch_norm3, self.degree_slice, self.membership] +
        self.deg_adjs)

    if self.error_bars:
      readout = Dropout(in_layers=[readout], dropout_prob=0.2)

    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Concat(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Example #20
    def __init__(self,
                 n_tasks,
                 n_features,
                 alpha_init_stddevs=0.02,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 **kwargs):
        """Creates a progressive network.
  
    Only listing parameters specific to progressive networks here.

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: int
      Number of input features
    alpha_init_stddevs: list
      List of standard-deviations for alpha in adapter layers.
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    """

        super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        self.layer_sizes = layer_sizes
        self.alpha_init_stddevs = alpha_init_stddevs
        self.weight_init_stddevs = weight_init_stddevs
        self.bias_init_consts = bias_init_consts
        self.dropouts = dropouts
        self.activation_fns = activation_fns

        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            self.weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(alpha_init_stddevs, collections.Sequence):
            self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            self.bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            self.dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            self.activation_fns = [activation_fns] * n_layers

        # Add the input features.
        self.mol_features = Feature(shape=(None, n_features))

        all_layers = {}
        outputs = []
        for task in range(self.n_tasks):
            task_layers = []
            for i in range(n_layers):
                if i == 0:
                    prev_layer = self.mol_features
                else:
                    prev_layer = all_layers[(i - 1, task)]
                    if task > 0:
                        lateral_contrib, trainables = self.add_adapter(
                            all_layers, task, i)
                        task_layers.extend(trainables)

                layer = Dense(in_layers=[prev_layer],
                              out_channels=layer_sizes[i],
                              activation_fn=None,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=self.weight_init_stddevs[i]),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer,
                                  value=self.bias_init_consts[i]))
                task_layers.append(layer)

                if i > 0 and task > 0:
                    layer = layer + lateral_contrib
                assert self.activation_fns[
                    i] is tf.nn.relu, "Only ReLU is supported"
                layer = ReLU(in_layers=[layer])
                if self.dropouts[i] > 0.0:
                    layer = Dropout(self.dropouts[i], in_layers=[layer])
                all_layers[(i, task)] = layer

            prev_layer = all_layers[(n_layers - 1, task)]
            layer = Dense(in_layers=[prev_layer],
                          out_channels=1,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=self.weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(
                              tf.constant_initializer,
                              value=self.bias_init_consts[-1]))
            task_layers.append(layer)

            if task > 0:
                lateral_contrib, trainables = self.add_adapter(
                    all_layers, task, n_layers)
                task_layers.extend(trainables)
                layer = layer + lateral_contrib
            outputs.append(layer)
            self.add_output(layer)
            task_label = Label(shape=(None, 1))
            task_weight = Weights(shape=(None, 1))
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[task_label, layer, task_weight]))
            self.create_submodel(layers=task_layers,
                                 loss=weighted_loss,
                                 optimizer=None)
        # Weight decay not activated
        """
Example #21
  def create_loss(self, layer, label, weight):
    weighted_loss = ReduceSum(L2Loss(in_layers=[label, layer, weight]))
    return weighted_loss
Example #22
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=[0.02, 0.02],
                 bias_init_consts=[1.0, 1.0],
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=[0.5],
                 **kwargs):
        """Create a TensorGraphMultiTaskRegressor.

    In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
    bias_init_consts: list
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
    """
        super(TensorGraphMultiTaskRegressor, self).__init__(mode='regression',
                                                            **kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features

        # Add the input features.

        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the dense layers

        for size, weight_stddev, bias_const, dropout in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=tf.nn.relu,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer

        # Compute the loss function for each label.

        output = Reshape(shape=(-1, n_tasks, 1),
                         in_layers=[
                             Dense(in_layers=[prev_layer],
                                   out_channels=n_tasks,
                                   weights_initializer=TFWrapper(
                                       tf.truncated_normal_initializer,
                                       stddev=weight_init_stddevs[-1]),
                                   biases_initializer=TFWrapper(
                                       tf.constant_initializer,
                                       value=bias_init_consts[-1]))
                         ])
        self.add_output(output)
        labels = Label(shape=(None, n_tasks, 1))
        weights = Weights(shape=(None, n_tasks))
        loss = L2Loss(in_layers=[labels, output])
        weighted_loss = WeightedError(in_layers=[loss, weights])
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
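
The optional WeightDecay wrapper at the end of this constructor adds a parameter-norm penalty on top of the data loss. A minimal NumPy sketch of the 'l2' variant of that idea, assuming a penalty of the form loss + penalty * sum(w ** 2); the exact set of variables penalized and the scaling used by DeepChem's layer may differ.

import numpy as np

def l2_weight_decay(data_loss, weight_matrices, penalty):
    """Assumed 'l2' weight-decay: add penalty * sum of squared weights to the training loss."""
    reg = sum(np.sum(w ** 2) for w in weight_matrices)
    return data_loss + penalty * reg

weights = [np.random.randn(50, 64), np.random.randn(64, 3)]
print(l2_weight_decay(data_loss=1.25, weight_matrices=weights, penalty=1e-4))
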
Example #23
    def build_graph(self):
        """Building graph structures:
                Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression
                """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.pair_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
        weave_layer1A, weave_layer1P = WeaveLayerFactory(
            n_atom_input_feat=self.n_atom_feat,
            n_pair_input_feat=self.n_pair_feat,
            n_atom_output_feat=self.n_hidden,
            n_pair_output_feat=self.n_hidden,
            in_layers=[
                self.atom_features, self.pair_features, self.pair_split,
                self.atom_to_pair
            ])
        weave_layer2A, weave_layer2P = WeaveLayerFactory(
            n_atom_input_feat=self.n_hidden,
            n_pair_input_feat=self.n_hidden,
            n_atom_output_feat=self.n_hidden,
            n_pair_output_feat=self.n_hidden,
            update_pair=False,
            in_layers=[
                weave_layer1A, weave_layer1P, self.pair_split,
                self.atom_to_pair
            ])
        dense1 = Dense(out_channels=self.n_graph_feat,
                       activation_fn=tf.nn.tanh,
                       in_layers=weave_layer2A)
        batch_norm1 = BatchNorm(epsilon=1e-5, in_layers=[dense1])
        weave_gather = WeaveGather(self.batch_size,
                                   n_input=self.n_graph_feat,
                                   gaussian_expand=True,
                                   in_layers=[batch_norm1, self.atom_split])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=weave_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(shape=(None, n_tasks),
                             in_layers=[
                                 Dense(in_layers=weave_gather,
                                       out_channels=n_tasks)
                             ])
            self.add_output(output)
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
Example #24
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dense1,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
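
The uncertainty branch above implements a heteroscedastic regression loss: the network predicts a per-task log-variance, and each squared error is scaled by the predicted variance plus a log-variance penalty. A NumPy analogue of that expression, matching the ReduceSum(ReduceMean(..., axis=[1])) reduction in the code.

import numpy as np

def heteroscedastic_l2(labels, preds, log_var, weights):
    """weights * ((y - y_hat)^2 / var + log var), averaged over tasks and summed over the batch."""
    var = np.exp(log_var)
    per_task = weights * ((labels - preds) ** 2 / var + log_var)
    return np.sum(np.mean(per_task, axis=1))

y = np.random.rand(8, 2)
y_hat = np.random.rand(8, 2)
log_var = np.zeros((8, 2))
w = np.ones((8, 2))
print(heteroscedastic_l2(y, y_hat, log_var, w))
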
Example #25
    def build_graph(self):
        """Building graph structures:
                Features => DAGLayer => DAGGather => Classification or Regression
                """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              layer_sizes=self.layer_sizes,
                              dropout=self.dropout,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               layer_sizes=self.layer_sizes_gather,
                               dropout=self.dropout,
                               in_layers=[dag_layer1, self.membership])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dag_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(shape=(None, n_tasks),
                                  in_layers=[
                                      Dense(in_layers=dag_gather,
                                            out_channels=n_tasks)
                                  ])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
Example #26
  def build_graph(self):
    """Building graph structures:
        Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression
        """
    self.atom_features = Feature(shape=(None, self.n_atom_feat))
    self.pair_features = Feature(shape=(None, self.n_pair_feat))
    combined = Combine_AP(in_layers=[self.atom_features, self.pair_features])
    self.pair_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
    weave_layer1 = WeaveLayer(
        n_atom_input_feat=self.n_atom_feat,
        n_pair_input_feat=self.n_pair_feat,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        in_layers=[combined, self.pair_split, self.atom_to_pair])
    weave_layer2 = WeaveLayer(
        n_atom_input_feat=self.n_hidden,
        n_pair_input_feat=self.n_hidden,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        update_pair=False,
        in_layers=[weave_layer1, self.pair_split, self.atom_to_pair])
    separated = Separate_AP(in_layers=[weave_layer2])
    dense1 = Dense(
        out_channels=self.n_graph_feat,
        activation_fn=tf.nn.tanh,
        in_layers=[separated])
    batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1])
    weave_gather = WeaveGather(
        self.batch_size,
        n_input=self.n_graph_feat,
        gaussian_expand=True,
        in_layers=[batch_norm1, self.atom_split])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[weave_gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[weave_gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Concat(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Example #27
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 bypass_layer_sizes=[100],
                 bypass_weight_init_stddevs=[.02],
                 bypass_bias_init_consts=[1.],
                 bypass_dropouts=[.5],
                 **kwargs):
        """ Create a RobustMultitaskRegressor.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes).  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    bypass_layer_sizes: list
      the size of each dense layer in the bypass network. The length of this list determines the number of bypass layers.
    bypass_weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of bypass layers.
      same requirements as weight_init_stddevs
    bypass_bias_init_consts: list or float
      the value to initialize the biases in bypass layers to;
      same requirements as bias_init_consts
    bypass_dropouts: list or float
      the dropout probability to use for bypass layers.
      same requirements as dropouts
    """
        super(RobustMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers

        n_bypass_layers = len(bypass_layer_sizes)
        if not isinstance(bypass_weight_init_stddevs, collections.Sequence):
            bypass_weight_init_stddevs = [bypass_weight_init_stddevs
                                          ] * n_bypass_layers
        if not isinstance(bypass_bias_init_consts, collections.Sequence):
            bypass_bias_init_consts = [bypass_bias_init_consts
                                       ] * n_bypass_layers
        if not isinstance(bypass_dropouts, collections.Sequence):
            bypass_dropouts = [bypass_dropouts] * n_bypass_layers
        bypass_activation_fns = [activation_fns[0]] * n_bypass_layers

        # Add the input features.
        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the shared dense layers
        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer
        top_multitask_layer = prev_layer

        task_outputs = []
        for i in range(self.n_tasks):
            prev_layer = mol_features
            # Add task-specific bypass layers
            for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                    bypass_layer_sizes, bypass_weight_init_stddevs,
                    bypass_bias_init_consts, bypass_dropouts,
                    bypass_activation_fns):
                layer = Dense(in_layers=[prev_layer],
                              out_channels=size,
                              activation_fn=activation_fn,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=weight_stddev),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer, value=bias_const))
                if dropout > 0.0:
                    layer = Dropout(dropout, in_layers=[layer])
                prev_layer = layer
            top_bypass_layer = prev_layer

            if n_bypass_layers > 0:
                task_layer = Concat(
                    axis=1, in_layers=[top_multitask_layer, top_bypass_layer])
            else:
                task_layer = top_multitask_layer

            task_out = Dense(in_layers=[task_layer], out_channels=1)
            task_outputs.append(task_out)

        output = Concat(axis=1, in_layers=task_outputs)

        self.add_output(output)
        labels = Label(shape=(None, n_tasks))
        weights = Weights(shape=(None, n_tasks))
        weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
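A minimal usage sketch for this constructor, assuming DeepChem's NumpyDataset and the standard TensorGraph fit interface (the dataset shapes and hyperparameters below are illustrative only):

import numpy as np
import deepchem as dc

# Toy data: 100 molecules, 50 features, 3 regression tasks.
X = np.random.rand(100, 50)
y = np.random.rand(100, 3)
dataset = dc.data.NumpyDataset(X, y)

model = RobustMultitaskRegressor(
    n_tasks=3,
    n_features=50,
    layer_sizes=[1000],
    bypass_layer_sizes=[100],
    dropouts=0.25)
model.fit(dataset, nb_epoch=10)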
def atomic_conv_model(frag1_num_atoms=70,
                      frag2_num_atoms=634,
                      complex_num_atoms=701,
                      max_num_neighbors=12,
                      batch_size=24,
                      at=[
                          6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25.,
                          30., 35., 53., -1.
                      ],
                      radial=[[
                          1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0,
                          6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,
                          11.5, 12.0
                      ], [0.0, 4.0, 8.0], [0.4]],
                      layer_sizes=[32, 32, 16],
                      learning_rate=0.001):
    rp = [x for x in itertools.product(*radial)]
    frag1_X = Feature(shape=(batch_size, frag1_num_atoms, 3))
    frag1_nbrs = Feature(shape=(batch_size, frag1_num_atoms,
                                max_num_neighbors))
    frag1_nbrs_z = Feature(shape=(batch_size, frag1_num_atoms,
                                  max_num_neighbors))
    frag1_z = Feature(shape=(batch_size, frag1_num_atoms))

    frag2_X = Feature(shape=(batch_size, frag2_num_atoms, 3))
    frag2_nbrs = Feature(shape=(batch_size, frag2_num_atoms,
                                max_num_neighbors))
    frag2_nbrs_z = Feature(shape=(batch_size, frag2_num_atoms,
                                  max_num_neighbors))
    frag2_z = Feature(shape=(batch_size, frag2_num_atoms))

    complex_X = Feature(shape=(batch_size, complex_num_atoms, 3))
    complex_nbrs = Feature(shape=(batch_size, complex_num_atoms,
                                  max_num_neighbors))
    complex_nbrs_z = Feature(shape=(batch_size, complex_num_atoms,
                                    max_num_neighbors))
    complex_z = Feature(shape=(batch_size, complex_num_atoms))

    frag1_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[frag1_X, frag1_nbrs, frag1_nbrs_z])

    frag2_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[frag2_X, frag2_nbrs, frag2_nbrs_z])

    complex_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[complex_X, complex_nbrs, complex_nbrs_z])

    score = AtomicConvScore(at,
                            layer_sizes,
                            in_layers=[
                                frag1_conv, frag2_conv, complex_conv, frag1_z,
                                frag2_z, complex_z
                            ])

    label = Label(shape=(None, 1))
    loss = ReduceMean(in_layers=[L2Loss(in_layers=[score, label])])

    def feed_dict_generator(dataset, batch_size, epochs=1, pad_batches=True):
        def replace_atom_types(z):
            def place_holder(i):
                if i in at:
                    return i
                return -1

            return np.array([place_holder(x) for x in z])

        for epoch in range(epochs):
            for ind, (F_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size,
                                        deterministic=True,
                                        pad_batches=pad_batches)):
                N = complex_num_atoms
                N_1 = frag1_num_atoms
                N_2 = frag2_num_atoms
                M = max_num_neighbors

                orig_dict = {}
                batch_size = F_b.shape[0]
                num_features = F_b[0][0].shape[1]
                frag1_X_b = np.zeros((batch_size, N_1, num_features))
                for i in range(batch_size):
                    frag1_X_b[i] = F_b[i][0]
                orig_dict[frag1_X] = frag1_X_b

                frag2_X_b = np.zeros((batch_size, N_2, num_features))
                for i in range(batch_size):
                    frag2_X_b[i] = F_b[i][3]
                orig_dict[frag2_X] = frag2_X_b

                complex_X_b = np.zeros((batch_size, N, num_features))
                for i in range(batch_size):
                    complex_X_b[i] = F_b[i][6]
                orig_dict[complex_X] = complex_X_b

                frag1_Nbrs = np.zeros((batch_size, N_1, M))
                frag1_Z_b = np.zeros((batch_size, N_1))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][2])
                    frag1_Z_b[i] = z
                frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
                for atom in range(N_1):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][1].get(atom, "")
                        frag1_Nbrs[i,
                                   atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
                orig_dict[frag1_nbrs] = frag1_Nbrs
                orig_dict[frag1_nbrs_z] = frag1_Nbrs_Z
                orig_dict[frag1_z] = frag1_Z_b

                frag2_Nbrs = np.zeros((batch_size, N_2, M))
                frag2_Z_b = np.zeros((batch_size, N_2))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][5])
                    frag2_Z_b[i] = z
                frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
                for atom in range(N_2):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][4].get(atom, "")
                        frag2_Nbrs[i,
                                   atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
                orig_dict[frag2_nbrs] = frag2_Nbrs
                orig_dict[frag2_nbrs_z] = frag2_Nbrs_Z
                orig_dict[frag2_z] = frag2_Z_b

                complex_Nbrs = np.zeros((batch_size, N, M))
                complex_Z_b = np.zeros((batch_size, N))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][8])
                    complex_Z_b[i] = z
                complex_Nbrs_Z = np.zeros((batch_size, N, M))
                for atom in range(N):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][7].get(atom, "")
                        complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(
                            atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]

                orig_dict[complex_nbrs] = complex_Nbrs
                orig_dict[complex_nbrs_z] = complex_Nbrs_Z
                orig_dict[complex_z] = complex_Z_b
                orig_dict[label] = np.reshape(y_b, newshape=(batch_size, 1))
                yield orig_dict

    tg = TensorGraph(batch_size=batch_size,
                     mode="regression",
                     model_dir="/tmp/atom_conv",
                     learning_rate=learning_rate)
    tg.add_output(score)
    tg.set_loss(loss)
    return tg, feed_dict_generator, label
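A hedged sketch of how the returned pieces fit together, assuming train_dataset is a DeepChem dataset whose X entries follow the nine-field fragment/complex layout that feed_dict_generator indexes above:

# tg is a TensorGraph; the generator yields feed dicts for its Feature layers.
tg, generator, label = atomic_conv_model(batch_size=24)
tg.fit_generator(generator(train_dataset, batch_size=24, epochs=10))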
Example #29
    def build_graph(self):
        """
    Building graph structures:
    """
        self.atom_features = Feature(shape=(None, self.number_atom_features))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size, dropout in zip(self.graph_conv_layers, self.dropout):
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = BatchNorm(in_layers=[gc1])
            if dropout > 0.0:
                batch_norm1 = Dropout(dropout, in_layers=batch_norm1)
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size,
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = BatchNorm(in_layers=[dense])
        if self.dropout[-1] > 0.0:
            batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3)
        readout = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=readout,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
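The uncertainty branch above is a heteroscedastic regression loss: with predicted mean output, predicted log-variance log_var and var = exp(log_var), each task contributes weights * ((labels - output)**2 / var + log_var), the negative Gaussian log-likelihood up to constants. A small numpy sketch of that reduction (assumed (batch_size, n_tasks) shapes, not the layer code itself):

import numpy as np

labels = np.array([[1.0, 0.5]])
preds = np.array([[0.8, 0.9]])
log_var = np.array([[-1.0, 0.2]])
weights = np.array([[1.0, 1.0]])

var = np.exp(log_var)
diff = labels - preds
per_task = weights * (diff * diff / var + log_var)
# Mean over tasks, sum over the batch, mirroring ReduceSum(ReduceMean(..., axis=[1])).
loss = np.sum(np.mean(per_task, axis=1))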
Example #30
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 uncertainty=False,
                 **kwargs):
        """Create a MultitaskRegressor.

    In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    uncertainty: bool
      if True, include extra outputs and loss terms to enable the uncertainty
      in outputs to be predicted
    """
        super(MultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * (n_layers + 1)
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers
        if uncertainty:
            if any(d == 0.0 for d in dropouts):
                raise ValueError(
                    'Dropout must be included in every layer to predict uncertainty'
                )

        # Add the input features.

        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the dense layers

        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer
        self.neural_fingerprint = prev_layer

        # Compute the loss function for each label.

        output = Reshape(shape=(-1, n_tasks, 1),
                         in_layers=[
                             Dense(in_layers=[prev_layer],
                                   out_channels=n_tasks,
                                   weights_initializer=TFWrapper(
                                       tf.truncated_normal_initializer,
                                       stddev=weight_init_stddevs[-1]),
                                   biases_initializer=TFWrapper(
                                       tf.constant_initializer,
                                       value=bias_init_consts[-1]))
                         ])
        self.add_output(output)
        labels = Label(shape=(None, n_tasks, 1))
        weights = Weights(shape=(None, n_tasks, 1))
        if uncertainty:
            log_var = Reshape(
                shape=(-1, n_tasks, 1),
                in_layers=[
                    Dense(in_layers=[prev_layer],
                          out_channels=n_tasks,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=0.0))
                ])
            var = Exp(log_var)
            self.add_variance(var)
            diff = labels - output
            weighted_loss = weights * (diff * diff / var + log_var)
            weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1, 2]))
        else:
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
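As with the robust variant earlier, a minimal usage sketch under the same assumptions (toy NumpyDataset, standard TensorGraph fit interface); note that uncertainty=True requires a nonzero dropout on every layer:

import numpy as np
import deepchem as dc

X = np.random.rand(100, 50)
y = np.random.rand(100, 2)
dataset = dc.data.NumpyDataset(X, y)

model = MultitaskRegressor(
    n_tasks=2,
    n_features=50,
    layer_sizes=[500, 500],
    dropouts=0.25,
    uncertainty=True)
model.fit(dataset, nb_epoch=10)
# If the installed DeepChem version provides it, model.predict_uncertainty(dataset)
# returns predictions alongside estimated standard deviations.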