Example #1
    def _build_graph(self):

        # One-hot encoded amino-acid sequence input
        self.one_hot_seq = Feature(shape=(None, self.pad_length,
                                          self.num_amino_acids),
                                   dtype=tf.float32)

        # Two 1-D convolution blocks over the sequence axis
        conv1 = Conv1D(kernel_size=2,
                       filters=512,
                       in_layers=[self.one_hot_seq])
        maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
        conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])

        # Fully connected head with dropout and a single linear output
        flattened = Flatten(in_layers=[conv2])
        dense1 = Dense(out_channels=400,
                       in_layers=[flattened],
                       activation_fn=tf.nn.tanh)
        dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
        output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
        self.add_output(output)

        # Weighted L2 loss; only regression is supported
        if self.mode == "regression":
            label = Label(shape=(None, 1))
            loss = L2Loss(in_layers=[label, output])
        else:
            raise NotImplementedError(
                "Classification support not added yet; details are missing "
                "from the paper.")
        weights = Weights(shape=(None,))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
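
This method is a TensorGraph subclass hook, so it assumes the layer primitives are already in scope. A minimal import block it would rely on, assuming the DeepChem 2.x TensorGraph module paths (these may differ across releases):

import tensorflow as tf
from deepchem.models.tensorgraph.layers import (
    Feature, Conv1D, MaxPool1D, Flatten, Dense, Dropout,
    Label, L2Loss, Weights, WeightedError)
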
Example #2
def test_Conv1D_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1, 1))
  # The Conv1D layer doubles as the graph's output and its loss in this test
  conv = Conv1D(2, 1, in_layers=feature)
  tg.add_output(conv)
  tg.set_loss(conv)
  tg.build()
  tg.save()
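
The test stops at save(). A hedged round-trip sketch that also restores the graph, assuming the DeepChem 2.x import paths and the load_from_dir loader (both assumptions, verify against your release):

from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, Conv1D

tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1, 1))
conv = Conv1D(2, 1, in_layers=feature)
tg.add_output(conv)
tg.set_loss(conv)
tg.build()
tg.save()                                            # serializes under tg.model_dir
restored = TensorGraph.load_from_dir(tg.model_dir)   # assumed loader API
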
Example #3
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concat features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    # Per-task costs are stacked the same way in both modes
    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
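
This is the Kim-style TextCNN pattern: parallel Conv1D branches of different widths over a character embedding, each reduced by max-over-time pooling. A sketch of one branch in plain tf.keras to make the shapes explicit; the sizes below are illustrative, not the model's defaults:

import tensorflow as tf

seq_length, n_embedding = 120, 75   # assumed illustrative sizes
filter_size, num_filter = 3, 100

x = tf.keras.Input(shape=(seq_length, n_embedding))
conv = tf.keras.layers.Conv1D(num_filter, filter_size, padding='valid')(x)
pooled = tf.keras.layers.GlobalMaxPooling1D()(conv)  # max over the time axis
branch = tf.keras.Model(x, pooled)                   # output: (batch, num_filter)
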
Example #4
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concat features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
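
Unlike Example #3, which builds one output per task, this variant emits all tasks from a single Dense layer and reshapes. The shape arithmetic, with illustrative numbers:

import numpy as np

n_tasks, batch = 12, 8
logits = np.zeros((batch, n_tasks * 2))     # Dense output: (batch, n_tasks * 2)
logits = logits.reshape(-1, n_tasks, 2)     # two-way logits per task
assert logits.shape == (batch, n_tasks, 2)  # matches Label(shape=(None, n_tasks, 2))
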
Example #5
  def test_conv_1D(self):
    """Test that Conv1D can be invoked."""
    width = 5
    in_channels = 2
    out_channels = 3
    batch_size = 10
    in_tensor = np.random.rand(batch_size, width, in_channels)
    with self.test_session() as sess:
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = Conv1D(width, out_channels)(in_tensor)
      sess.run(tf.global_variables_initializer())
      out_tensor = out_tensor.eval()

      assert out_tensor.shape == (batch_size, width, out_channels)
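
The assertion expects the time dimension to be preserved, which matches 'same'-style padding in this version of the layer. A shape check under the same assumption with the current tf.keras Conv1D (this mirrors the assertion, it is not the old TensorGraph layer itself):

import numpy as np
import tensorflow as tf

x = tf.convert_to_tensor(np.random.rand(10, 5, 2), dtype=tf.float32)
y = tf.keras.layers.Conv1D(filters=3, kernel_size=5, padding='same')(x)
assert y.shape == (10, 5, 3)  # (batch_size, width, out_channels)
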
Example #6
    def test_conv_1D(self):
        """Test that Conv1D can be invoked."""
        width = 5
        in_channels = 2
        filters = 3
        kernel_size = 2
        batch_size = 10
        in_tensor = np.random.rand(batch_size, width, in_channels)
        with self.session() as sess:
            in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
            out_tensor = Conv1D(filters, kernel_size)(in_tensor)
            sess.run(tf.global_variables_initializer())
            out_tensor = out_tensor.eval()

            self.assertEqual(out_tensor.shape[0], batch_size)
            self.assertEqual(out_tensor.shape[2], filters)
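
Only the batch and filter dimensions are asserted here, so the check holds regardless of how padding affects the time axis; with 'valid' padding that axis shrinks to width - kernel_size + 1. Note the positional arguments are reversed relative to Example #5 (filters first, then kernel_size), an apparent API change between releases. The same shapes in current tf.keras:

import numpy as np
import tensorflow as tf

x = tf.convert_to_tensor(np.random.rand(10, 5, 2), dtype=tf.float32)
y = tf.keras.layers.Conv1D(filters=3, kernel_size=2, padding='valid')(x)
assert y.shape == (10, 4, 3)  # time axis: 5 - 2 + 1 = 4
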