def build(self):
    # Create target inputs
    self.label_placeholder = Input(tensor=tf.placeholder(
        dtype='bool', shape=(None, self.n_tasks), name="label_placeholder"))
    self.weight_placeholder = Input(tensor=tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder"))

    feat = self.model.return_outputs()
    output = model_ops.multitask_logits(feat, self.n_tasks)
    return output
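`model_ops.multitask_logits` is used throughout these examples but never shown. The sketch below is an assumption about what it plausibly builds, inferred from how it is called (one independent linear classification head per task, returning a list of per-task logits); it is not the actual DeepChem source.

# Hedged sketch only; the names and the n_classes=2 default are assumptions.
def multitask_logits(features, n_tasks, n_classes=2):
    n_features = features.get_shape()[-1].value
    logits = []
    for task in range(n_tasks):
        with tf.name_scope('task%d' % task):
            W = tf.Variable(
                tf.truncated_normal([n_features, n_classes], stddev=0.01))
            b = tf.Variable(tf.zeros([n_classes]))
            # One independent linear classifier head per task.
            logits.append(tf.matmul(features, W) + b)
    return logits  # list of (batch_size, n_classes) tensors, one per task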
Example 2
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
            batch_size x n_features.
        """
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        n_features = self.n_features
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, n_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(lengths_set) == 1, 'All layer params must have same length.'
            n_layers = lengths_set.pop()
            assert n_layers > 0, 'Must have some layers defined.'

            prev_layer = self.mol_features
            prev_layer_size = n_features
            for i in range(n_layers):
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i], training)
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = model_ops.multitask_logits(layer, self.n_tasks)
        return output
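`model_ops.fully_connected_layer` is likewise called but not defined here. Judging from its arguments (an initializer tensor for the weights, a constant tensor for the biases), it presumably builds a plain affine layer; a minimal sketch under that assumption:

# Presumed helper, not the actual DeepChem source; `size` is implied by the
# shape of weight_init in this sketch.
def fully_connected_layer(tensor, size, weight_init, bias_init):
    W = tf.Variable(weight_init, name='w')  # e.g. a truncated-normal tensor
    b = tf.Variable(bias_init, name='b')    # e.g. a constant tensor
    return tf.matmul(tensor, W) + b         # the caller applies the ReLU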
Example 3
  def build(self):
    # Create target inputs
    self.label_placeholder = tf.placeholder(
        dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")

    feat = self.model.return_outputs()
    output = model_ops.multitask_logits(feat, self.n_tasks)
    return output
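A minimal TF1-style sketch of how the placeholders created by this `build` would be fed. `classifier`, `y_batch`, and `w_batch` are illustrative assumptions, and the loss that actually consumes the label and weight placeholders is built elsewhere:

# Hypothetical driving code, not part of the original API.
logits = classifier.build()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed_dict = {
        classifier.label_placeholder: y_batch.astype(bool),        # (batch, n_tasks)
        classifier.weight_placeholder: w_batch.astype('float32'),  # (batch, n_tasks)
        # ...plus the feature placeholder(s) that classifier.model defines.
    }
    per_task_logits = sess.run(logits, feed_dict=feed_dict)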
Example 4
  def build(self):
    # Create target inputs
    self.label_placeholder = tf.placeholder(
        dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")
    self.training = self.model.get_training_state()
    feat = self.model.return_outputs()
    output = model_ops.multitask_logits(feat, self.n_tasks)
    return output
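The only difference from the previous example is that the wrapped model's training state is captured, so helpers like `model_ops.dropout` can disable dropout at inference time. A plausible sketch of such a helper, inferred from the `(tensor, rate, training)` call sites in these examples (not the actual implementation):

# Assumes `training` is a Python bool; a tf.bool tensor would need tf.cond.
def dropout(tensor, dropout_prob, training):
    if not training or not dropout_prob:
        return tensor  # identity at inference time or with a zero rate
    return tf.nn.dropout(tensor, keep_prob=1.0 - dropout_prob)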
Example 5
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
            batch_size x n_features.
        """
        warnings.warn(
            "TensorflowMultiTaskClassifier is deprecated. "
            "Will be removed in DeepChem 1.4.", DeprecationWarning)
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        n_features = self.n_features
        with graph.as_default():
            with placeholder_scope:
                mol_features = tf.placeholder(tf.float32,
                                              shape=[None, n_features],
                                              name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(lengths_set) == 1, 'All layer params must have same length.'
            n_layers = lengths_set.pop()
            assert n_layers > 0, 'Must have some layers defined.'

            label_placeholders = self.add_label_placeholders(
                graph, name_scopes)
            weight_placeholders = self.add_example_weight_placeholders(
                graph, name_scopes)
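            # At training time, batches are staged through a FIFO queue so
            # that input feeding can overlap graph execution; at eval time
            # the placeholders are consumed directly.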
            if training:
                graph.queue = tf.FIFOQueue(
                    capacity=5,
                    dtypes=[tf.float32] *
                    (len(label_placeholders) + len(weight_placeholders) + 1))
                graph.enqueue = graph.queue.enqueue([mol_features] +
                                                    label_placeholders +
                                                    weight_placeholders)
                queue_outputs = graph.queue.dequeue()
                labels = queue_outputs[1:len(label_placeholders) + 1]
                weights = queue_outputs[len(label_placeholders) + 1:]
                prev_layer = queue_outputs[0]
            else:
                labels = label_placeholders
                weights = weight_placeholders
                prev_layer = mol_features

            prev_layer_size = n_features
            for i in range(n_layers):
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i], training)
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = model_ops.multitask_logits(layer, self.n_tasks)
        return (output, labels, weights)
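The queue path above implies a two-step driving loop: feed the placeholders to `graph.enqueue`, then run ops that consume the dequeued tensors. A hedged sketch of that loop; the session setup, the batch iterator, and direct access to the names local to `build` are illustrative assumptions:

# Illustrative TF1 loop, not DeepChem's actual training code.
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    for X_b, ys_b, ws_b in batch_iterator():     # hypothetical batch source
        feed = {mol_features: X_b}
        feed.update(zip(label_placeholders, ys_b))
        feed.update(zip(weight_placeholders, ws_b))
        sess.run(graph.enqueue, feed_dict=feed)  # stage one batch
        sess.run(output)                         # dequeues and runs the network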