Example #1
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
            batch_size x n_features.
        """
        n_features = self.n_features
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, n_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(
                lengths_set) == 1, 'All layer params must have same length.'
            n_layers = lengths_set.pop()
            assert n_layers > 0, 'Must have some layers defined.'

            prev_layer = self.mol_features
            prev_layer_size = n_features
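            # Stack the hidden layers: fully connected layer + ReLU + dropout,
            # each feeding the next.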
            for i in range(n_layers):
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i])
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = []
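            # One linear output head per task; tf.squeeze turns the
            # [batch_size, 1] result into a [batch_size] vector.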
            for task in range(self.n_tasks):
                output.append(
                    tf.squeeze(
                        model_ops.fully_connected_layer(
                            tensor=prev_layer,
                            size=1,  # single output per task; weight shape is [prev_layer_size, 1]
                            weight_init=tf.truncated_normal(
                                shape=[prev_layer_size, 1],
                                stddev=weight_init_stddevs[i]),
                            bias_init=tf.constant(value=bias_init_consts[i],
                                                  shape=[1]))))
            return output
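The build method above returns a list of per-task [batch_size] tensors. A minimal
sketch of how those outputs could be wired into a regression loss (the labels
placeholder, loss, and optimizer below are illustrative assumptions, not part of
the example):

# Hypothetical TF 1.x training wiring around the outputs returned by build().
outputs = tf.stack(output, axis=1)  # [batch_size, n_tasks]
labels = tf.placeholder(tf.float32, shape=[None, len(output)], name='labels')
loss = tf.reduce_mean(tf.square(labels - outputs))  # mean squared error
train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)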
Example #2
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, n_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
      prev_layer_size = n_features 
      for i in range(n_layers):
        layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_layer_size, layer_sizes[i]],
                stddev=weight_init_stddevs[i]),
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i])
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      for task in range(self.n_tasks):
        output.append(tf.squeeze(
            model_ops.fully_connected_layer(
                tensor=prev_layer,
                size=1,  # single output per task; weight shape is [prev_layer_size, 1]
                weight_init=tf.truncated_normal(
                    shape=[prev_layer_size, 1],
                    stddev=weight_init_stddevs[i]),
                bias_init=tf.constant(value=bias_init_consts[i],
                                      shape=[1]))))
      return output
Example #3
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture of model: n_tasks * sigmoid nodes.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
            batch_size x n_features.
        """
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        n_features = self.n_features
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, n_features],
                                                   name='mol_features')

            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            lg_list = []
            for task in range(self.n_tasks):
                # Set up n_tasks output nodes (one per task).
                lg = model_ops.fully_connected_layer(
                    tensor=self.mol_features,
                    size=1,
                    weight_init=tf.truncated_normal(
                        shape=[self.n_features, 1],
                        stddev=weight_init_stddevs[0]),
                    bias_init=tf.constant(value=bias_init_consts[0],
                                          shape=[1]))
                lg_list.append(lg)

        return lg_list
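The lg tensors returned here are linear outputs (logits); the "sigmoid nodes" in
the docstring presumably come from applying a sigmoid cross-entropy loss
downstream. A sketch of that wiring, assuming a [batch_size, n_tasks] label
placeholder that is not part of the example:

# Hypothetical per-task sigmoid cross-entropy on the logits in lg_list.
labels = tf.placeholder(tf.float32, shape=[None, len(lg_list)], name='labels')
task_losses = [
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=tf.squeeze(lg), labels=labels[:, task])
    for task, lg in enumerate(lg_list)
]
total_loss = tf.reduce_mean(tf.add_n(task_losses))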
Example #4
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture of model: n_tasks * sigmoid nodes.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    n_features = self.n_features
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, n_features],
            name='mol_features')

      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      lg_list = []
      for task in range(self.n_tasks):
        # Set up n_tasks output nodes (one per task).
        lg = model_ops.fully_connected_layer(
            tensor=self.mol_features,
            size=1,
            weight_init=tf.truncated_normal(
                shape=[self.n_features, 1],
                stddev=weight_init_stddevs[0]),
            bias_init=tf.constant(value=bias_init_consts[0],
                                  shape=[1]))
        lg_list.append(lg)

    return lg_list
Example #5
    def test_fully_connected_layer(self):
        with self.test_session() as sess:
            features = np.random.random((128, 100))
            features_t = tf.constant(features, dtype=tf.float32)
            dense_t = model_ops.fully_connected_layer(features_t, 50)
            sess.run(tf.initialize_all_variables())
            features, dense, w, b = sess.run([features_t, dense_t] +
                                             tf.trainable_variables())
            expected = np.dot(features, w) + b
            self.assertAllClose(dense, expected)
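This test pins down the contract of model_ops.fully_connected_layer: it creates
exactly two trainable variables (a weight matrix and a bias vector) and returns
features @ w + b. A minimal sketch of an implementation consistent with this test
and with the weight_init/bias_init keyword arguments used in the other examples
(an assumed reconstruction, not DeepChem's verbatim source):

def fully_connected_layer(tensor, size=None, weight_init=None, bias_init=None):
    """Dense layer: returns tensor @ w + b, with w and b as trainable variables."""
    num_features = tensor.get_shape()[-1].value
    if weight_init is None:
        weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
    if bias_init is None:
        bias_init = tf.zeros([size])
    w = tf.Variable(weight_init, name='w', dtype=tf.float32)
    b = tf.Variable(bias_init, name='b', dtype=tf.float32)
    return tf.nn.xw_plus_b(tensor, w, b)  # equivalent to tf.matmul(tensor, w) + b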
Example #6
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
            batch_size x num_features.
        """
        num_features = self.n_features
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, num_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts

            bypass_layer_sizes = self.bypass_layer_sizes
            bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
            bypass_bias_init_consts = self.bypass_bias_init_consts
            bypass_dropouts = self.bypass_dropouts

            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(
                lengths_set) == 1, "All layer params must have same length."
            num_layers = lengths_set.pop()
            assert num_layers > 0, "Must have some layers defined."

            bypass_lengths_set = {
                len(bypass_layer_sizes),
                len(bypass_weight_init_stddevs),
                len(bypass_bias_init_consts),
                len(bypass_dropouts),
            }
            assert len(bypass_lengths_set) == 1, ("All bypass_layer params" +
                                                  " must have same length.")
            num_bypass_layers = bypass_lengths_set.pop()

            prev_layer = self.mol_features
            prev_layer_size = num_features
            for i in range(num_layers):
                # layer has shape [None, layer_sizes[i]]
                ########################################################## DEBUG
                print("Adding weights of shape %s" %
                      str([prev_layer_size, layer_sizes[i]]))
                ########################################################## DEBUG
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i], training)
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = []
            # top_multitask_layer has shape [None, layer_sizes[-1]]
            top_multitask_layer = prev_layer
            for task in range(self.n_tasks):
                # TODO(rbharath): Might want to make it feasible to have multiple
                # bypass layers.
                # Construct task bypass layer
                prev_bypass_layer = self.mol_features
                prev_bypass_layer_size = num_features
                for i in range(num_bypass_layers):
                    # bypass_layer has shape [None, bypass_layer_sizes[i]]
                    ########################################################## DEBUG
                    print("Adding bypass weights of shape %s" %
                          str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
                    ########################################################## DEBUG
                    bypass_layer = tf.nn.relu(
                        model_ops.fully_connected_layer(
                            tensor=prev_bypass_layer,
                            size=bypass_layer_sizes[i],
                            weight_init=tf.truncated_normal(
                                shape=[
                                    prev_bypass_layer_size,
                                    bypass_layer_sizes[i]
                                ],
                                stddev=bypass_weight_init_stddevs[i]),
                            bias_init=tf.constant(
                                value=bypass_bias_init_consts[i],
                                shape=[bypass_layer_sizes[i]])))

                    bypass_layer = model_ops.dropout(bypass_layer,
                                                     bypass_dropouts[i])
                    prev_bypass_layer = bypass_layer
                    prev_bypass_layer_size = bypass_layer_sizes[i]
                top_bypass_layer = prev_bypass_layer

                if num_bypass_layers > 0:
                    # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
                    task_layer = tf.concat(
                        1, [top_multitask_layer, top_bypass_layer])
                    task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
                else:
                    task_layer = top_multitask_layer
                    task_layer_size = layer_sizes[-1]
                ########################################################## DEBUG
                print("Adding output weights of shape %s" %
                      str([task_layer_size, 2]))
                ########################################################## DEBUG
                #################################################### DEBUG
                print("task_layer_size")
                print(task_layer_size)
                #################################################### DEBUG
                output.append(
                    tf.squeeze(
                        model_ops.logits(task_layer,
                                         num_classes=2,
                                         weight_init=tf.truncated_normal(
                                             shape=[task_layer_size, 2],
                                             stddev=weight_init_stddevs[-1]),
                                         bias_init=tf.constant(
                                             value=bias_init_consts[-1],
                                             shape=[2]))))
            return output
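Note on API versions: tf.concat(1, [top_multitask_layer, top_bypass_layer]) uses
the pre-1.0 TensorFlow argument order (axis first). Under TensorFlow 1.x and
later, the equivalent call is:

# TF >= 1.0: values first, axis as a keyword argument.
task_layer = tf.concat([top_multitask_layer, top_bypass_layer], axis=1)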
Example #7
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x num_features.
    """
    num_features = self.n_features 
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, num_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts

      bypass_layer_sizes = self.bypass_layer_sizes
      bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
      bypass_bias_init_consts = self.bypass_bias_init_consts
      bypass_dropouts = self.bypass_dropouts

      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, "All layer params must have same length."
      num_layers = lengths_set.pop()
      assert num_layers > 0, "Must have some layers defined."

      bypass_lengths_set = {
          len(bypass_layer_sizes),
          len(bypass_weight_init_stddevs),
          len(bypass_bias_init_consts),
          len(bypass_dropouts),
          }
      assert len(bypass_lengths_set) == 1, ("All bypass_layer params"+
                                            " must have same length.")
      num_bypass_layers = bypass_lengths_set.pop()

      prev_layer = self.mol_features
      prev_layer_size = num_features 
      for i in range(num_layers):
        # layer has shape [None, layer_sizes[i]]
        print("Adding weights of shape %s" % str([prev_layer_size, layer_sizes[i]]))
        layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_layer_size, layer_sizes[i]],
                stddev=weight_init_stddevs[i]),
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i], training)
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      # top_multitask_layer has shape [None, layer_sizes[-1]]
      top_multitask_layer = prev_layer
      for task in range(self.n_tasks):
        # TODO(rbharath): Might want to make it feasible to have multiple
        # bypass layers.
        # Construct task bypass layer
        prev_bypass_layer = self.mol_features
        prev_bypass_layer_size = num_features
        for i in range(num_bypass_layers):
          # bypass_layer has shape [None, bypass_layer_sizes[i]]
          print("Adding bypass weights of shape %s"
                % str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
          bypass_layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_bypass_layer,
            size=bypass_layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
                stddev=bypass_weight_init_stddevs[i]),
            bias_init=tf.constant(value=bypass_bias_init_consts[i],
                                  shape=[bypass_layer_sizes[i]])))
    
          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i], training)
          prev_bypass_layer = bypass_layer
          prev_bypass_layer_size = bypass_layer_sizes[i]
        top_bypass_layer = prev_bypass_layer

        if num_bypass_layers > 0:
          # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
          task_layer = tf.concat(1, [top_multitask_layer, top_bypass_layer])
          task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
        else:
          task_layer = top_multitask_layer
          task_layer_size = layer_sizes[-1]
        print("Adding output weights of shape %s"
              % str([task_layer_size, 2]))
        output.append(tf.squeeze(
            model_ops.logits(task_layer, num_classes=2,
                weight_init=tf.truncated_normal(
                    shape=[task_layer_size, 2],
                    stddev=weight_init_stddevs[-1]),
                bias_init=tf.constant(value=bias_init_consts[-1],
                                      shape=[2]))))
      return output
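Each entry of output here is a [batch_size, 2] logits tensor (with num_classes=2
and a batch dimension larger than one, tf.squeeze removes nothing). A sketch of
how such per-task logits are typically fed into a classification loss, assuming
an integer label placeholder that is not part of the example:

# Hypothetical softmax cross-entropy over the per-task two-class logits.
labels = tf.placeholder(tf.int32, shape=[None, len(output)], name='labels')
task_losses = [
    tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=task_logits, labels=labels[:, task]))
    for task, task_logits in enumerate(output)
]
total_loss = tf.add_n(task_losses)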