# Example #1 (score: 0)
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        Builds a shared stack of fully-connected ReLU layers (with dropout)
        followed by one linear, single-unit output head per task.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with
            shape batch_size x n_features.

        Args:
          graph: tf.Graph in which ops are created.
          name_scopes: name scopes used by the TensorflowGraph helpers.
          training: construction-time flag (unused here; dropout is applied
            unconditionally in this variant).

        Returns:
          A list of n_tasks squeezed output tensors.
        """
        n_features = self.n_features
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, n_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts
            # All per-layer hyperparameter lists must agree in length.
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(
                lengths_set) == 1, 'All layer params must have same length.'
            n_layers = lengths_set.pop()
            assert n_layers > 0, 'Must have some layers defined.'

            prev_layer = self.mol_features
            prev_layer_size = n_features
            for i in range(n_layers):
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i])
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = []
            for task in range(self.n_tasks):
                # FIX: each task head is a single output unit — the explicit
                # initializers are shaped [prev_layer_size, 1] and [1], but
                # `size` previously claimed layer_sizes[i]. Made consistent.
                # `weight_init_stddevs[i]` / `bias_init_consts[i]` use the
                # leftover loop index i == n_layers - 1 (top hidden layer).
                output.append(
                    tf.squeeze(
                        model_ops.fully_connected_layer(
                            tensor=prev_layer,
                            size=1,
                            weight_init=tf.truncated_normal(
                                shape=[prev_layer_size, 1],
                                stddev=weight_init_stddevs[i]),
                            bias_init=tf.constant(value=bias_init_consts[i],
                                                  shape=[1]))))
            return output
# Example #2 (score: 0)
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    Builds a shared stack of fully-connected ReLU layers (with dropout)
    followed by one linear, single-unit output head per task.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.

    Returns:
      A list of n_tasks squeezed output tensors.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, n_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      # All per-layer hyperparameter lists must agree in length.
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
      prev_layer_size = n_features
      for i in range(n_layers):
        layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_layer_size, layer_sizes[i]],
                stddev=weight_init_stddevs[i]),
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i])
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      for task in range(self.n_tasks):
        # FIX: each task head is a single output unit — the explicit
        # initializers are shaped [prev_layer_size, 1] and [1], but `size`
        # previously claimed layer_sizes[i]. Made consistent. The leftover
        # loop index i == n_layers - 1 selects the top layer's hyperparams.
        output.append(tf.squeeze(
            model_ops.fully_connected_layer(
                tensor=prev_layer,
                size=1,
                weight_init=tf.truncated_normal(
                    shape=[prev_layer_size, 1],
                    stddev=weight_init_stddevs[i]),
                bias_init=tf.constant(value=bias_init_consts[i],
                                      shape=[1]))))
      return output
# Example #3 (score: 0)
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        Builds a shared multitask stack plus, for each task, a private
        "bypass" stack fed directly from the input features. The shared top
        layer and the task's bypass top layer are concatenated and passed to
        a per-task two-class logits layer.

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with
            shape batch_size x num_features.

        Args:
          graph: tf.Graph in which ops are created.
          name_scopes: name scopes used by the TensorflowGraph helpers.
          training: bool forwarded to model_ops.dropout.

        Returns:
          A list of n_tasks squeezed logits tensors.
        """
        num_features = self.n_features
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, num_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts

            bypass_layer_sizes = self.bypass_layer_sizes
            bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
            bypass_bias_init_consts = self.bypass_bias_init_consts
            bypass_dropouts = self.bypass_dropouts

            # All per-layer hyperparameter lists must agree in length.
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(
                lengths_set) == 1, "All layer params must have same length."
            num_layers = lengths_set.pop()
            assert num_layers > 0, "Must have some layers defined."

            bypass_lengths_set = {
                len(bypass_layer_sizes),
                len(bypass_weight_init_stddevs),
                len(bypass_bias_init_consts),
                len(bypass_dropouts),
            }
            assert len(bypass_lengths_set) == 1, ("All bypass_layer params" +
                                                  " must have same length.")
            num_bypass_layers = bypass_lengths_set.pop()

            # Shared multitask trunk.
            prev_layer = self.mol_features
            prev_layer_size = num_features
            for i in range(num_layers):
                # layer has shape [None, layer_sizes[i]]
                layer = tf.nn.relu(
                    model_ops.fully_connected_layer(
                        tensor=prev_layer,
                        size=layer_sizes[i],
                        weight_init=tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=weight_init_stddevs[i]),
                        bias_init=tf.constant(value=bias_init_consts[i],
                                              shape=[layer_sizes[i]])))
                layer = model_ops.dropout(layer, dropouts[i], training)
                prev_layer = layer
                prev_layer_size = layer_sizes[i]

            output = []
            # top_multitask_layer has shape [None, layer_sizes[-1]]
            top_multitask_layer = prev_layer
            for task in range(self.n_tasks):
                # TODO(rbharath): Might want to make it feasible to have multiple
                # bypass layers.
                # Construct task bypass layer
                prev_bypass_layer = self.mol_features
                prev_bypass_layer_size = num_features
                for i in range(num_bypass_layers):
                    # bypass_layer has shape [None, bypass_layer_sizes[i]]
                    bypass_layer = tf.nn.relu(
                        model_ops.fully_connected_layer(
                            tensor=prev_bypass_layer,
                            size=bypass_layer_sizes[i],
                            weight_init=tf.truncated_normal(
                                shape=[
                                    prev_bypass_layer_size,
                                    bypass_layer_sizes[i]
                                ],
                                stddev=bypass_weight_init_stddevs[i]),
                            bias_init=tf.constant(
                                value=bypass_bias_init_consts[i],
                                shape=[bypass_layer_sizes[i]])))

                    # BUG FIX: forward the `training` flag, matching the
                    # shared stack's dropout above; it was omitted here, so
                    # bypass dropout would also fire at inference time.
                    bypass_layer = model_ops.dropout(bypass_layer,
                                                     bypass_dropouts[i],
                                                     training)
                    prev_bypass_layer = bypass_layer
                    prev_bypass_layer_size = bypass_layer_sizes[i]
                top_bypass_layer = prev_bypass_layer

                if num_bypass_layers > 0:
                    # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
                    task_layer = tf.concat(
                        1, [top_multitask_layer, top_bypass_layer])
                    task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
                else:
                    task_layer = top_multitask_layer
                    task_layer_size = layer_sizes[-1]
                # (Removed leftover DEBUG print instrumentation.)
                output.append(
                    tf.squeeze(
                        model_ops.logits(task_layer,
                                         num_classes=2,
                                         weight_init=tf.truncated_normal(
                                             shape=[task_layer_size, 2],
                                             stddev=weight_init_stddevs[-1]),
                                         bias_init=tf.constant(
                                             value=bias_init_consts[-1],
                                             shape=[2]))))
            return output
# Example #4 (score: 0)
    def build(self, graph, name_scopes, training):
        """Constructs the graph architecture as specified in its config.

        Builds one column of hidden layers per task; each hidden layer of
        task > 0 additionally receives a lateral contribution computed by
        ``self.add_adapter`` from the earlier tasks' columns (add_adapter's
        semantics are defined elsewhere in this class — not visible here).

        This method creates the following Placeholders:
          mol_features: Molecule descriptor (e.g. fingerprint) tensor with
            shape batch_size x n_features.

        Args:
          graph: tf.Graph in which ops are created.
          name_scopes: name scopes used by the TensorflowGraph helpers.
          training: bool forwarded to model_ops.dropout.

        Returns:
          A list with one squeezed output tensor per task.
        """
        n_features = self.n_features
        placeholder_scope = TensorflowGraph.get_placeholder_scope(
            graph, name_scopes)
        with graph.as_default():
            with placeholder_scope:
                self.mol_features = tf.placeholder(tf.float32,
                                                   shape=[None, n_features],
                                                   name='mol_features')

            layer_sizes = self.layer_sizes
            weight_init_stddevs = self.weight_init_stddevs
            bias_init_consts = self.bias_init_consts
            dropouts = self.dropouts
            # All per-layer hyperparameter lists must agree in length.
            lengths_set = {
                len(layer_sizes),
                len(weight_init_stddevs),
                len(bias_init_consts),
                len(dropouts),
            }
            assert len(
                lengths_set) == 1, 'All layer params must have same length.'
            n_layers = lengths_set.pop()
            assert n_layers > 0, 'Must have some layers defined.'

            prev_layer = self.mol_features
            prev_layer_size = n_features
            # all_layers maps (layer_index, task) -> activation tensor so
            # add_adapter can reach the other columns' earlier layers.
            all_layers = {}
            # Layer-major order: every task's layer i is built before any
            # task's layer i + 1, so depth i - 1 is complete for all columns
            # when the adapters at depth i are constructed.
            for i in range(n_layers):
                for task in range(self.n_tasks):
                    task_scope = TensorflowGraph.shared_name_scope(
                        "task%d" % task, graph, name_scopes)
                    print("Adding weights for task %d, layer %d" % (task, i))
                    with task_scope as scope:
                        if i == 0:
                            # First layer reads the shared input placeholder.
                            prev_layer = self.mol_features
                            prev_layer_size = self.n_features
                        else:
                            prev_layer = all_layers[(i - 1, task)]
                            prev_layer_size = layer_sizes[i - 1]
                            if task > 0:
                                # Lateral connection from earlier columns.
                                lateral_contrib = self.add_adapter(
                                    all_layers, task, i)
                        print(
                            "Creating W_layer_%d_task%d of shape %s" %
                            (i, task, str([prev_layer_size, layer_sizes[i]])))
                        W = tf.Variable(tf.truncated_normal(
                            shape=[prev_layer_size, layer_sizes[i]],
                            stddev=self.weight_init_stddevs[i]),
                                        name='W_layer_%d_task%d' % (i, task),
                                        dtype=tf.float32)
                        print("Creating b_layer_%d_task%d of shape %s" %
                              (i, task, str([layer_sizes[i]])))
                        b = tf.Variable(tf.constant(
                            value=self.bias_init_consts[i],
                            shape=[layer_sizes[i]]),
                                        name='b_layer_%d_task%d' % (i, task),
                                        dtype=tf.float32)
                        layer = tf.matmul(prev_layer, W) + b
                        if i > 0 and task > 0:
                            layer = layer + lateral_contrib
                        layer = tf.nn.relu(layer)
                        layer = model_ops.dropout(layer, dropouts[i], training)
                        all_layers[(i, task)] = layer

            output = []
            # NOTE: `i` below is intentionally the leftover loop index,
            # i.e. n_layers - 1 (the top hidden layer).
            for task in range(self.n_tasks):
                prev_layer = all_layers[(i, task)]
                prev_layer_size = layer_sizes[i]
                task_scope = TensorflowGraph.shared_name_scope(
                    "task%d" % task, graph, name_scopes)
                with task_scope as scope:
                    if task > 0:
                        # Lateral contribution into the output layer
                        # (depth i + 1 == n_layers).
                        lateral_contrib = tf.squeeze(
                            self.add_adapter(all_layers, task, i + 1))
                    weight_init = tf.truncated_normal(
                        shape=[prev_layer_size, 1],
                        stddev=weight_init_stddevs[i])
                    bias_init = tf.constant(value=bias_init_consts[i],
                                            shape=[1])
                    print("Creating W_output_task%d of shape %s" %
                          (task, str([prev_layer_size, 1])))
                    w = tf.Variable(weight_init,
                                    name='W_output_task%d' % task,
                                    dtype=tf.float32)
                    print("Creating b_output_task%d of shape %s" %
                          (task, str([1])))
                    b = tf.Variable(bias_init,
                                    name='b_output_task%d' % task,
                                    dtype=tf.float32)
                    layer = tf.squeeze(tf.matmul(prev_layer, w) + b)
                    if i > 0 and task > 0:
                        layer = layer + lateral_contrib
                    output.append(layer)

            return output
  def add_progressive_lattice(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    Builds one column of hidden layers per task, where each hidden layer of
    task > 0 also receives a lateral contribution from earlier tasks'
    columns via ``self.add_adapter`` (defined elsewhere in this class).

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.

    Args:
      graph: tf.Graph in which ops are created.
      name_scopes: name scopes used by the TensorflowGraph helpers.
      training: bool forwarded to model_ops.dropout.

    Returns:
      A list with one squeezed output tensor per task.

    NOTE(review): despite the docstring above, this method reads
    self.mol_features but never creates the placeholder itself, and
    `placeholder_scope` below is computed but unused — presumably build()
    must have run first. Confirm before calling this standalone.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      # All per-layer hyperparameter lists must agree in length.
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
      prev_layer_size = n_features 
      # all_layers maps (layer_index, task) -> activation tensor so
      # add_adapter can reach the other columns' earlier layers.
      all_layers = {}
      # Layer-major order: every task's layer i is built before any task's
      # layer i + 1.
      for i in range(n_layers):
        for task in range(self.n_tasks):
          task_scope = TensorflowGraph.shared_name_scope(
              "task%d_ops" % task, graph, name_scopes)
          print("Adding weights for task %d, layer %d" % (task, i))
          with task_scope as scope:
            if i == 0:
              # First layer reads the shared input features.
              prev_layer = self.mol_features
              prev_layer_size = self.n_features
            else:
              prev_layer = all_layers[(i-1, task)]
              prev_layer_size = layer_sizes[i-1]
              if task > 0:
                # Lateral connection from earlier columns.
                lateral_contrib = self.add_adapter(all_layers, task, i)
            print("Creating W_layer_%d_task%d of shape %s" %
                  (i, task, str([prev_layer_size, layer_sizes[i]])))
            W = tf.Variable(
                tf.truncated_normal(
                    shape=[prev_layer_size, layer_sizes[i]],
                    stddev=self.weight_init_stddevs[i]),
                name='W_layer_%d_task%d' % (i, task), dtype=tf.float32)
            print("Creating b_layer_%d_task%d of shape %s" %
                  (i, task, str([layer_sizes[i]])))
            b = tf.Variable(tf.constant(value=self.bias_init_consts[i],
                            shape=[layer_sizes[i]]),
                            name='b_layer_%d_task%d' % (i, task), dtype=tf.float32)
            layer = tf.matmul(prev_layer, W) + b
            if i > 0 and task > 0:
              layer = layer + lateral_contrib
            layer = tf.nn.relu(layer)
            layer = model_ops.dropout(layer, dropouts[i], training)
            all_layers[(i, task)] = layer

      output = []
      # NOTE: `i` below is intentionally the leftover loop index, i.e.
      # n_layers - 1 (the top hidden layer).
      for task in range(self.n_tasks):
        prev_layer = all_layers[(i, task)]
        prev_layer_size = layer_sizes[i]
        task_scope = TensorflowGraph.shared_name_scope(
            "task%d" % task, graph, name_scopes)
        with task_scope as scope:
          if task > 0:
            # Lateral contribution into the output layer (depth i + 1).
            lateral_contrib = tf.squeeze(self.add_adapter(all_layers, task, i+1))
          weight_init = tf.truncated_normal(
              shape=[prev_layer_size, 1],
              stddev=weight_init_stddevs[i])
          bias_init = tf.constant(value=bias_init_consts[i],
                                  shape=[1])
          print("Creating W_output_task%d of shape %s"
                % (task, str([prev_layer_size, 1])))
          w = tf.Variable(weight_init, name='W_output_task%d'%task,
                          dtype=tf.float32)
          print("Creating b_output_task%d of shape %s" % (task, str([1])))
          b = tf.Variable(bias_init, name='b_output_task%d'%task,
                          dtype=tf.float32)
          layer = tf.squeeze(tf.matmul(prev_layer, w) + b)
          if i > 0 and task > 0:
            layer = layer + lateral_contrib
          output.append(layer)

      return output
# Example #6 (score: 0)
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    Builds a shared multitask stack plus one per-task "bypass" stack fed
    straight from the input features; the shared top layer and each task's
    bypass top layer are concatenated and passed to a per-task two-class
    logits layer.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x num_features.
    """
    num_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32, shape=[None, num_features], name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts

      bypass_layer_sizes = self.bypass_layer_sizes
      bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
      bypass_bias_init_consts = self.bypass_bias_init_consts
      bypass_dropouts = self.bypass_dropouts

      # Every per-layer hyperparameter list must have one entry per layer.
      param_lengths = {len(layer_sizes), len(weight_init_stddevs),
                       len(bias_init_consts), len(dropouts)}
      assert len(param_lengths) == 1, "All layer params must have same length."
      num_layers = param_lengths.pop()
      assert num_layers > 0, "Must have some layers defined."

      bypass_param_lengths = {len(bypass_layer_sizes),
                              len(bypass_weight_init_stddevs),
                              len(bypass_bias_init_consts),
                              len(bypass_dropouts)}
      assert len(bypass_param_lengths) == 1, ("All bypass_layer params" +
                                              " must have same length.")
      num_bypass_layers = bypass_param_lengths.pop()

      # Shared multitask trunk.
      current = self.mol_features
      current_size = num_features
      for layer_idx in range(num_layers):
        # current has shape [None, layer_sizes[layer_idx]] after this step.
        print("Adding weights of shape %s" %
              str([current_size, layer_sizes[layer_idx]]))
        hidden = model_ops.fully_connected_layer(
            tensor=current,
            size=layer_sizes[layer_idx],
            weight_init=tf.truncated_normal(
                shape=[current_size, layer_sizes[layer_idx]],
                stddev=weight_init_stddevs[layer_idx]),
            bias_init=tf.constant(value=bias_init_consts[layer_idx],
                                  shape=[layer_sizes[layer_idx]]))
        current = model_ops.dropout(tf.nn.relu(hidden),
                                    dropouts[layer_idx], training)
        current_size = layer_sizes[layer_idx]

      # top_multitask_layer has shape [None, layer_sizes[-1]]
      top_multitask_layer = current
      output = []
      for task in range(self.n_tasks):
        # TODO(rbharath): Might want to make it feasible to have multiple
        # bypass layers.
        # Per-task bypass stack, fed directly from the input features.
        bypass_current = self.mol_features
        bypass_current_size = num_features
        for layer_idx in range(num_bypass_layers):
          # bypass_current has shape [None, bypass_layer_sizes[layer_idx]]
          print("Adding bypass weights of shape %s"
                % str([bypass_current_size, bypass_layer_sizes[layer_idx]]))
          bypass_hidden = model_ops.fully_connected_layer(
              tensor=bypass_current,
              size=bypass_layer_sizes[layer_idx],
              weight_init=tf.truncated_normal(
                  shape=[bypass_current_size, bypass_layer_sizes[layer_idx]],
                  stddev=bypass_weight_init_stddevs[layer_idx]),
              bias_init=tf.constant(
                  value=bypass_bias_init_consts[layer_idx],
                  shape=[bypass_layer_sizes[layer_idx]]))
          bypass_current = model_ops.dropout(
              tf.nn.relu(bypass_hidden), bypass_dropouts[layer_idx], training)
          bypass_current_size = bypass_layer_sizes[layer_idx]
        top_bypass_layer = bypass_current

        if num_bypass_layers > 0:
          # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
          task_layer = tf.concat(1, [top_multitask_layer, top_bypass_layer])
          task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
        else:
          task_layer = top_multitask_layer
          task_layer_size = layer_sizes[-1]
        print("Adding output weights of shape %s"
              % str([task_layer_size, 1]))
        task_logits = model_ops.logits(
            task_layer,
            num_classes=2,
            weight_init=tf.truncated_normal(
                shape=[task_layer_size, 2],
                stddev=weight_init_stddevs[-1]),
            bias_init=tf.constant(value=bias_init_consts[-1], shape=[2]))
        output.append(tf.squeeze(task_logits))
      return output