Code Example #1
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform T steps of message passing """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor
    atom_to_pair = in_layers[2].out_tensor
    n_atom_features = atom_features.get_shape().as_list()[-1]
    n_pair_features = pair_features.get_shape().as_list()[-1]
    # Add trainable weights
    self.build(pair_features, n_pair_features)

    if n_atom_features < self.n_hidden:
      pad_length = self.n_hidden - n_atom_features
      out = tf.pad(atom_features, ((0, 0), (0, pad_length)), mode='CONSTANT')
    elif n_atom_features > self.n_hidden:
      raise ValueError("Too large initial feature vector")
    else:
      out = atom_features

    for i in range(self.T):
      message = self.message_function.forward(out, atom_to_pair)
      out = self.update_function.forward(out, message)

    out_tensor = out

    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
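A note on the padding step in this example: when the initial atom features are narrower than the layer's hidden size, they are zero-padded on the right so message passing starts from a fixed-width state. A minimal standalone sketch of just that step (TF 1.x; the sizes below are made up for illustration):

import tensorflow as tf

atom_features = tf.ones((5, 30))   # e.g. 5 atoms with 30 raw features (hypothetical sizes)
n_hidden = 75                      # hypothetical hidden width of the layer
pad_length = n_hidden - 30
# zero-pad only the feature dimension, on the right
out = tf.pad(atom_features, ((0, 0), (0, pad_length)), mode='CONSTANT')
print(out.get_shape())             # (5, 75)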
Code Example #2
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ 
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
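The pooling above relies on tf.segment_sum, which sums per-atom rows into per-molecule rows using atom_split as the (sorted) molecule id of each atom. A minimal standalone sketch (TF 1.x, toy values):

import tensorflow as tf

atom_feats = tf.constant([[1., 0.], [0., 1.], [2., 2.]])  # 3 atoms, 2 features
atom_split = tf.constant([0, 0, 1])                       # molecule id of each atom (sorted)
mol_feats = tf.segment_sum(atom_feats, atom_split)        # shape (2, 2): one row per molecule
with tf.Session() as sess:
  print(sess.run(mol_feats))  # [[1. 1.] [2. 2.]]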
Code Example #3
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform M steps of set2set gather,
        detailed descriptions in: https://arxiv.org/abs/1511.06391 """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    self.c = tf.zeros((self.batch_size, self.n_hidden))
    self.h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(self.h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Add another value (~ -Inf) to each segment to prevent errors in softmax
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.segment_sum(tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # Models using this layer must set pad_batches=True
      q_star = tf.concat([self.h, r], axis=1)
      self.h, self.c = self.LSTMStep(q_star, self.c)

    out_tensor = q_star
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
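The attention weights above are computed per molecule: the per-atom scores are partitioned by molecule, a large negative sentinel is appended to each partition so the softmax stays well defined even for an empty (padded) molecule, and the sentinel is dropped again afterwards. A minimal sketch of that trick (TF 1.x, toy values):

import tensorflow as tf

e = tf.constant([0.5, 1.5, -0.5])    # per-atom attention scores
atom_split = tf.constant([0, 0, 1])  # molecule id of each atom
batch_size = 2
# split scores by molecule, append a ~ -Inf sentinel, softmax, then drop the sentinel
e_mols = tf.dynamic_partition(e, atom_split, batch_size)
e_mols = [tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols]
a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
with tf.Session() as sess:
  print(sess.run(a))                 # one attention weight per atom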
Code Example #4
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    output = in_layers[0].out_tensor
    out_tensor = output[:, self.task_id:self.task_id + 1]
    self.out_tensor = out_tensor
    return out_tensor
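This example just slices out a single task's column. Using task_id:task_id + 1 instead of a bare task_id keeps the result as a 2-D (batch, 1) tensor rather than a 1-D vector, which is what downstream loss layers typically expect. A quick shape-only illustration:

import tensorflow as tf

output = tf.zeros((8, 4))                           # hypothetical 4-task output
task_id = 2
print(output[:, task_id:task_id + 1].get_shape())   # (8, 1)
print(output[:, task_id].get_shape())               # (8,)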
Code Example #5
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Creates weave tensors.

    parent layers: [atom_features, pair_features], pair_split, atom_to_pair
    """
    activation = activations.get(self.activation)
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()

    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor

    pair_split = in_layers[2].out_tensor
    atom_to_pair = in_layers[3].out_tensor

    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
    AA = activation(AA)
    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
    PA = activation(PA)
    PA = tf.segment_sum(PA, pair_split)

    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
    A = activation(A)

    if self.update_pair:
      AP_ij = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, atom_to_pair),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ij = activation(AP_ij)
      AP_ji = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ji = activation(AP_ji)

      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
      PP = activation(PP)
      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
      P = activation(P)
    else:
      P = pair_features

    self.out_tensors = [A, P]
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = A
    return self.out_tensors
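In the update_pair branch, atom_to_pair is an (n_pairs, 2) matrix of atom indices; gathering atom features with it yields an (n_pairs, 2, n_feat) tensor, and the reshape lays the two atoms of each pair side by side before the W_AP projection. A minimal sketch of that gather-and-reshape (TF 1.x, toy values):

import tensorflow as tf

atom_features = tf.constant([[1., 1.], [2., 2.], [3., 3.]])  # 3 atoms, 2 features each
atom_to_pair = tf.constant([[0, 1], [1, 2]])                 # 2 pairs of atom indices
# gather both atoms of each pair, then flatten to (n_pairs, 2 * n_feat)
pair_inputs = tf.reshape(tf.gather(atom_features, atom_to_pair), [-1, 2 * 2])
with tf.Session() as sess:
  print(sess.run(pair_inputs))  # [[1. 1. 2. 2.] [2. 2. 3. 3.]]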
Code Example #6
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_number
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_number = in_layers[0].out_tensor
    atom_features = tf.nn.embedding_lookup(self.embedding_list, atom_number)
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = atom_features
    return atom_features
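Here tf.nn.embedding_lookup maps each integer atomic number to one row of the layer's trainable embedding table (self.embedding_list). A minimal sketch with a stand-in random table (TF 1.x; the table shape is hypothetical):

import tensorflow as tf

embedding_list = tf.random_normal((100, 30))     # stand-in table: 100 elements x 30 features
atom_number = tf.constant([6, 8, 1])             # e.g. C, O, H
atom_features = tf.nn.embedding_lookup(embedding_list, atom_number)
print(atom_features.get_shape())                 # (3, 30)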
Code Example #7
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    membership = in_layers[1].out_tensor
    # Sum atom features within each molecule (graph)
    graph_features = tf.segment_sum(atom_features, membership)
    # pass the pooled graph features through the gather network (W_list, b_list)
    outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list)
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Code Example #8
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    output = in_layers[0].out_tensor
    atom_membership = in_layers[1].out_tensor
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    out_tensor = output
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Code Example #9
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(distance_hidden,
                          tf.gather(atom_features_hidden,
                                    distance_membership_j))

    # for atom i in molecule m, this step multiplies the distance information of
    # atom pair (i, j) with the embedding of atom j (both passed through a hidden layer)
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # for atom i, sum the influence from all other atom j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
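Reading the code above, the per-atom update it implements is roughly

  x_i ← x_i + Σ_j f( [ (d_ij·W_df + b_df) ⊙ (x_j·W_cf + b_cf) ]·W_fc ) − f( [ b_df ⊙ (x_i·W_cf + b_cf) ]·W_fc )

where f is self.activation, ⊙ is element-wise multiplication, d_ij are the distance features of pair (i, j), and the sum runs over the pairs whose distance_membership_i equals i. The subtracted output_ii term appears to cancel the self-pair contribution, for which the distance features are zero and distance_hidden reduces to b_df; the exact index bookkeeping is defined by the featurizer that produces distance_membership_i/j.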
Code Example #10
File: graph_layers.py Project: xuzhang5788/PADME
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, parents, calculation_orders, calculation_masks, n_atoms
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    atom_features = in_layers[0].out_tensor
    # each atom corresponds to a graph, represented by a `max_atoms*max_atoms` int32 index matrix
    # each graph takes `max_atoms` calculation steps (one per row) to build its graph features
    parents = in_layers[1].out_tensor
    # target atoms for each step: (batch_size*max_atoms) * max_atoms
    calculation_orders = in_layers[2].out_tensor
    calculation_masks = in_layers[3].out_tensor

    n_atoms = in_layers[4].out_tensor
    # initialize graph features for each graph
    graph_features_initial = tf.zeros((self.max_atoms * self.batch_size,
                                       self.max_atoms + 1, self.n_graph_feat))
    # another row of zeros is generated for padded dummy atoms
    graph_features = tf.Variable(graph_features_initial, trainable=False)

    for count in range(self.max_atoms):
      # `count`-th step
      # extracting atom features of target atoms: (batch_size*max_atoms) * n_atom_features
      mask = calculation_masks[:, count]
      current_round = tf.boolean_mask(calculation_orders[:, count], mask)
      batch_atom_features = tf.gather(atom_features, current_round)

      # generating index for graph features used in the inputs
      index = tf.stack(
          [
              tf.reshape(
                  tf.stack(
                      [tf.boolean_mask(tf.range(n_atoms), mask)] *
                      (self.max_atoms - 1),
                      axis=1), [-1]),
              tf.reshape(tf.boolean_mask(parents[:, count, 1:], mask), [-1])
          ],
          axis=1)
      # extracting graph features for parents of the target atoms, then flatten
      # shape: (batch_size*max_atoms) * [(max_atoms-1)*n_graph_features]
      batch_graph_features = tf.reshape(
          tf.gather_nd(graph_features, index),
          [-1, (self.max_atoms - 1) * self.n_graph_feat])

      # concat into the input tensor: (batch_size*max_atoms) * n_inputs
      batch_inputs = tf.concat(
          axis=1, values=[batch_atom_features, batch_graph_features])
      # DAGgraph_step maps from batch_inputs to a batch of graph_features
      # of shape: (batch_size*max_atoms) * n_graph_features
      # representing the graph features of target atoms in each graph
      batch_outputs = self.DAGgraph_step(batch_inputs, self.W_list, self.b_list)

      # index for target atoms
      target_index = tf.stack([tf.range(n_atoms), parents[:, count, 0]], axis=1)
      target_index = tf.boolean_mask(target_index, mask)
      # update the graph features for target atoms
      graph_features = tf.scatter_nd_update(graph_features, target_index,
                                            batch_outputs)

    out_tensor = batch_outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
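One detail worth noting in the loop above: tf.scatter_nd_update writes rows in place into an existing variable, which is why graph_features is created as a non-trainable tf.Variable rather than a plain tensor. A minimal standalone sketch of that op (TF 1.x, toy values):

import tensorflow as tf

ref = tf.Variable(tf.zeros((4, 3)), trainable=False)   # buffer updated in place
indices = tf.constant([[1], [3]])                       # rows to overwrite
updates = tf.constant([[1., 1., 1.], [2., 2., 2.]])     # new row values
assign_op = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(assign_op))  # rows 1 and 3 replaced, others stay zero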