Example #1
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))

    # For atom i in molecule m, this step multiplies the distance information of
    # atom pair (i, j) with the embedding of atom j (both passed through a hidden layer).
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # For atom i, sum the influence from all other atoms j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.trainable_variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
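
A minimal NumPy sketch (toy data, not DeepChem code) of the gather / segment-sum pattern used above: contributions indexed by distance_membership_j are looked up with a gather, then summed back per atom i with a segment sum.

import numpy as np

atom_features = np.array([[1.0], [2.0], [3.0]])  # 3 atoms, 1 feature each
membership_i = np.array([0, 0, 1, 1, 2, 2])      # atom i of each pair
membership_j = np.array([1, 2, 0, 2, 0, 1])      # atom j of each pair

pair_contrib = atom_features[membership_j]       # tf.gather analogue
summed = np.zeros_like(atom_features)            # tf.segment_sum analogue
np.add.at(summed, membership_i, pair_contrib)
print(summed)  # atom 0 accumulates the features of atoms 1 and 2, etc.
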
Example #2
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform T steps of message passing """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor
    atom_to_pair = in_layers[2].out_tensor
    n_atom_features = atom_features.get_shape().as_list()[-1]
    n_pair_features = pair_features.get_shape().as_list()[-1]
    # Add trainable weights
    self.build(pair_features, n_pair_features)

    if n_atom_features < self.n_hidden:
      pad_length = self.n_hidden - n_atom_features
      out = tf.pad(atom_features, ((0, 0), (0, pad_length)), mode='CONSTANT')
    elif n_atom_features > self.n_hidden:
      raise ValueError("Too large initial feature vector")
    else:
      out = atom_features

    for i in range(self.T):
      message = self.message_function.forward(out, atom_to_pair)
      out = self.update_function.forward(out, message)

    out_tensor = out

    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
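
The loop above only assumes that the message and update functions expose a forward(...) method. A toy, non-learned sketch of that contract (SumMessage and BlendUpdate are hypothetical names, not DeepChem classes):

import numpy as np

class SumMessage:
  """Toy message function: the message to atom i sums its neighbors' features."""

  def forward(self, out, atom_to_pair):
    messages = np.zeros_like(out)
    np.add.at(messages, atom_to_pair[:, 0], out[atom_to_pair[:, 1]])
    return messages

class BlendUpdate:
  """Toy update function: blend the current state with the incoming message."""

  def forward(self, out, message):
    return 0.5 * out + 0.5 * message

out = np.eye(3)                            # 3 atoms, 3 features
atom_to_pair = np.array([[0, 1], [1, 0]])  # one bond, both directions
for _ in range(2):                         # T = 2 message passing steps
  message = SumMessage().forward(out, atom_to_pair)
  out = BlendUpdate().forward(out, message)
print(out)
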
Example #3
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform T steps of message passing """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor
    atom_to_pair = in_layers[2].out_tensor
    n_atom_features = atom_features.get_shape().as_list()[-1]
    n_pair_features = pair_features.get_shape().as_list()[-1]
    # Add trainable weights
    self.build(pair_features, n_pair_features)

    if n_atom_features < self.n_hidden:
      pad_length = self.n_hidden - n_atom_features
      out = tf.pad(atom_features, ((0, 0), (0, pad_length)), mode='CONSTANT')
    elif n_atom_features > self.n_hidden:
      raise ValueError("Too large initial feature vector")
    else:
      out = atom_features

    for i in range(self.T):
      message = self.message_function.forward(out, atom_to_pair)
      out = self.update_function.forward(out, message)

    out_tensor = out

    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #4
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    inputs = in_layers[0].out_tensor
    K = self.K
    outputs = []
    for count in range(self.n_tasks):
      # Similarity values
      similarity = inputs[:, 2 * K * count:(2 * K * count + K)]
      # Labels for all top K similar samples
      ys = tf.cast(inputs[:, (2 * K * count + K):2 * K * (count + 1)], tf.int32)

      R = self.b + self.W[0] * similarity + self.W[1] * tf.constant(
          np.arange(K) + 1, dtype=tf.float32)
      R = tf.sigmoid(R)
      z = tf.reduce_sum(R * tf.gather(self.V, ys), axis=1) + self.b2
      outputs.append(tf.reshape(z, shape=[-1, 1]))
    out_tensor = tf.concat(outputs, axis=1)

    if set_tensors:
      self.trainable_variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
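
The slicing above assumes a fixed per-task column layout: each task owns a block of 2*K columns, the first K holding similarity values of the top-K neighbors and the next K their integer labels. A NumPy sketch of that layout with toy values:

import numpy as np

K, n_tasks = 2, 2
# One sample; columns are [sim, sim, label, label] for task 0, then task 1.
inputs = np.array([[0.9, 0.8, 1, 0, 0.7, 0.6, 0, 1]], dtype=float)
for count in range(n_tasks):
  similarity = inputs[:, 2 * K * count:2 * K * count + K]
  ys = inputs[:, 2 * K * count + K:2 * K * (count + 1)].astype(int)
  print(count, similarity, ys)
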
Example #5
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        inputs = in_layers[0].out_tensor
        K = self.K
        outputs = []
        for count in range(self.n_tasks):
            # Similarity values
            similarity = inputs[:, 2 * K * count:(2 * K * count + K)]
            # Labels for all top K similar samples
            ys = tf.cast(inputs[:, (2 * K * count + K):2 * K * (count + 1)],
                         tf.int32)

            R = self.b + self.W[0] * similarity + self.W[1] * tf.constant(
                np.arange(K) + 1, dtype=tf.float32)
            R = tf.sigmoid(R)
            z = tf.reduce_sum(R * tf.gather(self.V, ys), axis=1) + self.b2
            outputs.append(tf.reshape(z, shape=[-1, 1]))
        out_tensor = tf.concat(outputs, axis=1)

        if set_tensors:
            self.trainable_variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
Example #6
 def create_tensor(self, in_layers=None, **kwargs):
     if in_layers is None:
         in_layers = self.in_layers
     in_layers = convert_to_layers(in_layers)
     A = in_layers[0].out_tensor
     P = in_layers[1].out_tensor
     self.out_tensor = [A, P]
Example #7
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    d_cutoff = in_layers[0].out_tensor
    d = in_layers[1].out_tensor
    if self.atomic_number_differentiated:
      atom_numbers = in_layers[2].out_tensor
      atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,
                                                    atom_numbers)
    d_cutoff = tf.stack([d_cutoff] * self.length, axis=3)
    d = tf.stack([d] * self.length, axis=3)
    Rs = tf.reshape(self.Rs, (1, 1, 1, -1))
    ita = tf.reshape(self.ita, (1, 1, 1, -1))
    out_tensor = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type in self.atom_number_cases:
        selected_atoms = tf.expand_dims(
            tf.expand_dims(atom_number_embedded[:, :, atom_type], axis=1),
            axis=3)
        out_tensors.append(tf.reduce_sum(out_tensor * selected_atoms, axis=2))
      self.out_tensor = tf.concat(out_tensors, axis=2)
    else:
      self.out_tensor = tf.reduce_sum(out_tensor, axis=2)
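
The core radial term above is exp(-ita * (d - Rs)^2), scaled by the smooth cutoff values and broadcast over a grid of (Rs, ita) parameter pairs. A NumPy sketch with arbitrary illustrative numbers:

import numpy as np

d = np.array([1.0, 2.0, 3.0])         # pairwise distances
d_cutoff = np.array([1.0, 1.0, 0.0])  # cutoff values (0 beyond the radius)
Rs = np.array([0.5, 1.5])             # radial shifts
ita = np.array([1.0, 4.0])            # Gaussian widths

term = np.exp(-ita[None, :] * (d[:, None] - Rs[None, :]) ** 2)
print(term * d_cutoff[:, None])       # one column per (Rs, ita) pair
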
Example #8
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))

    # For atom i in molecule m, this step multiplies the distance information of
    # atom pair (i, j) with the embedding of atom j (both passed through a hidden layer).
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # For atom i, sum the influence from all other atoms j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #9
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #10
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate Radial Symmetry Function """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        d_cutoff = in_layers[0].out_tensor
        d = in_layers[1].out_tensor
        if self.atomic_number_differentiated:
            atom_numbers = in_layers[2].out_tensor
            atom_number_embedded = tf.nn.embedding_lookup(
                self.atom_number_embedding, atom_numbers)
        d_cutoff = tf.stack([d_cutoff] * self.length, axis=3)
        d = tf.stack([d] * self.length, axis=3)
        Rs = tf.reshape(self.Rs, (1, 1, 1, -1))
        ita = tf.reshape(self.ita, (1, 1, 1, -1))
        out_tensor = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
        if self.atomic_number_differentiated:
            out_tensors = []
            for atom_type in self.atom_number_cases:
                selected_atoms = tf.expand_dims(tf.expand_dims(
                    atom_number_embedded[:, :, atom_type], axis=1),
                                                axis=3)
                out_tensors.append(
                    tf.reduce_sum(out_tensor * selected_atoms, axis=2))
            out_tensor = tf.concat(out_tensors, axis=2)
        else:
            out_tensor = tf.reduce_sum(out_tensor, axis=2)

        if set_tensors:
            self.out_tensor = out_tensor

        return out_tensor
Example #11
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform M steps of set2set gather,
        detailed descriptions in: https://arxiv.org/abs/1511.06391 """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    self.c = tf.zeros((self.batch_size, self.n_hidden))
    self.h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(self.h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Append an extra value (~ -Inf) to each partition to avoid errors in softmax; it is dropped below
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.segment_sum(tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # A model using this layer must set pad_batches=True
      q_star = tf.concat([self.h, r], axis=1)
      self.h, self.c = self.LSTMStep(q_star, self.c)

    out_tensor = q_star
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
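
The tf.constant([-1000.]) concatenation above is a padding trick: tf.dynamic_partition can return an empty partition for a padded molecule, and appending a ~ -Inf logit keeps the softmax well defined while contributing almost nothing; the [:-1] slice then drops it. A NumPy sketch:

import numpy as np

def padded_softmax(e_mol):
  e_mol = np.concatenate([e_mol, [-1000.0]])  # ~ -Inf pad value
  a = np.exp(e_mol - e_mol.max())
  a = a / a.sum()
  return a[:-1]                               # drop the pad entry

print(padded_softmax(np.array([1.0, 2.0])))   # ~ the ordinary softmax
print(padded_softmax(np.array([])))           # empty molecule: no crash
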
Example #12
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform M steps of set2set gather,
        detailed descriptions in: https://arxiv.org/abs/1511.06391 """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    self.c = tf.zeros((self.batch_size, self.n_hidden))
    self.h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(self.h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Append an extra value (~ -Inf) to each partition to avoid errors in softmax; it is dropped below
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.segment_sum(tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # A model using this layer must set pad_batches=True
      q_star = tf.concat([self.h, r], axis=1)
      self.h, self.c = self.LSTMStep(q_star, self.c)

    out_tensor = q_star
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #13
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #14
 def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
     if in_layers is None:
         in_layers = self.in_layers
     in_layers = convert_to_layers(in_layers)
     out_tensor = in_layers[0].out_tensor[0]
     if set_tensors:
         self.out_tensor = out_tensor
     return out_tensor
Example #15
 def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
   if in_layers is None:
     in_layers = self.in_layers
   in_layers = convert_to_layers(in_layers)
   output = in_layers[0].out_tensor
   out_tensor = output[:, self.task_id:self.task_id + 1]
   self.out_tensor = out_tensor
   return out_tensor
Example #16
 def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
   if in_layers is None:
     in_layers = self.in_layers
   in_layers = convert_to_layers(in_layers)
   output = in_layers[0].out_tensor
   out_tensor = output[:, self.task_id:self.task_id + 1]
   self.out_tensor = out_tensor
   return out_tensor
Example #17
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Angular Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    max_atoms = self.max_atoms
    d_cutoff = in_layers[0].out_tensor
    d = in_layers[1].out_tensor
    atom_coordinates = in_layers[2].out_tensor
    if self.atomic_number_differentiated:
      atom_numbers = in_layers[3].out_tensor
      atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,
                                                    atom_numbers)

    vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \
                       tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
    R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))
    R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))
    f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))
    f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))

    # Define angle theta: theta = (R_ij vector . R_ik vector) / (R_ij distance * R_ik distance)
    theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \
                          tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)

    theta = tf.div(theta, R_ij * R_ik + 1e-5)

    R_ij = tf.stack([R_ij] * self.length, axis=4)
    R_ik = tf.stack([R_ik] * self.length, axis=4)
    f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)
    f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)

    theta = tf.stack([theta] * self.length, axis=4)
    lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))
    zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))
    ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))
    Rs = tf.reshape(self.Rs, (1, 1, 1, 1, -1))
    thetas = tf.reshape(self.thetas, (1, 1, 1, 1, -1))

    out_tensor = tf.pow(1 + lambd * tf.cos(theta - thetas), zeta) * \
                 tf.exp(-ita * tf.square((R_ij + R_ik) / 2 - Rs)) * \
                 f_R_ij * f_R_ik * tf.pow(tf.constant(2.), 1 - zeta)
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type_j in self.atom_number_cases:
        for atom_type_k in self.atom_number_cases:
          selected_atoms = tf.stack([atom_number_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
                           tf.stack([atom_number_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
          selected_atoms = tf.expand_dims(
              tf.expand_dims(selected_atoms, axis=1), axis=4)
          out_tensors.append(
              tf.reduce_sum(out_tensor * selected_atoms, axis=[2, 3]))
      self.out_tensor = tf.concat(out_tensors, axis=2)
    else:
      self.out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3])
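
Despite the name, the theta built above holds the cosine of the angle between the displacement vectors r_i - r_j and r_i - r_k, with the 1e-5 term guarding the division. A NumPy sketch for a single molecule (no batch axis):

import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
vec = coords[:, None, :] - coords[None, :, :]  # vec[i, j] = r_i - r_j
dot = np.einsum('ijc,ikc->ijk', vec, vec)      # (r_i - r_j) . (r_i - r_k)
d = np.sqrt((vec ** 2).sum(-1))
cos_theta = dot / (d[:, :, None] * d[:, None, :] + 1e-5)
print(cos_theta[0, 1, 2])  # 90 degrees at atom 0, so ~ 0
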
Example #18
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Merge features together """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        out_tensor = in_layers[0].out_tensor
        flags = in_layers[1].out_tensor

        out_tensor = tf.reduce_sum(out_tensor * flags[:, :, 0:1], axis=1)
        self.out_tensor = out_tensor
Example #19
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Merge features together """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    out_tensor = in_layers[0].out_tensor
    flags = in_layers[1].out_tensor

    out_tensor = tf.reduce_sum(out_tensor * flags[:, :, 0:1], axis=1)
    self.out_tensor = out_tensor
Example #20
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """description and explanation refer to deepchem.nn.DTNNEmbedding
        parent layers: atom_number
        """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_number = in_layers[0].out_tensor
    atom_features = tf.nn.embedding_lookup(self.embedding_list, atom_number)
    self.out_tensor = atom_features
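
tf.nn.embedding_lookup above is just row indexing into the embedding table: each atomic number selects its feature vector. A NumPy sketch (table size and feature width are arbitrary here):

import numpy as np

embedding_list = np.random.rand(10, 4)       # 10 atom types, 4 features
atom_number = np.array([1, 6, 6, 8])         # H, C, C, O
atom_features = embedding_list[atom_number]  # embedding_lookup analogue
print(atom_features.shape)                   # (4, 4)
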
Example #21
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """Creates weave tensors.

    parent layers: [atom_features, pair_features], pair_split, atom_to_pair
    """
        activation = activations.get(self.activation)  # Get activations
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        atom_features = in_layers[0].out_tensor
        pair_features = in_layers[1].out_tensor

        pair_split = in_layers[2].out_tensor
        atom_to_pair = in_layers[3].out_tensor

        AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
        AA = activation(AA)
        PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
        PA = activation(PA)
        PA = tf.segment_sum(PA, pair_split)

        A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
        A = activation(A)

        if self.update_pair:
            AP_ij = tf.matmul(
                tf.reshape(tf.gather(atom_features, atom_to_pair),
                           [-1, 2 * self.n_atom_input_feat]),
                self.W_AP) + self.b_AP
            AP_ij = activation(AP_ij)
            AP_ji = tf.matmul(
                tf.reshape(
                    tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
                    [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
            AP_ji = activation(AP_ji)

            PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
            PP = activation(PP)
            P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1),
                          self.W_P) + self.b_P
            P = activation(P)
        else:
            P = pair_features

        self.out_tensors = [A, P]
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = A
        return self.out_tensors
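
The AP_ij / AP_ji construction above gathers atom features by pair indices and flattens them, so each pair row carries the concatenated features of atoms i and j; reversing the index columns yields the (j, i) ordering. A NumPy sketch:

import numpy as np

atom_features = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
atom_to_pair = np.array([[0, 1], [1, 2]])                 # pairs (i, j)
ij = atom_features[atom_to_pair].reshape(-1, 4)           # (i, j) features
ji = atom_features[atom_to_pair[:, ::-1]].reshape(-1, 4)  # (j, i) features
print(ij[0], ji[0])  # [1. 10. 2. 20.] vs [2. 20. 1. 10.]
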
Example #22
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    slice_num = self.slice_num
    axis = self.axis
    inputs = in_layers[0].out_tensor
    out_tensor = tf.slice(inputs, [0] * axis + [slice_num], [-1] * axis + [1])

    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
Example #23
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    slice_num = self.slice_num
    axis = self.axis
    inputs = in_layers[0].out_tensor
    out_tensor = tf.slice(inputs, [0] * axis + [slice_num], [-1] * axis + [1])

    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
Example #24
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate Angular Symmetry Function """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        max_atoms = self.max_atoms
        d_cutoff = in_layers[0].out_tensor
        d = in_layers[1].out_tensor
        atom_coordinates = in_layers[2].out_tensor
        vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \
                           tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
        R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))
        R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))
        R_jk = tf.tile(tf.expand_dims(d, axis=1), (1, max_atoms, 1, 1))
        f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3),
                         (1, 1, 1, max_atoms))
        f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2),
                         (1, 1, max_atoms, 1))
        f_R_jk = tf.tile(tf.expand_dims(d_cutoff, axis=1),
                         (1, max_atoms, 1, 1))

        # Define angle theta: theta = (R_ij vector . R_ik vector) / (R_ij distance * R_ik distance)
        theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \
                              tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)

        theta = tf.div(theta, R_ij * R_ik + 1e-5)

        R_ij = tf.stack([R_ij] * self.length, axis=4)
        R_ik = tf.stack([R_ik] * self.length, axis=4)
        R_jk = tf.stack([R_jk] * self.length, axis=4)
        f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)
        f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)
        f_R_jk = tf.stack([f_R_jk] * self.length, axis=4)

        theta = tf.stack([theta] * self.length, axis=4)
        lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))
        zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))
        ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))

        out_tensor = tf.pow(1 + lambd * tf.cos(theta), zeta) * \
                     tf.exp(-ita * (tf.square(R_ij) + tf.square(R_ik) + tf.square(R_jk))) * \
                     f_R_ij * f_R_ik * f_R_jk
        out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3]) * \
                          tf.pow(tf.constant(2.), 1 - tf.reshape(self.zeta, (1, 1, -1)))

        if set_tensors:
            self.out_tensor = out_tensor

        return out_tensor
Example #25
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Creates weave tensors.

    parent layers: [atom_features, pair_features], pair_split, atom_to_pair
    """
    activation = activations.get(self.activation)  # Get activations
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()

    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor

    pair_split = in_layers[2].out_tensor
    atom_to_pair = in_layers[3].out_tensor

    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
    AA = activation(AA)
    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
    PA = activation(PA)
    PA = tf.segment_sum(PA, pair_split)

    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
    A = activation(A)

    if self.update_pair:
      AP_ij = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, atom_to_pair),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ij = activation(AP_ij)
      AP_ji = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ji = activation(AP_ji)

      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
      PP = activation(PP)
      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
      P = activation(P)
    else:
      P = pair_features

    self.out_tensors = [A, P]
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = A
    return self.out_tensors
Example #26
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Merge features together """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        atom_embedding = in_layers[0].out_tensor
        radial_symmetry = in_layers[1].out_tensor
        angular_symmetry = in_layers[2].out_tensor
        atom_flags = in_layers[3].out_tensor

        out_tensor = tf.concat(
            [atom_embedding, radial_symmetry, angular_symmetry], axis=2)
        self.out_tensor = out_tensor * atom_flags[:, :, 0:1]
Example #27
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_number
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_number = in_layers[0].out_tensor
    atom_features = tf.nn.embedding_lookup(self.embedding_list, atom_number)
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = atom_features
Example #28
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ description and explanation refer to deepchem.nn.WeaveLayer
        parent layers: [atom_features, pair_features], pair_split, atom_to_pair
        """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()

        atom_features = in_layers[0].out_tensor[0]
        pair_features = in_layers[0].out_tensor[1]

        pair_split = in_layers[1].out_tensor
        atom_to_pair = in_layers[2].out_tensor

        AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
        AA = self.activation(AA)
        PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
        PA = self.activation(PA)
        PA = tf.segment_sum(PA, pair_split)

        A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
        A = self.activation(A)

        if self.update_pair:
            AP_ij = tf.matmul(
                tf.reshape(tf.gather(atom_features, atom_to_pair),
                           [-1, 2 * self.n_atom_input_feat]),
                self.W_AP) + self.b_AP
            AP_ij = self.activation(AP_ij)
            AP_ji = tf.matmul(
                tf.reshape(
                    tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
                    [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
            AP_ji = self.activation(AP_ji)

            PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
            PP = self.activation(PP)
            P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1),
                          self.W_P) + self.b_P
            P = self.activation(P)
        else:
            P = pair_features

        out_tensor = [A, P]
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
Example #29
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_number
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_number = in_layers[0].out_tensor
    atom_features = tf.nn.embedding_lookup(self.embedding_list, atom_number)
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = atom_features
Example #30
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Merge features together """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    atom_embedding = in_layers[0].out_tensor
    radial_symmetry = in_layers[1].out_tensor
    angular_symmetry = in_layers[2].out_tensor
    atom_flags = in_layers[3].out_tensor

    out_tensor = tf.concat(
        [atom_embedding, radial_symmetry, angular_symmetry], axis=2)
    self.out_tensor = out_tensor * atom_flags[:, :, 0:1]
Example #31
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate distance matrix for BPSymmetryFunction with trainable cutoff """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    d = in_layers[0].out_tensor
    d_flag = in_layers[1].out_tensor
    # Cutoff with threshold Rc
    d_flag = d_flag * tf.nn.relu(tf.sign(self.Rc - d))
    d = 0.5 * (tf.cos(np.pi * d / self.Rc) + 1)
    out_tensor = d * d_flag
    out_tensor = out_tensor * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
    self.out_tensor = out_tensor
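
The cutoff above combines a hard gate, relu(sign(Rc - d)), with the smooth cosine taper 0.5 * (cos(pi * d / Rc) + 1); the (1 - eye) factor finally zeroes the diagonal self-distances. A NumPy sketch with an arbitrary Rc:

import numpy as np

Rc = 6.0
d = np.array([0.0, 3.0, 6.0, 9.0])
flag = np.maximum(np.sign(Rc - d), 0.0)  # 1 inside Rc, 0 outside
f = 0.5 * (np.cos(np.pi * d / Rc) + 1) * flag
print(f)  # [1.0, 0.5, 0.0, 0.0]
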
Example #32
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Angular Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    max_atoms = self.max_atoms
    d_cutoff = in_layers[0].out_tensor
    d = in_layers[1].out_tensor
    atom_coordinates = in_layers[2].out_tensor
    vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \
                       tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
    R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))
    R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))
    R_jk = tf.tile(tf.expand_dims(d, axis=1), (1, max_atoms, 1, 1))
    f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))
    f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))
    f_R_jk = tf.tile(tf.expand_dims(d_cutoff, axis=1), (1, max_atoms, 1, 1))

    # Define angle theta: theta = (R_ij vector . R_ik vector) / (R_ij distance * R_ik distance)
    theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \
                          tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)

    theta = tf.math.divide(theta, R_ij * R_ik + 1e-5)

    R_ij = tf.stack([R_ij] * self.length, axis=4)
    R_ik = tf.stack([R_ik] * self.length, axis=4)
    R_jk = tf.stack([R_jk] * self.length, axis=4)
    f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)
    f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)
    f_R_jk = tf.stack([f_R_jk] * self.length, axis=4)

    theta = tf.stack([theta] * self.length, axis=4)
    lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))
    zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))
    ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))

    out_tensor = tf.pow(1 + lambd * tf.cos(theta), zeta) * \
                 tf.exp(-ita * (tf.square(R_ij) + tf.square(R_ik) + tf.square(R_jk))) * \
                 f_R_ij * f_R_ik * f_R_jk
    out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3]) * \
                      tf.pow(tf.constant(2.), 1 - tf.reshape(self.zeta, (1, 1, -1)))

    if set_tensors:
      self.out_tensor = out_tensor

    return out_tensor
Example #33
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate Radial Symmetry Function """
        init_fn = initializations.get(self.init)  # Set weight initialization
        if self.activation == 'ani':
            activation_fn = self.ani_activate
        else:
            activation_fn = activations.get(self.activation)
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        inputs = in_layers[0].out_tensor
        atom_numbers = in_layers[1].out_tensor
        in_channels = inputs.get_shape().as_list()[-1]
        self.W = init_fn(
            [len(self.atom_number_cases), in_channels, self.out_channels])

        self.b = model_ops.zeros(
            (len(self.atom_number_cases), self.out_channels))
        outputs = []
        for i, atom_case in enumerate(self.atom_number_cases):
            # Optimization: implement a broadcasted matmul (tensor contraction)
            # with a reshape trick. Note that np.matmul and tf.matmul behave
            # differently when broadcasting.

            a = inputs  # (i,j,k)
            b = self.W[i, :, :]  # (k, l)

            ai = tf.shape(a)[0]
            aj = tf.shape(a)[1]
            ak = tf.shape(a)[2]
            bl = tf.shape(b)[1]

            output = activation_fn(
                tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b),
                           [ai, aj, bl]) + self.b[i, :])

            mask = 1 - tf.cast(tf.cast(atom_numbers - atom_case, tf.bool),
                               tf.float32)
            output = tf.reshape(output * tf.expand_dims(mask, 2),
                                (-1, self.max_atoms, self.out_channels))
            outputs.append(output)
        out_tensor = tf.add_n(outputs)

        if set_tensors:
            self.out_tensor = out_tensor

        return out_tensor
Example #34
    def create_tensor(self, in_layers=None, **kwargs):
        """description and explanation refer to deepchem.nn.DTNNGather
    parent layers: atom_features, atom_membership
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        output = in_layers[0].out_tensor
        atom_membership = in_layers[1].out_tensor
        for i, W in enumerate(self.W_list):
            output = tf.matmul(output, W) + self.b_list[i]
            output = self.activation(output)
        output = tf.segment_sum(output, atom_membership)
        self.out_tensor = output
Example #35
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate distance matrix for BPSymmetryFunction with trainable cutoff """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    max_atoms = self.max_atoms
    atom_coordinates = in_layers[0].out_tensor
    atom_flags = in_layers[1].out_tensor
    tensor1 = tf.tile(
        tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1))
    tensor2 = tf.tile(
        tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))
    # Calculate pairwise distance
    d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))
    # Masking for valid atom index
    self.out_tensor = d * tf.to_float(atom_flags)
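
The two expand_dims/tile calls above build every (i, j) coordinate pair so that the norm of their difference fills the max_atoms x max_atoms distance matrix. In NumPy the same construction falls out of broadcasting:

import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0],
                   [0.0, 0.0, 1.0]])
diff = coords[:, None, :] - coords[None, :, :]  # broadcasting replaces tile
d = np.sqrt((diff ** 2).sum(axis=2))
print(d[0, 1])  # 5.0, the 3-4-5 triangle
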
Example #36
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate distance matrix for BPSymmetryFunction with trainable cutoff """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        max_atoms = self.max_atoms
        atom_coordinates = in_layers[0].out_tensor
        atom_flags = in_layers[1].out_tensor
        tensor1 = tf.tile(tf.expand_dims(atom_coordinates, axis=2),
                          (1, 1, max_atoms, 1))
        tensor2 = tf.tile(tf.expand_dims(atom_coordinates, axis=1),
                          (1, max_atoms, 1, 1))
        # Calculate pairwise distance
        d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))
        # Masking for valid atom index
        self.out_tensor = d * tf.to_float(atom_flags)
Example #37
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    init_fn = initializations.get(self.init)  # Set weight initialization
    if self.activation == 'ani':
      activation_fn = self.ani_activate
    else:
      activation_fn = activations.get(self.activation)
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    inputs = in_layers[0].out_tensor
    atom_numbers = in_layers[1].out_tensor
    in_channels = inputs.get_shape().as_list()[-1]
    self.W = init_fn(
        [len(self.atom_number_cases), in_channels, self.out_channels])

    self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
    outputs = []
    for i, atom_case in enumerate(self.atom_number_cases):
      # Optimization: implement a broadcasted matmul (tensor contraction)
      # with a reshape trick. Note that np.matmul and tf.matmul behave
      # differently when broadcasting.

      a = inputs  # (i,j,k)
      b = self.W[i, :, :]  # (k, l)

      ai = tf.shape(a)[0]
      aj = tf.shape(a)[1]
      ak = tf.shape(a)[2]
      bl = tf.shape(b)[1]

      output = activation_fn(
          tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b), [ai, aj, bl]) +
          self.b[i, :])

      mask = 1 - tf.cast(tf.cast(atom_numbers - atom_case, tf.bool), tf.float32)
      output = tf.reshape(output * tf.expand_dims(mask, 2),
                          (-1, self.max_atoms, self.out_channels))
      outputs.append(output)
    out_tensor = tf.add_n(outputs)

    if set_tensors:
      self.out_tensor = out_tensor

    return out_tensor
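
The reshape trick in this example is an ordinary tensor contraction: collapsing (i, j, k) to (i*j, k), multiplying by the (k, l) weight slice, and reshaping back is equivalent to tensordot over the last axis. A NumPy check:

import numpy as np

a = np.random.rand(2, 3, 4)  # (i, j, k)
b = np.random.rand(4, 5)     # (k, l)
via_reshape = np.matmul(a.reshape(-1, 4), b).reshape(2, 3, 5)
via_tensordot = np.tensordot(a, b, axes=[[2], [0]])
print(np.allclose(via_reshape, via_tensordot))  # True
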
Example #38
    def create_tensor(self, in_layers=None, **kwargs):
        """description and explanation refer to deepchem.nn.DAGGather
    parent layers: atom_features, membership
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        # Add trainable weights
        self.build()

        # Extract atom_features
        atom_features = in_layers[0].out_tensor
        membership = in_layers[1].out_tensor
        # Sum atom features within each graph
        graph_features = tf.segment_sum(atom_features, membership)
        # Pass the summed graph features through the output layers
        outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list)
        self.out_tensor = outputs
Example #39
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self._build()

    A_tilda_k = in_layers[0].out_tensor
    X = in_layers[1].out_tensor
    adp_fn_val = in_layers[2].out_tensor

    attn_weights = tf.multiply(adp_fn_val, self.W)
    wt_adjacency = attn_weights * A_tilda_k
    out = tf.matmul(wt_adjacency, X) + tf.expand_dims(self.b, axis=1)

    out_tensor = out
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor

    return out_tensor
Example #40
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)
        self._build()

        A_tilda_k = in_layers[0].out_tensor
        X = in_layers[1].out_tensor
        adp_fn_val = in_layers[2].out_tensor

        attn_weights = tf.multiply(adp_fn_val, self.W)
        wt_adjacency = attn_weights * A_tilda_k
        out = tf.matmul(wt_adjacency, X) + tf.expand_dims(self.b, axis=1)

        out_tensor = out
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor

        return out_tensor
Example #41
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    inputs = in_layers[0].out_tensor
    atom_numbers = in_layers[1].out_tensor
    in_channels = inputs.get_shape().as_list()[-1]
    self.W = self.init(
        [len(self.atom_number_cases), in_channels, self.out_channels])
    self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
    outputs = []
    for i, atom_case in enumerate(self.atom_number_cases):
      output = self.activation(
          tf.tensordot(inputs, self.W[i, :, :], [[2], [0]]) + self.b[i, :])
      mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
      output = tf.reshape(output * tf.expand_dims(mask, 2), (-1, self.max_atoms,
                                                             self.out_channels))
      outputs.append(output)
    self.out_tensor = tf.add_n(outputs)
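
The mask above selects atoms of a single element: (atom_numbers - atom_case) is zero exactly for that element, so casting through bool and subtracting from 1 gives a 0/1 selector. A NumPy sketch:

import numpy as np

atom_numbers = np.array([[1, 6, 8, 6]])  # H, C, O, C
atom_case = 6                            # select carbons
mask = 1 - (atom_numbers - atom_case).astype(bool).astype(float)
print(mask)  # [[0. 1. 0. 1.]]
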
Example #42
    def create_tensor(self, in_layers=None, **kwargs):
        """ description and explanation refer to deepchem.nn.WeaveGather
    parent layers: atom_features, atom_split
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        self.build()
        outputs = in_layers[0].out_tensor
        atom_split = in_layers[1].out_tensor

        if self.gaussian_expand:
            outputs = self.gaussian_histogram(outputs)

        output_molecules = tf.segment_sum(outputs, atom_split)

        if self.gaussian_expand:
            output_molecules = tf.matmul(output_molecules, self.W) + self.b
            output_molecules = self.activation(output_molecules)
        self.out_tensor = output_molecules
Example #43
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    act_fn = activations.get('sigmoid')
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self._build()

    A_tilda_k = in_layers[0].out_tensor
    X = in_layers[1].out_tensor

    if self.combine_method == "linear":
      concatenated = tf.concat([A_tilda_k, X], axis=2)
      adp_fn_val = act_fn(
          tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
    else:
      adp_fn_val = act_fn(tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))
    out_tensor = adp_fn_val
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor

    return out_tensor
Example #44
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    output = in_layers[0].out_tensor
    atom_membership = in_layers[1].out_tensor
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    out_tensor = output
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #45
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        act_fn = activations.get('sigmoid')
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)
        self._build()

        A_tilda_k = in_layers[0].out_tensor
        X = in_layers[1].out_tensor

        if self.combine_method == "linear":
            concatenated = tf.concat([A_tilda_k, X], axis=2)
            adp_fn_val = act_fn(
                tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
        else:
            adp_fn_val = act_fn(
                tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))
        out_tensor = adp_fn_val
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor

        return out_tensor
Example #46
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """
    parent layers: atom_features, membership
    """
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        # Add trainable weights
        self.build()

        # Extract atom_features
        atom_features = in_layers[0].out_tensor
        membership = in_layers[1].out_tensor
        # Sum atom features within each graph
        graph_features = tf.segment_sum(atom_features, membership)
        # Pass the summed graph features through the output layers
        outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list)
        out_tensor = outputs
        if set_tensors:
            self.variables = self.trainable_weights
            self.out_tensor = out_tensor
        return out_tensor
Example #47
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    output = in_layers[0].out_tensor
    atom_membership = in_layers[1].out_tensor
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    out_tensor = output
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #48
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    membership = in_layers[1].out_tensor
    # Sum atom features within each graph
    graph_features = tf.segment_sum(atom_features, membership)
    # Pass the summed graph features through the output layers
    outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list,
                                 **kwargs)
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Example #49
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, parents, calculation_orders, calculation_masks, n_atoms
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    atom_features = in_layers[0].out_tensor
    # each atom corresponds to a graph, represented by a `max_atoms*max_atoms` int32 matrix of indices
    # each graph includes `max_atoms` steps (corresponding to rows) of calculating graph features
    parents = in_layers[1].out_tensor
    # target atoms for each step: (batch_size*max_atoms) * max_atoms
    calculation_orders = in_layers[2].out_tensor
    calculation_masks = in_layers[3].out_tensor

    n_atoms = in_layers[4].out_tensor
    # initialize graph features for each graph
    graph_features_initial = tf.zeros((self.max_atoms * self.batch_size,
                                       self.max_atoms + 1, self.n_graph_feat))
    # another row of zeros is generated for padded dummy atoms
    graph_features = tf.Variable(graph_features_initial, trainable=False)

    for count in range(self.max_atoms):
      # `count`-th step
      # extracting atom features of target atoms: (batch_size*max_atoms) * n_atom_features
      mask = calculation_masks[:, count]
      current_round = tf.boolean_mask(calculation_orders[:, count], mask)
      batch_atom_features = tf.gather(atom_features, current_round)

      # generating index for graph features used in the inputs
      index = tf.stack(
          [
              tf.reshape(
                  tf.stack(
                      [tf.boolean_mask(tf.range(n_atoms), mask)] *
                      (self.max_atoms - 1),
                      axis=1), [-1]),
              tf.reshape(tf.boolean_mask(parents[:, count, 1:], mask), [-1])
          ],
          axis=1)
      # extracting graph features for parents of the target atoms, then flatten
      # shape: (batch_size*max_atoms) * [(max_atoms-1)*n_graph_features]
      batch_graph_features = tf.reshape(
          tf.gather_nd(graph_features, index),
          [-1, (self.max_atoms - 1) * self.n_graph_feat])

      # concat into the input tensor: (batch_size*max_atoms) * n_inputs
      batch_inputs = tf.concat(
          axis=1, values=[batch_atom_features, batch_graph_features])
      # DAGgraph_step maps from batch_inputs to a batch of graph_features
      # of shape: (batch_size*max_atoms) * n_graph_features
      # representing the graph features of target atoms in each graph
      batch_outputs = self.DAGgraph_step(batch_inputs, self.W_list, self.b_list,
                                         **kwargs)

      # index for target atoms
      target_index = tf.stack([tf.range(n_atoms), parents[:, count, 0]], axis=1)
      target_index = tf.boolean_mask(target_index, mask)
      # update the graph features for target atoms
      graph_features = tf.scatter_nd_update(graph_features, target_index,
                                            batch_outputs)

    out_tensor = batch_outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
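
Each step above first drops the atoms not scheduled in the current round (tf.boolean_mask) and then fetches their feature rows (tf.gather). A NumPy sketch of those two indexing operations:

import numpy as np

calculation_orders = np.array([3, 1, 2, 0])  # candidate atom indices
mask = np.array([True, False, True, True])   # calculation_masks[:, count]
current_round = calculation_orders[mask]     # tf.boolean_mask analogue
atom_features = np.random.rand(4, 8)
batch_atom_features = atom_features[current_round]  # tf.gather analogue
print(batch_atom_features.shape)  # (3, 8)
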
Example #50
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, parents, calculation_orders, calculation_masks, n_atoms
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    atom_features = in_layers[0].out_tensor
    # each atom corresponds to a graph, represented by a `max_atoms*max_atoms` int32 matrix of indices
    # each graph includes `max_atoms` steps (corresponding to rows) of calculating graph features
    parents = in_layers[1].out_tensor
    # target atoms for each step: (batch_size*max_atoms) * max_atoms
    calculation_orders = in_layers[2].out_tensor
    calculation_masks = in_layers[3].out_tensor

    n_atoms = in_layers[4].out_tensor
    # initialize graph features for each graph
    graph_features_initial = tf.zeros((self.max_atoms * self.batch_size,
                                       self.max_atoms + 1, self.n_graph_feat))
    # another row of zeros is generated for padded dummy atoms
    graph_features = tf.Variable(graph_features_initial, trainable=False)

    for count in range(self.max_atoms):
      # `count`-th step
      # extracting atom features of target atoms: (batch_size*max_atoms) * n_atom_features
      mask = calculation_masks[:, count]
      current_round = tf.boolean_mask(calculation_orders[:, count], mask)
      batch_atom_features = tf.gather(atom_features, current_round)

      # generating index for graph features used in the inputs
      index = tf.stack(
          [
              tf.reshape(
                  tf.stack(
                      [tf.boolean_mask(tf.range(n_atoms), mask)] *
                      (self.max_atoms - 1),
                      axis=1), [-1]),
              tf.reshape(tf.boolean_mask(parents[:, count, 1:], mask), [-1])
          ],
          axis=1)
      # extracting graph features for parents of the target atoms, then flatten
      # shape: (batch_size*max_atoms) * [(max_atoms-1)*n_graph_features]
      batch_graph_features = tf.reshape(
          tf.gather_nd(graph_features, index),
          [-1, (self.max_atoms - 1) * self.n_graph_feat])

      # concat into the input tensor: (batch_size*max_atoms) * n_inputs
      batch_inputs = tf.concat(
          axis=1, values=[batch_atom_features, batch_graph_features])
      # DAGgraph_step maps from batch_inputs to a batch of graph_features
      # of shape: (batch_size*max_atoms) * n_graph_features
      # representing the graph features of target atoms in each graph
      batch_outputs = self.DAGgraph_step(batch_inputs, self.W_list, self.b_list,
                                         **kwargs)

      # index for target atoms
      target_index = tf.stack([tf.range(n_atoms), parents[:, count, 0]], axis=1)
      target_index = tf.boolean_mask(target_index, mask)
      # update the graph features for target atoms
      graph_features = tf.scatter_nd_update(graph_features, target_index,
                                            batch_outputs)

    out_tensor = batch_outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor