Example #1
  def __init__(self,
               n_embedding=30,
               n_outputs=100,
               layer_sizes=[100],
               output_activation=True,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
        Parameters
        ----------
        n_embedding: int, optional
          Number of features for each atom
        n_outputs: int, optional
          Number of features for each molecule(output)
        layer_sizes: list of int, optional(default=[1000])
          Structure of hidden layer(s)
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied
        """
    self.n_embedding = n_embedding
    self.n_outputs = n_outputs
    self.layer_sizes = layer_sizes
    self.output_activation = output_activation
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNGather, self).__init__(**kwargs)
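
A minimal construction sketch for this layer, with the defaults spelled out (a sketch only; in a real graph the layer would also receive in_layers with the upstream atom features and membership, omitted here):

# Stand-alone construction for illustration.
gather = DTNNGather(
    n_embedding=30,          # per-atom feature size from the DTNN steps
    n_outputs=100,           # per-molecule output feature size
    layer_sizes=[100],       # one hidden layer of width 100
    output_activation=True)  # also apply `activation` to the final layer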
Example #2
  def __init__(self,
               n_embedding=30,
               n_distance=100,
               n_hidden=60,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
        Parameters
        ----------
        n_embedding: int, optional
          Number of features for each atom
        n_distance: int, optional
          granularity of distance matrix
        n_hidden: int, optional
          Number of nodes in hidden layer
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied
        """
    self.n_embedding = n_embedding
    self.n_distance = n_distance
    self.n_hidden = n_hidden
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNStep, self).__init__(**kwargs)
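
Both constructors resolve string names to callables at construction time via initializations.get and activations.get. A self-contained sketch of that lookup pattern (using a stand-in registry rather than the library's own modules):

import tensorflow as tf

# Stand-in for the library's string-to-callable lookup.
_ACTIVATIONS = {'tanh': tf.tanh, 'relu': tf.nn.relu}

def get_activation(name):
  if callable(name):
    return name  # already a function: pass it through unchanged
  return _ACTIVATIONS[name]

act = get_activation('tanh')  # same idea as activations.get('tanh')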
Example #3
  def __init__(self,
               batch_size,
               n_input=128,
               gaussian_expand=False,
               init='glorot_uniform',
               activation='tanh',
               epsilon=1e-3,
               momentum=0.99,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: boolean. optional
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied

    """
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.epsilon = epsilon
    self.momentum = momentum
    self.W, self.b = None, None
    super(WeaveGather, self).__init__(**kwargs)
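
A minimal construction sketch (batch_size is the only required argument; the layer would normally be wired into a graph via in_layers, omitted here):

# Stand-alone construction for illustration.
gather = WeaveGather(
    batch_size=64,         # molecules per batch (required)
    n_input=128,           # per-atom feature width coming in
    gaussian_expand=True)  # histogram-expand features before gathering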
Example #4
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Creates weave tensors.

    parent layers: [atom_features, pair_features], pair_split, atom_to_pair
    """
    activation = activations.get(self.activation)  # Get activations
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()

    atom_features = in_layers[0].out_tensor
    pair_features = in_layers[1].out_tensor

    pair_split = in_layers[2].out_tensor
    atom_to_pair = in_layers[3].out_tensor

    # Atom -> atom contribution.
    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
    AA = activation(AA)
    # Pair -> atom contribution, pooled over all pairs touching each atom.
    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
    PA = activation(PA)
    PA = tf.segment_sum(PA, pair_split)

    # Updated atom features.
    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
    A = activation(A)

    if self.update_pair:
      # Atom -> pair contributions, computed for both orderings (i, j) and
      # (j, i) so the pair update is symmetric in its two atoms.
      AP_ij = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, atom_to_pair),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ij = activation(AP_ij)
      AP_ji = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ji = activation(AP_ji)

      # Pair -> pair contribution and the updated pair features.
      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
      PP = activation(PP)
      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
      P = activation(P)
    else:
      P = pair_features

    self.out_tensors = [A, P]
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = A
    return self.out_tensors
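
The PA step relies on tf.segment_sum to pool transformed pair features back onto their atoms via pair_split. A self-contained sketch of that behavior (TF 1.x name; in TF 2.x it is tf.math.segment_sum):

import tensorflow as tf

# Three pair rows; pair_split assigns rows 0 and 1 to atom 0, row 2 to atom 1.
pa = tf.constant([[1., 1.], [2., 2.], [3., 3.]])
pair_split = tf.constant([0, 0, 1])
pooled = tf.segment_sum(pa, pair_split)  # -> [[3., 3.], [3., 3.]]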
Example #5
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    init_fn = initializations.get(self.init)  # Set weight initialization
    if self.activation == 'ani':
      activation_fn = self.ani_activate
    else:
      activation_fn = activations.get(self.activation)
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    inputs = in_layers[0].out_tensor
    atom_numbers = in_layers[1].out_tensor
    in_channels = inputs.get_shape().as_list()[-1]
    self.W = init_fn(
        [len(self.atom_number_cases), in_channels, self.out_channels])

    self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
    outputs = []
    for i, atom_case in enumerate(self.atom_number_cases):
      # Optimization: allow tensor contraction / broadcasted matmul via a
      # reshape trick. Note that np.matmul and tf.matmul behave differently
      # when broadcasting.

      a = inputs  # (i,j,k)
      b = self.W[i, :, :]  # (k, l)

      ai = tf.shape(a)[0]
      aj = tf.shape(a)[1]
      ak = tf.shape(a)[2]
      bl = tf.shape(b)[1]

      output = activation_fn(
          tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b), [ai, aj, bl]) +
          self.b[i, :])

      # mask is 1.0 where atom_numbers == atom_case and 0.0 elsewhere.
      mask = 1 - tf.cast(tf.cast(atom_numbers - atom_case, tf.bool), tf.float32)
      output = tf.reshape(output * tf.expand_dims(mask, 2),
                          (-1, self.max_atoms, self.out_channels))
      outputs.append(output)
    out_tensor = tf.add_n(outputs)

    if set_tensors:
      self.out_tensor = out_tensor

    return out_tensor
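
The reshape trick in the loop is just a batched contraction over the last axis. A NumPy check of the equivalence (illustrative shapes):

import numpy as np

a = np.random.rand(2, 5, 4)  # (i, j, k): batch, atoms, in_channels
b = np.random.rand(4, 3)     # (k, l): in_channels, out_channels

flat = np.matmul(a.reshape(2 * 5, 4), b).reshape(2, 5, 3)
direct = np.einsum('ijk,kl->ijl', a, b)
assert np.allclose(flat, direct)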
Example #6
  def __init__(self,
               n_graph_feat=30,
               n_atom_feat=75,
               max_atoms=50,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               batch_size=64,
               **kwargs):
    """
        Parameters
        ----------
        n_graph_feat: int, optional
          Number of features for each node(and the whole grah).
        n_atom_feat: int, optional
          Number of features listed per atom.
        max_atoms: int, optional
          Maximum number of atoms in molecules.
        layer_sizes: list of int, optional(default=[100])
          List of hidden layer size(s):
          length of this list represents the number of hidden layers,
          and each element is the width of corresponding hidden layer.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied.
        dropout: float, optional
          Dropout probability in hidden layer(s).
        batch_size: int, optional
          number of molecules in a batch.
        """
    super(DAGLayer, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.batch_size = batch_size
    # Number of inputs at each step: the atom's own features plus graph
    # features from up to (max_atoms - 1) other atoms.
    self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_graph_feat
    self.n_atom_feat = n_atom_feat
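
With the defaults above, the per-step input width works out to:

# n_inputs = n_atom_feat + (max_atoms - 1) * n_graph_feat
n_inputs = 75 + (50 - 1) * 30  # 75 + 1470 = 1545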
Example #7
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    act_fn = activations.get('sigmoid')
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self._build()

    A_tilda_k = in_layers[0].out_tensor
    X = in_layers[1].out_tensor

    if self.combine_method == "linear":
      # Linear combine: sigmoid gate over [A_tilda_k, X] concatenated along
      # the feature axis and projected by a learned weight matrix.
      concatenated = tf.concat([A_tilda_k, X], axis=2)
      adp_fn_val = act_fn(
          tf.tensordot(concatenated, self.trainable_weights[0], axes=1))
    else:
      # Bilinear combine: sigmoid gate over A_tilda_k (X Q).
      adp_fn_val = act_fn(tf.matmul(A_tilda_k, tf.tensordot(X, self.Q, axes=1)))
    out_tensor = adp_fn_val
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor

    return out_tensor
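
tf.tensordot(..., axes=1) contracts the last axis of its first argument against the first axis of its second, i.e. a matmul broadcast over the leading dimensions. A NumPy sketch of the linear branch's shapes (the widths are illustrative, not taken from the layer):

import numpy as np

A_tilda_k = np.random.rand(2, 6, 6)  # (batch, nodes, nodes), illustrative
X = np.random.rand(2, 6, 8)          # (batch, nodes, features), illustrative
W = np.random.rand(6 + 8, 4)         # stand-in for trainable_weights[0]

concatenated = np.concatenate([A_tilda_k, X], axis=2)  # (2, 6, 14)
out = np.tensordot(concatenated, W, axes=1)            # (2, 6, 4)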
Example #8
  def __init__(self,
               n_graph_feat=30,
               n_outputs=30,
               max_atoms=50,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               **kwargs):
    """
        Parameters
        ----------
        n_graph_feat: int, optional
          Number of features for each atom.
        n_outputs: int, optional
          Number of features for each molecule.
        max_atoms: int, optional
          Maximum number of atoms in molecules.
        layer_sizes: list of int, optional
          List of hidden layer size(s):
          length of this list represents the number of hidden layers,
          and each element is the width of corresponding hidden layer.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied.
        dropout: float, optional
          Dropout probability in the hidden layer(s).
        """
    super(DAGGather, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_outputs
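
The layer_sizes list doubles as the depth specification: its length is the number of hidden layers and each entry is that layer's width. A construction sketch of two valid configurations (stand-alone, for illustration):

# One hidden layer of width 100 (the default):
gather_shallow = DAGGather(layer_sizes=[100])

# Three hidden layers of widths 200, 100 and 50, with dropout:
gather_deep = DAGGather(layer_sizes=[200, 100, 50], dropout=0.25)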