Example #1
    def __init__(self,
                 n_embedding=30,
                 n_outputs=100,
                 layer_sizes=[100],
                 output_activation=True,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
        Parameters
        ----------
        n_embedding: int, optional
          Number of features for each atom
        n_outputs: int, optional
          Number of features for each molecule (output)
        layer_sizes: list of int, optional (default=[100])
          Structure of the hidden layer(s)
        output_activation: bool, optional
          Whether to apply the activation function to the output layer
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied
        """
        self.n_embedding = n_embedding
        self.n_outputs = n_outputs
        self.layer_sizes = layer_sizes
        self.output_activation = output_activation
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
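A minimal usage sketch, assuming this is the constructor of the DTNNGather layer named in the super() call and using only the parameters the signature exposes:

    # Gather 30-dim atom embeddings into a 100-dim molecule vector
    # through one hidden layer of width 100
    gather = DTNNGather(n_embedding=30, n_outputs=100, layer_sizes=[100],
                        output_activation=True, activation='tanh')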
Example #2
    def __init__(self,
                 n_embedding=30,
                 n_distance=100,
                 n_hidden=60,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
        Parameters
        ----------
        n_embedding: int, optional
          Number of features for each atom
        n_distance: int, optional
          Granularity of the distance matrix
        n_hidden: int, optional
          Number of nodes in hidden layer
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied
        """
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
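As above, a hedged construction sketch; DTNNStep is the class named in the super() call, and only parameters from the signature are used:

    # One DTNN interaction pass: 30-dim atom embeddings, a distance
    # matrix discretized into 100 bins, and a 60-unit hidden layer
    step = DTNNStep(n_embedding=30, n_distance=100, n_hidden=60)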
Example #3
    def __init__(self,
                 batch_size,
                 n_input=128,
                 gaussian_expand=False,
                 init='glorot_uniform',
                 activation='tanh',
                 eps=1e-3,
                 momentum=0.99,
                 **kwargs):
        """
        Parameters
        ----------
        batch_size: int
          Number of molecules in a batch
        n_input: int, optional
          Number of features for each input molecule
        gaussian_expand: bool, optional
          Whether to expand each dimension of atomic features by gaussian histogram
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied
        eps: float, optional
          Small constant added for numerical stability
        momentum: float, optional
          Momentum for the moving statistics
        """
        self.n_input = n_input
        self.batch_size = batch_size
        self.gaussian_expand = gaussian_expand
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.eps = eps
        self.momentum = momentum
        self.W, self.b = None, None
        super(WeaveGather, self).__init__(**kwargs)
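A construction sketch under the same assumptions (class name taken from the super() call; batch_size is the only required argument):

    # Gather per-atom Weave features into per-molecule features,
    # expanding each dimension with a gaussian histogram
    gather = WeaveGather(batch_size=64, n_input=128, gaussian_expand=True)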
Example #4
    def __init__(self, n_hidden=100, init='glorot_uniform'):
        self.n_hidden = n_hidden
        self.init = initializations.get(init)
        # Input-to-hidden weights for the GRU update (z), reset (r),
        # and candidate (h) gates
        Wz = self.init([n_hidden, n_hidden])
        Wr = self.init([n_hidden, n_hidden])
        Wh = self.init([n_hidden, n_hidden])
        # Recurrent (hidden-to-hidden) weights for the same three gates
        Uz = self.init([n_hidden, n_hidden])
        Ur = self.init([n_hidden, n_hidden])
        Uh = self.init([n_hidden, n_hidden])
        # Gate biases, initialized to zero
        bz = model_ops.zeros(shape=(n_hidden,))
        br = model_ops.zeros(shape=(n_hidden,))
        bh = model_ops.zeros(shape=(n_hidden,))
        self.trainable_weights = [Wz, Wr, Wh, Uz, Ur, Uh, bz, br, bh]
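The nine tensors above are the standard parameters of a GRU cell. A sketch of the forward step such weights usually drive, assuming x and h are [batch, n_hidden] tensors; the names and the interpolation convention are illustrative, not from the source:

    # z, r in (0, 1) gate how much of the old state is kept or reset
    z = torch.sigmoid(torch.matmul(x, Wz) + torch.matmul(h, Uz) + bz)
    r = torch.sigmoid(torch.matmul(x, Wr) + torch.matmul(h, Ur) + br)
    # Candidate state from the input and the reset-scaled old state
    h_tilde = torch.tanh(torch.matmul(x, Wh) + torch.matmul(r * h, Uh) + bh)
    # Interpolate between old state and candidate (one common convention)
    h_new = z * h + (1 - z) * h_tilde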
Example #5
    def __init__(self,
                 pair_features,
                 n_pair_features=8,
                 n_hidden=100,
                 init='glorot_uniform'):
        self.n_pair_features = n_pair_features
        self.n_hidden = n_hidden
        self.init = initializations.get(init)
        W = self.init([n_pair_features, n_hidden * n_hidden])
        b = model_ops.zeros(shape=(n_hidden * n_hidden,))
        # Map each pair's features to its own n_hidden x n_hidden matrix
        self.A = torch.matmul(pair_features, W) + b
        self.A = torch.reshape(self.A, (-1, n_hidden, n_hidden))
        self.trainable_weights = [W, b]
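Here self.A ends up with shape [n_pairs, n_hidden, n_hidden]: one matrix per pair. A hedged sketch of how per-pair matrices like these are typically applied in message passing, where h is a hypothetical [n_pairs, n_hidden] tensor of source-atom states:

    # Batched matrix-vector product: one message per pair
    messages = torch.matmul(self.A, h.unsqueeze(-1)).squeeze(-1)  # [n_pairs, n_hidden]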
Example #6
    def __init__(self,
                 n_graph_feat=30,
                 n_atom_feat=75,
                 max_atoms=50,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 batch_size=64,
                 **kwargs):
        """
        Parameters
        ----------
        n_graph_feat: int, optional
          Number of features for each node (and the whole graph).
        n_atom_feat: int, optional
          Number of features listed per atom.
        max_atoms: int, optional
          Maximum number of atoms in molecules.
        layer_sizes: list of int, optional (default=[100])
          List of hidden layer size(s):
          the length of this list represents the number of hidden layers,
          and each element is the width of the corresponding hidden layer.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied.
        dropout: float, optional
          Dropout probability in hidden layer(s).
        batch_size: int, optional
          Number of molecules in a batch.
        """
        super(DAGLayer, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.batch_size = batch_size
        # Number of inputs each step
        self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_graph_feat
        self.n_atom_feat = n_atom_feat
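With the defaults above, the input width each step sees works out to n_atom_feat + (max_atoms - 1) * n_graph_feat = 75 + 49 * 30 = 1545. A construction sketch using only the signature shown:

    # DAG layer producing 30 graph features per node, for molecules
    # of up to 50 atoms with 75 raw features per atom
    dag = DAGLayer(n_graph_feat=30, n_atom_feat=75, max_atoms=50,
                   layer_sizes=[100], dropout=0.25, batch_size=64)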
Example #7
    def build(self):
        """ Construct internal trainable weights.
        TODO(rbharath): Need to make this not set instance variables to
        follow style in other layers.
        """
        init = initializations.get(self.init)  # Set weight initialization

        # Atom-to-atom transform
        self.W_AA = init([self.n_atom_input_feat, self.n_hidden_AA])
        self.b_AA = model_ops.zeros(shape=[self.n_hidden_AA])

        # Pair-to-atom transform
        self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
        self.b_PA = model_ops.zeros(shape=[self.n_hidden_PA])

        # Final atom feature update
        self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
        self.b_A = model_ops.zeros(shape=[self.n_atom_output_feat])

        self.trainable_weights = [
            self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A
        ]
        if self.update_pair:
            # Atom-to-pair transform (takes both atoms of a pair)
            self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
            self.b_AP = model_ops.zeros(shape=[self.n_hidden_AP])

            # Pair-to-pair transform
            self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
            self.b_PP = model_ops.zeros(shape=[self.n_hidden_PP])

            # Final pair feature update
            self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
            self.b_P = model_ops.zeros(shape=[self.n_pair_output_feat])

            self.trainable_weights.extend([
                self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P
            ])
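A sketch of how weights with these shapes are typically consumed in a Weave-style atom update. The tensor names (atom_feats, pair_sums) and the activation act are hypothetical, and the concatenation assumes n_hidden_A = n_hidden_AA + n_hidden_PA, which the shapes above imply:

    # Atom->atom and pair->atom hidden contributions
    AA = act(torch.matmul(atom_feats, self.W_AA) + self.b_AA)
    PA = act(torch.matmul(pair_sums, self.W_PA) + self.b_PA)
    # New atom features from the concatenated hidden states
    atom_out = act(torch.matmul(torch.cat([AA, PA], dim=1), self.W_A) + self.b_A)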
Example #8
    def __init__(self,
                 n_embedding=30,
                 periodic_table_length=30,
                 init='glorot_uniform',
                 **kwargs):
        """
        Parameters
        ----------
        n_embedding: int, optional
          Number of features for each atom
        periodic_table_length: int, optional
          Number of elements covered by the embedding (e.g. 83 = Bi)
        init: str, optional
          Weight initialization for filters.
        """
        self.n_embedding = n_embedding
        self.periodic_table_length = periodic_table_length
        self.init = initializations.get(init)  # Set weight initialization

        super(DTNNEmbedding, self).__init__(**kwargs)
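A construction sketch (class name from the super() call); the layer presumably builds a [periodic_table_length, n_embedding] lookup table indexed by atomic number:

    # 30-dim embedding for each of the first 30 elements
    embedding = DTNNEmbedding(n_embedding=30, periodic_table_length=30)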
Example #9
    def __init__(self,
                 M,
                 batch_size,
                 n_hidden=100,
                 init='orthogonal',
                 **kwargs):
        """
        Parameters
        ----------
        M: int
          Number of LSTM steps
        batch_size: int
          Number of samples in a batch (all batches must have the same size)
        n_hidden: int, optional
          Number of hidden units in the passing phase
        init: str, optional
          Weight initialization for filters.
        """

        self.M = M
        self.batch_size = batch_size
        self.n_hidden = n_hidden
        self.init = initializations.get(init)
        super(SetGather, self).__init__(**kwargs)
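A construction sketch using only the parameters shown (M and batch_size are required):

    # 10 LSTM refinement steps over each batch of 64 molecules
    gather = SetGather(M=10, batch_size=64, n_hidden=100)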
Example #10
    def __init__(self,
                 n_graph_feat=30,
                 n_outputs=30,
                 max_atoms=50,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 **kwargs):
        """
        Parameters
        ----------
        n_graph_feat: int, optional
          Number of features for each atom.
        n_outputs: int, optional
          Number of features for each molecule.
        max_atoms: int, optional
          Maximum number of atoms in molecules.
        layer_sizes: list of int, optional
          List of hidden layer size(s):
          the length of this list represents the number of hidden layers,
          and each element is the width of the corresponding hidden layer.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied.
        dropout: float, optional
          Dropout probability in the hidden layer(s).
        """
        super(DAGGather, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_outputs
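A final construction sketch under the same assumptions as the earlier examples (class name from the super() call, arguments from the signature):

    # Gather 30 per-atom graph features into 30 per-molecule outputs
    dag_gather = DAGGather(n_graph_feat=30, n_outputs=30, max_atoms=50,
                           layer_sizes=[100], dropout=None)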