Code example #1
File: modules.py  Project: torchmd/mdgrad
    def __init__(self,
                 n_atom_basis,
                 n_filters,
                 n_gaussians,
                 cutoff,
                 trainable_gauss,
                 ):
        super(SchNetConv, self).__init__()
        self.moduledict = ModuleDict({
            # expand interatomic distances in a Gaussian basis and turn
            # them into continuous convolution filters
            'message_edge_filter': Sequential(
                GaussianSmearing(
                    start=0.0,
                    stop=cutoff,
                    n_gaussians=n_gaussians,
                    trainable=trainable_gauss
                ),
                Dense(in_features=n_gaussians, out_features=n_gaussians),
                shifted_softplus(),
                Dense(in_features=n_gaussians, out_features=n_filters)
            ),
            # project the atom features to the filter dimension before
            # the elementwise product with the edge filters
            'message_node_filter': Dense(in_features=n_atom_basis, out_features=n_filters),
            # map the aggregated messages back to the atom feature dimension
            'update_function': Sequential(
                Dense(in_features=n_filters, out_features=n_atom_basis),
                shifted_softplus(),
                Dense(in_features=n_atom_basis, out_features=n_atom_basis)
            )
        })
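For context, `GaussianSmearing` and `shifted_softplus` are the only non-standard layers in this filter network. The minimal, self-contained sketch below shows what these layers typically compute in SchNet-style models; `SimpleGaussianSmearing` and `shifted_softplus_fn` are illustrative stand-ins, not the project's own classes, and the width handling is an assumption.

import torch
import torch.nn as nn

def shifted_softplus_fn(x):
    # softplus shifted so that f(0) = 0, as in the original SchNet paper;
    # the project's shifted_softplus() module wraps the same idea
    return nn.functional.softplus(x) - torch.log(torch.tensor(2.0))

class SimpleGaussianSmearing(nn.Module):
    """Expand scalar distances in a basis of equally spaced Gaussians."""

    def __init__(self, start, stop, n_gaussians, trainable=False):
        super().__init__()
        offsets = torch.linspace(start, stop, n_gaussians)
        # assume widths equal to the spacing between Gaussian centers
        widths = (offsets[1] - offsets[0]) * torch.ones_like(offsets)
        if trainable:
            self.offsets = nn.Parameter(offsets)
            self.widths = nn.Parameter(widths)
        else:
            self.register_buffer("offsets", offsets)
            self.register_buffer("widths", widths)

    def forward(self, distances):
        # distances: (n_edges,) -> output: (n_edges, n_gaussians)
        diff = distances.unsqueeze(-1) - self.offsets
        return torch.exp(-0.5 * (diff / self.widths) ** 2)

# expand 5 random distances below a cutoff of 5.0 into 25 Gaussian features
d = 5.0 * torch.rand(5)
print(SimpleGaussianSmearing(start=0.0, stop=5.0, n_gaussians=25)(d).shape)  # torch.Size([5, 25])
print(shifted_softplus_fn(torch.zeros(1)))  # tensor([0.])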
Code example #2
File: schnet.py  Project: lmzhhh/NeuralForceField
    def __init__(self,
                 cutoff,
                 n_gaussians,
                 trainable_gauss,
                 n_filters,
                 dropout_rate,
                 activation='shifted_softplus'):

        super(SchNetEdgeFilter, self).__init__()

        self.filter = Sequential(
            GaussianSmearing(
                start=0.0,
                stop=cutoff,
                n_gaussians=n_gaussians,
                trainable=trainable_gauss,
            ),
            Dense(
                in_features=n_gaussians,
                out_features=n_gaussians,
                dropout_rate=dropout_rate,
            ), layer_types[activation](),
            Dense(
                in_features=n_gaussians,
                out_features=n_filters,
                dropout_rate=dropout_rate,
            ))
Code example #3
File: schnet.py  Project: lmzhhh/NeuralForceField
    def __init__(self,
                 n_atom_hidden,
                 n_filters,
                 dropout_rate,
                 n_bond_hidden,
                 activation='shifted_softplus'):
        """
        Args:
            n_atom_hidden (int): hidden dimension of the atom
                features. Same as `n_atom_basis` in regular
                SchNet, but different from `n_atom_basis` in
                SchNetFeatures, where `n_atom_basis` is the initial
                dimension of the atom feature vector and
                `n_atom_hidden` is its dimension after going through
                another network.
            n_filters (int): dimension of the distance hidden vector
            dropout_rate (float): dropout rate
            n_bond_hidden (int): dimension of the bond hidden vector
            activation (str): nonlinear activation name
        Returns:
            None
        """
        super(MixedSchNetConv, self).__init__()
        self.moduledict = ModuleDict({

            # convert the atom features to the dimension
            # of cat(hidden_distance, hidden_bond)
            "message_node_filter":
            Dense(
                in_features=n_atom_hidden,
                out_features=(n_filters + n_bond_hidden),
                dropout_rate=dropout_rate,
            ),
            # after multiplying edge features with
            # node features, convert them back to size
            # `n_atom_hidden`
            "update_function":
            Sequential(
                Dense(
                    in_features=(n_filters + n_bond_hidden),
                    out_features=n_atom_hidden,
                    dropout_rate=dropout_rate,
                ),
                layer_types[activation](),
                Dense(
                    in_features=n_atom_hidden,
                    out_features=n_atom_hidden,
                    dropout_rate=dropout_rate,
                ),
            ),
        })
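The comments above describe the intended tensor flow: per edge, the projected atom features are multiplied elementwise with the concatenation of the hidden distance and hidden bond features, and the product is mapped back to `n_atom_hidden`. A rough shape check of that step, using plain `torch.nn.Linear` layers as stand-ins for `Dense` and hypothetical per-edge tensors (this is not the project's actual `forward`):

import torch

n_edges, n_atom_hidden, n_filters, n_bond_hidden = 12, 64, 32, 16

# hypothetical per-edge inputs
node_feats_on_edges = torch.randn(n_edges, n_atom_hidden)
hidden_distance = torch.randn(n_edges, n_filters)
hidden_bond = torch.randn(n_edges, n_bond_hidden)

# "message_node_filter": n_atom_hidden -> n_filters + n_bond_hidden
node_filter = torch.nn.Linear(n_atom_hidden, n_filters + n_bond_hidden)
# "update_function": back to n_atom_hidden
update = torch.nn.Linear(n_filters + n_bond_hidden, n_atom_hidden)

edge_feats = torch.cat([hidden_distance, hidden_bond], dim=-1)  # cat(hidden_distance, hidden_bond)
message = node_filter(node_feats_on_edges) * edge_feats         # elementwise product per edge
print(update(message).shape)                                    # torch.Size([12, 64])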
Code example #4
File: schnet.py  Project: lmzhhh/NeuralForceField
    def __init__(self, n_edge_hidden, dropout_rate, activation, **kwargs):
        """
        Args:
            n_edge_hidden: dimension of the hidden edge vector
            dropout_rate (float): dropout rate
            activation (str): name of non-linear activation function
        Returns:
            None
        """

        MessagePassingModule.__init__(self)

        # As in the original ChemProp paper,
        # the features are added together linearly
        # with no bias. This means that features
        # equal to 0 don't contribute to the output.

        # This is important, for example, for
        # CpSchNetConv, in which every non-
        # bonded neighbour has zeros for its
        # ChemProp bond features. We don't want
        # these zeros contributing to the output.

        self.dense = Dense(in_features=n_edge_hidden,
                           out_features=n_edge_hidden,
                           dropout_rate=dropout_rate,
                           bias=False)
        self.activation = layer_types[activation]()
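The point made in the comment is easy to verify: a linear layer with `bias=False` maps an all-zero feature vector to an all-zero output, so non-bonded neighbours whose ChemProp bond features are zero contribute nothing. A tiny check with `torch.nn.Linear` standing in for `Dense` (dimensions are arbitrary):

import torch

lin_no_bias = torch.nn.Linear(8, 8, bias=False)
lin_with_bias = torch.nn.Linear(8, 8, bias=True)

zeros = torch.zeros(1, 8)
print(lin_no_bias(zeros).abs().sum().item())    # 0.0: zero features add nothing
print(lin_with_bias(zeros).abs().sum().item())  # generally nonzero because of the bias term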
Code example #5
def get_dense(inp_dim, out_dim, activation, bias):
    """
    Create a dense layer.
    Args:
        inp_dim (int): dimension of input
        out_dim (int): dimension of output
        activation (str): name of activation layer
        bias (bool): whether or not to add a bias
    Returns:
        (nn.layers.Dense): dense layer
    """
    if activation is not None:
        activation = layer_types[activation]()
    return Dense(inp_dim, out_dim, activation=activation, bias=bias)
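`get_dense` is a small factory around `Dense` and the `layer_types` registry. Below is a self-contained sketch of the same pattern with plain `torch.nn`, where `ACTIVATIONS` and `make_dense` are hypothetical stand-ins for `layer_types` and `get_dense` (the real `Dense` takes the activation as a constructor argument rather than via `Sequential`):

import torch.nn as nn

# hypothetical stand-in for the project's `layer_types` registry
ACTIVATIONS = {
    "relu": nn.ReLU,
    "softplus": nn.Softplus,
}

def make_dense(inp_dim, out_dim, activation=None, bias=True):
    """Linear layer optionally followed by a named activation."""
    layers = [nn.Linear(inp_dim, out_dim, bias=bias)]
    if activation is not None:
        layers.append(ACTIVATIONS[activation]())
    return nn.Sequential(*layers)

print(make_dense(64, 32, activation="softplus", bias=False))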
Code example #6
File: schnet.py  Project: lmzhhh/NeuralForceField
    def __init__(self, n_bond_hidden, cp_dropout, gauss_embed, cutoff,
                 n_gaussians, trainable_gauss, n_filters, schnet_dropout,
                 activation, **kwargs):
        """
        Args:
            n_bond_hidden (int): bond feature hidden dimension
            cp_dropout (float): dropout rate for the ChemProp convolution
            gauss_embed (bool): whether to embed distances in a
                basis of Gaussians.
            cutoff (float): neighbor list cutoff
            n_gaussians (int): number of Gaussians in which to expand
                distances.
            trainable_gauss (bool): whether Gaussian spacings and widths
                are learnable parameters.
            n_filters (int): hidden distance feature dimension
            schnet_dropout (float): dropout rate for SchNet embedding
            activation (str): name of nonlinear activation function
        Returns:
            None
        """

        ChemPropConv.__init__(self,
                              n_edge_hidden=n_bond_hidden,
                              dropout_rate=cp_dropout,
                              activation=activation)

        self.n_bond_hidden = n_bond_hidden
        self.moduledict = ModuleDict({})

        if not gauss_embed:
            return

        edge_filter = Sequential(
            GaussianSmearing(
                start=0.0,
                stop=cutoff,
                n_gaussians=n_gaussians,
                trainable=trainable_gauss,
            ),
            Dense(
                in_features=n_gaussians,
                out_features=n_filters,
                dropout_rate=schnet_dropout,
            ), layer_types[activation]())

        self.moduledict["edge_filter"] = edge_filter
Code example #7
    def __init__(self, modelparams):
        """
        Initialize model.
        Args:
            modelparams (dict): dictionary of parameters for the model
        Returns:
            None
        """

        WeightedConformers.__init__(self, modelparams)
        # get rid of the atom embedding, as we'll be using graph-based
        # atom features instead of atomic number embeddings
        delattr(self, "atom_embed")

        n_convolutions = modelparams["n_convolutions"]
        dropout_rate = modelparams["dropout_rate"]
        n_bond_hidden = modelparams["n_bond_hidden"]
        n_bond_features = modelparams["n_bond_features"]
        n_atom_basis = modelparams["n_atom_basis"]
        n_filters = modelparams["n_filters"]
        trainable_gauss = modelparams["trainable_gauss"]
        n_gaussians = modelparams["n_gaussians"]
        cutoff = modelparams["cutoff"]
        activation = modelparams["activation"]
        n_atom_hidden = modelparams["n_atom_hidden"]

        self.convolutions = nn.ModuleList([
            MixedSchNetConv(n_atom_hidden=n_atom_hidden,
                            n_filters=n_filters,
                            dropout_rate=dropout_rate,
                            n_bond_hidden=n_bond_hidden,
                            activation=activation)
            for _ in range(n_convolutions)
        ])

        # for converting distances to features before concatenating with
        # bond features
        self.distance_filter = SchNetEdgeFilter(
            cutoff=cutoff,
            n_gaussians=n_gaussians,
            trainable_gauss=trainable_gauss,
            n_filters=n_filters,
            dropout_rate=dropout_rate,
            activation=activation)

        # for converting bond features to hidden feature vectors
        self.bond_filter = Sequential(
            Dense(in_features=n_bond_features,
                  out_features=n_bond_hidden,
                  dropout_rate=dropout_rate), layer_types[activation](),
            Dense(in_features=n_bond_hidden,
                  out_features=n_bond_hidden,
                  dropout_rate=dropout_rate))

        self.atom_filter = Sequential(
            Dense(in_features=n_atom_basis,
                  out_features=n_atom_hidden,
                  dropout_rate=dropout_rate), layer_types[activation](),
            Dense(in_features=n_atom_hidden,
                  out_features=n_atom_hidden,
                  dropout_rate=dropout_rate))
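For reference, the constructor above reads the keys below from `modelparams`. A hypothetical configuration, with illustrative values only (the base `WeightedConformers.__init__` may require additional keys not shown here):

modelparams = {
    "n_convolutions": 3,
    "dropout_rate": 0.1,
    "n_bond_hidden": 64,
    "n_bond_features": 26,
    "n_atom_basis": 128,
    "n_filters": 128,
    "trainable_gauss": False,
    "n_gaussians": 25,
    "cutoff": 5.0,
    "activation": "shifted_softplus",
    "n_atom_hidden": 128,
}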