Example #1
    def __init__(self, input_layers):
        """
        Args:
            input_layers (list[dict]): instructions for
                making the input layers applied to the node
                and edge features.
        Returns:
            None 
        """
        nn.Module.__init__(self)

        # remove bias from linear layers if present

        new_layers = remove_bias(input_layers)
        self.input = construct_sequential(new_layers)
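For reference, a plausible `input_layers` spec in the same 'name'/'param' dict format used by the other examples in this file (the layer names and sizes here are illustrative, not taken from the project):

# Hypothetical spec: two linear layers separated by a shifted softplus,
# in the format consumed by remove_bias and construct_sequential
input_layers = [{'name': 'linear', 'param': {'in_features': 128,
                                             'out_features': 128}},
                {'name': 'shifted_softplus', 'param': {}},
                {'name': 'linear', 'param': {'in_features': 128,
                                             'out_features': 128}}]

The same format applies to `output_layers` in Example #2 below.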
Example #2
    def __init__(self, output_layers):
        """
        Args:
            output_layers (list[dict]): instructions for
                making the output layers applied after the
                initial node features get concatenated with
                the edge-turned-node updated features.
        Returns:
            None 
        """
        nn.Module.__init__(self)

        # remove bias from linear layers if present

        new_layers = remove_bias(output_layers)
        self.output = construct_sequential(new_layers)
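`remove_bias` is imported from elsewhere in the project. A minimal sketch of what it plausibly does, assuming it returns a copy of the layer dicts with the bias disabled on every linear layer:

import copy

def remove_bias(layers):
    # Sketch, not the project's actual implementation: deep-copy the
    # layer dicts and turn off the bias on each linear layer
    new_layers = copy.deepcopy(layers)
    for layer in new_layers:
        if layer['name'] == 'linear':
            layer['param']['bias'] = False
    return new_layers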
Example #3
File: modules.py Project: torchmd/mdgrad
    def __init__(self, update_layers):
        """
        Args:
            update_layers (list[dict]): list of layers to apply
                after the convolution
        Returns:
            None
        Example:
            update_layers = [{'name': 'linear', 'param': {'in_features': 256,
                                                          'out_features': 256}},
                             {'name': 'tanh', 'param': {}},
                             {'name': 'linear', 'param': {'in_features': 256,
                                                          'out_features': 256}},
                             {'name': 'tanh', 'param': {}}]
        """
        super(AuTopologyConv, self).__init__()

        self.update_function = construct_sequential(update_layers)
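`construct_sequential` is likewise a project helper. A minimal sketch under the assumption that it maps each layer's 'name' to a torch module and chains the results:

import torch.nn as nn

# Sketch only; the project's construct_sequential presumably also
# handles custom activations such as 'shifted_softplus'
LAYER_TYPES = {
    'linear': nn.Linear,
    'tanh': nn.Tanh,
    'sigmoid': nn.Sigmoid,
}

def construct_sequential(layers):
    # instantiate each layer from its name and params, then chain them
    return nn.Sequential(
        *[LAYER_TYPES[layer['name']](**layer['param']) for layer in layers])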
Example #4
    def __init__(self, modelparams):
        """Constructs a SchNet-Like model using a conformer representation.

        Args:
            modelparams (dict): dictionary of parameters for model. All
                are the same as in SchNet, except for  `mol_fp_layers`,
                which describes how to convert atomic fingerprints into
                a single molecular fingerprint.

        Example:

            n_atom_basis = 256
            mol_basis = 512

            # all the atomic fingerprints get added together, then go through the network created
            # by `mol_fp_layers` to turn into a molecular fingerprint
            mol_fp_layers = [{'name': 'linear', 'param': {'in_features': n_atom_basis,
                                                          'out_features': int((n_atom_basis + mol_basis)/2)}},
                             {'name': 'shifted_softplus', 'param': {}},
                             {'name': 'linear', 'param': {'in_features': int((n_atom_basis + mol_basis)/2),
                                                          'out_features': mol_basis}}]


            readoutdict = {
                "covid": [{'name': 'linear', 'param': {'in_features': mol_basis,
                                                       'out_features': int(mol_basis / 2)}},
                          {'name': 'shifted_softplus', 'param': {}},
                          {'name': 'linear', 'param': {'in_features': int(mol_basis / 2),
                                                       'out_features': 1}},
                          {'name': 'sigmoid', 'param': {}}],
            }

            # dictionary to tell you what to do with the Boltzmann factors
            # ex. 1:

            boltzmann_dict = {"type": "multiply"}

            # ex. 2
            boltzmann_layers = [{'name': 'linear', 'param': {'in_features': mol_basis + 1,
                                                             'out_features': mol_basis}},
                                {'name': 'shifted_softplus', 'param': {}},
                                {'name': 'linear', 'param': {'in_features': mol_basis,
                                                             'out_features': mol_basis}}]
            boltzmann_dict = {"type": "layers", "layers": boltzmann_layers}


            modelparams = {
                'n_atom_basis': n_atom_basis,
                'n_filters': 256,
                'n_gaussians': 32,
                'n_convolutions': 4,
                'cutoff': 5.0,
                'trainable_gauss': True,
                'readoutdict': readoutdict,
                'mol_fp_layers': mol_fp_layers,
                'boltzmann_dict': boltzmann_dict,
                'dropout_rate': 0.2,
                # `classifier` is read unconditionally in the constructor,
                # so the example dict must include it (value assumed here)
                'classifier': True
            }

            model = WeightedConformers(modelparams)

        """

        nn.Module.__init__(self)

        n_atom_basis = modelparams["n_atom_basis"]
        n_filters = modelparams["n_filters"]
        n_gaussians = modelparams["n_gaussians"]
        n_convolutions = modelparams["n_convolutions"]
        cutoff = modelparams["cutoff"]
        trainable_gauss = modelparams.get("trainable_gauss", False)
        dropout_rate = modelparams.get("dropout_rate", DEFAULT_DROPOUT_RATE)

        self.atom_embed = nn.Embedding(100, n_atom_basis, padding_idx=0)

        # convolutions
        self.convolutions = nn.ModuleList(
            [
                SchNetConv(
                    n_atom_basis=n_atom_basis,
                    n_filters=n_filters,
                    n_gaussians=n_gaussians,
                    cutoff=cutoff,
                    trainable_gauss=trainable_gauss,
                    dropout_rate=dropout_rate,
                )
                for _ in range(n_convolutions)
            ]
        )

        # extra features to consider
        self.extra_feats = modelparams.get("extra_features")
        self.ext_feat_types = modelparams.get("ext_feat_types")

        mol_fp_layers = modelparams["mol_fp_layers"]
        readoutdict = modelparams["readoutdict"]
        boltzmann_dict = modelparams["boltzmann_dict"]

        # the nn that converts atomic fingerprints to a molecular fp
        self.mol_fp_nn = construct_sequential(mol_fp_layers)

        # create a module that lets a molecular fp interact with the
        # conformer's boltzmann weight to give a final molecular fp
        self.boltz_nns = self.make_boltz_nn(boltzmann_dict)
        self.head_pool = boltzmann_dict.get("head_pool", "concatenate")

        # the readout acts on this final molecular fp
        self.readout = NodeMultiTaskReadOut(multitaskdict=readoutdict)

        # whether this is a classifier
        self.classifier = modelparams["classifier"]

        # whether to embed fingerprints or just use external features
        self.use_mpnn = modelparams.get("use_mpnn", True)
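For the "multiply" option in `boltzmann_dict`, the aggregation presumably scales each conformer fingerprint by its Boltzmann weight and sums over conformers. A minimal sketch (the shapes and the function name are assumptions, not the project's API):

import torch

def boltzmann_pool(conf_fps, boltz_weights):
    # Sketch of the 'multiply' aggregation: conf_fps has shape
    # (n_confs, mol_basis), boltz_weights has shape (n_confs,)
    weighted = conf_fps * boltz_weights.unsqueeze(-1)
    # sum over conformers to get one molecular fingerprint, (mol_basis,)
    return weighted.sum(dim=0)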
Example #5
    def make_boltz_nn(self, boltzmann_dict):
        """
        Make the section of the network that creates weights for each
        conformer, which may or may not be equal to the statistical
        Boltzmann weights.
        Args:
            boltzmann_dict (dict): dictionary with information about
                this section of the network.
        Returns:
            networks (nn.ModuleList): list of networks applied to the
                conformer fingerprints to aggregate them (or `[None]` for
                the "multiply" option). If it contains more than one
                network, the fingerprints produced will either be averaged
                or concatenated at the end.
        """

        networks = nn.ModuleList([])

        # if you just want to multiply the boltzmann weight by each conformer
        # fingerprint, return [None] as a placeholder instead of a network

        if boltzmann_dict["type"] == "multiply":
            return [None]

        # if you supply a dictionary of type `layers`, then the dictionary
        # under the key `layers` will be used to create the corresponding
        # network

        elif boltzmann_dict["type"] == "layers":
            layers = boltzmann_dict["layers"]
            networks.append(construct_sequential(layers))

        # if you ask for some sort of attention network, then make one such
        # network for each of the number of heads

        elif "attention" in boltzmann_dict["type"]:

            if boltzmann_dict["type"] == "attention":
                module = ConfAttention
            elif boltzmann_dict["type"] == "linear_attention":
                module = LinearConfAttention
            else:
                raise NotImplementedError

            # how many attention heads
            num_heads = boltzmann_dict.get("num_heads", 1)
            # whether to just use equal weights and not learnable weights
            # (useful for ablation studies)
            equal_weights = boltzmann_dict.get("equal_weights", False)
            # what function to use to convert the alpha_ij to probabilities
            prob_func = boltzmann_dict.get("prob_func", 'softmax')

            # add a network for each head
            for _ in range(num_heads):

                mol_basis = boltzmann_dict["mol_basis"]
                boltz_basis = boltzmann_dict["boltz_basis"]
                final_act = boltzmann_dict["final_act"]

                networks.append(module(mol_basis=mol_basis,
                                       boltz_basis=boltz_basis,
                                       final_act=final_act,
                                       equal_weights=equal_weights,
                                       prob_func=prob_func))

        return networks
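A hedged usage sketch for the attention branch, using only the keys the method above actually reads (the values are illustrative):

boltzmann_dict = {'type': 'attention',      # or 'linear_attention'
                  'num_heads': 2,
                  'equal_weights': False,
                  'prob_func': 'softmax',
                  'mol_basis': 512,
                  'boltz_basis': 10,        # assumed width of the Boltzmann input
                  'final_act': 'softplus'}  # assumed activation name

# `model` is assumed to be a WeightedConformers instance
networks = model.make_boltz_nn(boltzmann_dict)  # one ConfAttention per head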