Example #1
 def __init__(
     self,
     n_in,
     n_filters,
     n_out,
     filter_network,
     cutoff_network=None,
     activation=None,
     normalize_filter=False,
     axis=2,
     dropout=0.2,
 ):
     super().__init__(
         n_in,
         n_filters,
         n_out,
         filter_network,
         cutoff_network=cutoff_network,
         activation=activation,
         normalize_filter=normalize_filter,
         axis=axis,
     )
     # atom features -> filter space, regularized with dropout
     self.in2f = nn.Sequential(
         Dense(n_in, n_filters, bias=False, activation=None),
         nn.Dropout(dropout),
     )
     # filtered features -> output dimension, again with dropout
     self.f2out = nn.Sequential(
         Dense(n_filters, n_out, bias=True, activation=activation),
         nn.Dropout(dropout),
     )
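
Example #1 rebuilds the in2f and f2out projections of a CFConv-style base class with dropout added. A minimal sketch of the same pattern, assuming Dense behaves like schnetpack's Dense (essentially an nn.Linear with an optional activation), with plain nn.Linear standing in:

import torch
import torch.nn as nn

# atom features -> filter space, regularized with dropout (cf. in2f above)
in2f = nn.Sequential(nn.Linear(128, 64, bias=False), nn.Dropout(0.2))
x = torch.randn(4, 10, 128)  # (batch, atoms, features)
print(in2f(x).shape)         # torch.Size([4, 10, 64])
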
Example #2
 def __init__(
     self,
     n_atom_basis,
     n_spatial_basis,
     n_filters,
     cutoff,
     cutoff_network=HardCutoff,
     normalize_filter=False,
 ):
     super(SchNetInteraction, self).__init__()
     # filter block used in interaction block
     self.filter_network = nn.Sequential(
         Dense(n_spatial_basis, n_filters, activation=shifted_softplus),
         Dense(n_filters, n_filters),
     )
     # cutoff layer used in interaction block
     self.cutoff_network = cutoff_network(cutoff)
     # interaction block
     self.cfconv = CFConv(
         n_atom_basis,
         n_filters,
         n_atom_basis,
         self.filter_network,
         cutoff_network=self.cutoff_network,
         activation=shifted_softplus,
         normalize_filter=normalize_filter,
     )
     # dense layer
     self.dense = Dense(n_atom_basis,
                        n_atom_basis,
                        bias=True,
                        activation=None)
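
The filter block above uses shifted_softplus, SchNet's characteristic nonlinearity. A minimal sketch matching the definition in the SchNet paper, ssp(x) = ln(1 + e^x) - ln(2):

import math
import torch.nn.functional as F

def shifted_softplus(x):
    # ln(1 + exp(x)) - ln(2): smooth everywhere, and exactly zero at x = 0
    return F.softplus(x) - math.log(2.0)
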
Example #3
    def __init__(self,
                 n_atom_basis,
                 n_spatial_basis,
                 n_filters,
                 cutoff,
                 cutoff_network=CosineCutoff,
                 normalize_filter=False,
                 n_heads_weights=0,
                 n_heads_conv=0,
                 device=torch.device("cpu"),
                 hyperparams=[0, 0],
                 dropout=0,
                 exp=False):
        super(SchNetInteraction, self).__init__()

        # add extra input dimensions here for the attention embeddings
        # ((dimension of attention embeddings) * num_heads)
        self.n_heads_weights = n_heads_weights
        self.n_heads_conv = n_heads_conv
        self.device = device
        # widen the filter-network input only when attention weights are used
        n = 1 if n_heads_weights > 0 else 0
        # filter block used in interaction block:
        # n_spatial_basis is the number of Gaussian expansions;
        # n_atom_basis is the dimension of the atomic embedding holding the
        # projected attention values
        self.filter_network = nn.Sequential(
            Dense(
                n_spatial_basis + n_atom_basis * n,
                n_filters,
                activation=shifted_softplus,
            ),  # n_atom_basis could become n_attention_heads*attention_dim later
            Dense(n_filters, n_filters),
        )
        # cutoff layer used in interaction block
        self.cutoff_network = cutoff_network(cutoff)
        # interaction block
        self.cfconv = CFConv(n_atom_basis,
                             n_filters,
                             n_atom_basis,
                             self.filter_network,
                             cutoff_network=self.cutoff_network,
                             activation=shifted_softplus,
                             normalize_filter=normalize_filter,
                             n_heads_weights=self.n_heads_weights,
                             n_heads_conv=self.n_heads_conv,
                             device=self.device,
                             hyperparams=hyperparams,
                             dropout=dropout,
                             exp=exp)
        # dense layer
        self.dense = Dense(n_atom_basis,
                           n_atom_basis,
                           bias=True,
                           activation=None)
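
This variant defaults to CosineCutoff instead of HardCutoff. For reference, a sketch of the Behler-style cosine cutoff that schnetpack's CosineCutoff implements (a paraphrase, not the library source):

import math
import torch

def cosine_cutoff(distances, cutoff=5.0):
    # 0.5 * (cos(pi * r / r_c) + 1) inside the cutoff radius, 0 outside
    f = 0.5 * (torch.cos(math.pi * distances / cutoff) + 1.0)
    return f * (distances < cutoff).float()

r = torch.linspace(0.0, 6.0, 7)
print(cosine_cutoff(r))  # decays smoothly to 0 at r = 5.0 and stays 0 beyond
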
Example #4
    def __init__(
        self,
        n_atom_basis,
        n_hidden,
        activation=None,
        dropout_rate=0,
    ):
        super(Transition, self).__init__()

        # dropout followed by layer normalization, applied before the
        # transition network
        self.layer_norm_in = nn.Sequential(
            nn.Dropout(dropout_rate),
            nn.LayerNorm([n_atom_basis]),
        )

        self.transition_network = nn.Sequential(
            Dense(n_atom_basis, n_hidden, activation=activation),
            Dense(n_hidden, n_atom_basis, activation=None),
        )
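
Transition mirrors the position-wise feed-forward block of a Transformer. A self-contained sketch of how such a block is typically wired (the residual, pre-norm application is an assumption, since the forward pass is not shown; nn.Linear and nn.ReLU stand in for Dense and the configurable activation):

import torch
import torch.nn as nn

d = 128
layer_norm_in = nn.Sequential(nn.Dropout(0.1), nn.LayerNorm([d]))
transition_network = nn.Sequential(nn.Linear(d, 256), nn.ReLU(), nn.Linear(256, d))

x = torch.randn(4, 10, d)                     # (batch, atoms, features)
x = x + transition_network(layer_norm_in(x))  # pre-norm residual update
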
Example #5
    def __init__(self, n_in, n_out, n_layers, activation, dropout):
        super().__init__(n_in, n_out, n_layers=n_layers, activation=activation)

        # build a pyramid of layer widths: halve the width at each layer,
        # then append the output dimension
        c_neurons = n_in
        self.n_neurons = []
        for i in range(n_layers):
            self.n_neurons.append(c_neurons)
            c_neurons = c_neurons // 2
        self.n_neurons.append(n_out)

        layers = []
        for i in range(n_layers - 1):
            layers.append(
                Dense(self.n_neurons[i],
                      self.n_neurons[i + 1],
                      activation=activation))
            layers.append(nn.Dropout(dropout))

        # final layer: linear map to the output dimension, no activation
        layers.append(
            Dense(self.n_neurons[-2], self.n_neurons[-1], activation=None))
        self.out_net = nn.Sequential(*layers)
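
The loop builds a width schedule that halves at every hidden layer before mapping to n_out. With hypothetical values n_in=128, n_layers=3, n_out=1:

n_in, n_layers, n_out = 128, 3, 1  # hypothetical values
widths, c = [], n_in
for _ in range(n_layers):
    widths.append(c)
    c //= 2
widths.append(n_out)
print(widths)  # [128, 64, 32, 1] -> Dense 128->64, 64->32 (with dropout), then 32->1
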
Example #6
    def __init__(
        self,
        n_atom_basis,
        n_gaussians,
        n_heads,
        n_hidden,
        cutoff,
        cutoff_network=CosineCutoff,
        gated_attention=True,
        activation=None,
        apply_transition_function=False,
        dropout_rate=0,
    ):
        super(TDT_Interaction, self).__init__()

        # filter block used in interaction block
        self.filter_network = Dense(n_gaussians,
                                    n_atom_basis,
                                    bias=True,
                                    activation=None)
        ### Mark by Justin: Why not non-linear?

        #         self.filter_network = nn.Sequential(
        #             Dense(n_spatial_basis, n_hidden, activation=swish),
        #             Dense(n_hidden, n_atom_basis),
        #         )

        # cutoff layer used in interaction block
        self.cutoff_network = cutoff_network(cutoff)

        # Perform Ego-Attention:
        self.attention_network = EgoAttention(n_atom_basis,
                                              n_hidden=n_hidden,
                                              n_heads=n_heads,
                                              activation=activation)

        # For message passing (interaction block)
        self.mpnn = MPNN(
            self.filter_network,  ### filter_network provides the positional embedding
            self.attention_network,
            cutoff_network=self.cutoff_network,
            activation=activation,
        )

        # Transition function:
        self.apply_transition_function = apply_transition_function
        if apply_transition_function:
            self.transition = Transition(n_atom_basis=n_atom_basis,
                                         n_hidden=n_hidden,
                                         activation=activation,
                                         dropout_rate=dropout_rate)
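
EgoAttention's implementation is not shown in this listing. As a rough, hypothetical stand-in, torch.nn.MultiheadAttention illustrates the head and hidden-size bookkeeping implied by the constructor arguments:

import torch
import torch.nn as nn

attn = nn.MultiheadAttention(embed_dim=128, num_heads=8, batch_first=True)
x = torch.randn(4, 10, 128)   # (batch, atoms, n_atom_basis)
out, weights = attn(x, x, x)  # self-attention over the atom axis
print(out.shape)              # torch.Size([4, 10, 128])
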
Example #7
 def __init__(
     self,
     n_atom_basis,
     n_spatial_basis,
     n_filters,
     cutoff_network,
     cutoff,
     dropout,
     normalize_filter=False,
 ):
     super().__init__(
         n_atom_basis=n_atom_basis,
         n_spatial_basis=n_spatial_basis,
         n_filters=n_filters,
         cutoff_network=cutoff_network,
         cutoff=cutoff,
         normalize_filter=normalize_filter,
     )
     # filter-generating network, with dropout between the two dense layers
     self.filter_network = nn.Sequential(
         Dense(
             n_spatial_basis,
             n_filters,
             activation=shifted_softplus,
         ),
         nn.Dropout(dropout),
         Dense(n_filters, n_filters),
     )
     # output transformation: dense + dropout + final linear layer
     self.dense = nn.Sequential(
         Dense(
             n_atom_basis,
             n_atom_basis,
             activation=shifted_softplus,
         ),
         nn.Dropout(dropout),
         Dense(n_atom_basis, n_atom_basis, activation=None, bias=True),
     )
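
A practical note on the dropout layers inserted here: nn.Dropout is only active in training mode, so the stochastic filters vanish at evaluation time. A minimal demonstration:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(8, 8), nn.Dropout(0.5))
x = torch.randn(2, 8)

net.train()
y1, y2 = net(x), net(x)     # differ: dropout masks are resampled per call
net.eval()
z1, z2 = net(x), net(x)     # identical: dropout is a no-op in eval mode
print(torch.equal(z1, z2))  # True
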
Example #8
def test_shape_dense(expanded_distances):
    out_shape = [*list(expanded_distances.shape)[:-1], 10]
    model = Dense(expanded_distances.shape[-1], out_shape[-1])
    inputs = [expanded_distances]
    assert_equal_shape(model, inputs, out_shape)
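
assert_equal_shape comes from the test suite's shared helpers. A sketch of what such a helper presumably does (an assumption, not the exact utility):

def assert_equal_shape(model, inputs, expected_shape):
    # run the model on the inputs and compare the output shape entry by entry
    out = model(*inputs)
    assert list(out.shape) == list(expected_shape), \
        f"expected {expected_shape}, got {list(out.shape)}"
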
Example #9
    def __init__(self,
                 n_atom_basis=128,
                 n_filters=128,
                 n_interactions=3,
                 cutoff=5.0,
                 n_gaussians=25,
                 normalize_filter=False,
                 coupled_interactions=False,
                 return_intermediate=False,
                 max_z=100,
                 cutoff_network=HardCutoff,
                 trainable_gaussians=False,
                 distance_expansion=None,
                 charged_systems=False,
                 use_noise=False,
                 noise_mean=0,
                 noise_std=1,
                 chargeEmbedding=True,
                 ownFeatures=False,
                 nFeatures=8,
                 finalFeature=None,
                 finalFeatureStart=7,
                 finalFeatureStop=8):
        super(SchNet, self).__init__()

        self.finalFeature = finalFeature
        self.finalFeatureStart = finalFeatureStart
        self.finalFeatureStop = finalFeatureStop
        self.chargeEmbedding = chargeEmbedding
        self.ownFeatures = ownFeatures
        self.n_atom_basis = n_atom_basis

        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size n_atom_basis
        if chargeEmbedding and not ownFeatures:
            self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
        elif chargeEmbedding and ownFeatures:
            if nFeatures is None:
                raise NotImplementedError
            self.embedding = nn.Embedding(max_z,
                                          n_atom_basis // 2,
                                          padding_idx=0)
            self.denseEmbedding = Dense(nFeatures, n_atom_basis // 2)
        elif ownFeatures and not chargeEmbedding:
            if nFeatures is None:
                raise NotImplementedError
            self.denseEmbedding = Dense(nFeatures, n_atom_basis)
        else:
            raise NotImplementedError

        # layer for computing interatomic distances
        self.distances = AtomDistances()

        # layer for expanding interatomic distances in a basis
        if distance_expansion is None:
            self.distance_expansion = GaussianSmearing(
                0.0, cutoff, n_gaussians, trainable=trainable_gaussians)
        else:
            self.distance_expansion = distance_expansion

        # block for computing interaction
        if isinstance(n_filters, list):
            # per-interaction filter sizes were given explicitly as a list
            self.interactions = nn.ModuleList([
                SchNetInteraction(
                    n_atom_basis=n_atom_basis,
                    n_spatial_basis=n_gaussians,
                    n_filters=n_filters[i],
                    cutoff_network=cutoff_network,
                    cutoff=cutoff,
                    normalize_filter=normalize_filter,
                ) for i in range(n_interactions)
            ])

        elif coupled_interactions:
            # use the same SchNetInteraction instance (hence the same weights)
            self.interactions = nn.ModuleList([
                SchNetInteraction(
                    n_atom_basis=n_atom_basis,
                    n_spatial_basis=n_gaussians,
                    n_filters=n_filters,
                    cutoff_network=cutoff_network,
                    cutoff=cutoff,
                    normalize_filter=normalize_filter,
                )
            ] * n_interactions)
        else:
            # use one SchNetInteraction instance for each interaction
            self.interactions = nn.ModuleList([
                SchNetInteraction(
                    n_atom_basis=n_atom_basis,
                    n_spatial_basis=n_gaussians,
                    n_filters=n_filters,
                    cutoff_network=cutoff_network,
                    cutoff=cutoff,
                    normalize_filter=normalize_filter,
                ) for _ in range(n_interactions)
            ])

        # set attributes
        self.use_noise = use_noise
        self.noise_mean = noise_mean
        self.noise_std = noise_std
        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis**0.5)
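
The coupled_interactions branch registers one SchNetInteraction instance n_interactions times, so every block shares the same weights, while the default branch builds independent instances. The distinction in miniature, with nn.Linear standing in:

import torch.nn as nn

shared = nn.ModuleList([nn.Linear(4, 4)] * 3)                     # coupled: one module, three references
independent = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])  # uncoupled

print(shared[0].weight is shared[2].weight)            # True
print(independent[0].weight is independent[2].weight)  # False
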
Example #10
    def __init__(
        self,
        ### Network Hyper-parameters:
        n_atom_basis=128,
        n_gaussians=32,  ### 25
        n_heads=8,
        n_hidden=128,
        activation=swish,
        dropout_rate=0,

        ### Model Hyper-parameters:
        n_interactions=4,
        n_scales=1,
        cutoff=5.0,
        apply_transition_function=False,  ### If true, Apply Transition function as in Transformer
        use_act=True,  ### Adaptive Computation Time
        use_mcr=False,  ### Multiple-Channel Rep.
        return_intermediate=False,
        max_z=100,
        cutoff_network=CosineCutoff,
        trainable_gaussians=False,
        distance_expansion=None,
        charged_systems=False,
        if_cuda=True,
    ):
        super(TDTNet, self).__init__()

        self.n_atom_basis = n_atom_basis
        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size n_atom_basis
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)

        # layer for computing interatomic distances
        self.distances = AtomDistances(return_directions=True)

        # layer for expanding interatomic distances in a basis
        self.positional_embedding = Positional_Embedding(
            n_atom_basis=n_atom_basis,
            n_hidden=n_hidden,
            n_gaussians=n_gaussians,
            trainable_gaussians=trainable_gaussians,
            activation=activation,
            cutoff=cutoff,
            cutoff_network=cutoff_network,
            distance_expansion=None,
        )

        # block for computing interaction

        self.interaction_blocks = nn.ModuleList([
            TDT_Interaction(
                n_atom_basis=n_atom_basis,
                n_gaussians=n_gaussians,
                n_heads=n_heads,
                n_hidden=n_hidden,
                cutoff_network=cutoff_network,
                cutoff=cutoff,
                activation=activation,
                apply_transition_function=apply_transition_function,
            ) for _ in range(n_scales)
        ])

        ###
        ### Time (interaction-step) embedding, Transformer-style:
        ### sine on even channels, cosine on odd channels.
        ### Note by Justin: still some bugs here
        even_mask = torch.cat(
            [torch.ones(n_atom_basis // 2, 1),
             torch.zeros(n_atom_basis // 2, 1)],
            dim=-1,
        )
        even_mask = even_mask.reshape(1, n_atom_basis)
        period = torch.pow(
            10000,
            -2. * torch.arange(n_atom_basis // 2) / n_atom_basis).unsqueeze(-1)
        period = torch.cat([period, period], dim=-1)
        period = period.reshape(1, n_atom_basis)
        tt = torch.arange(n_interactions).reshape(n_interactions, 1)
        tt = tt * period  ### [n_interactions, n_atom_basis]
        self.time_embedding = (torch.sin(tt) * even_mask +
                               torch.cos(tt) * (1. - even_mask))
        if if_cuda:
            self.time_embedding = self.time_embedding.cuda()
        ### n_interactions * [1, n_atom_basis]
        self.time_embedding_list = torch.split(self.time_embedding, 1, dim=0)

        ### ACT:
        self.use_act = use_act
        if self.use_act and n_interactions > 1:
            self.act_blocks = nn.ModuleList([
                AdaptiveComputationTime(
                    n_atom_basis=n_atom_basis,
                    n_hidden=n_hidden,
                    activation=activation,
                    dropout_rate=dropout_rate,
                ) for _ in range(n_scales)
            ])

        ### MCR: Multiple-Channel Representation.
        self.use_mcr = use_mcr
        if self.use_mcr:
            assert (n_atom_basis % n_scales == 0), \
                "n_atom_basis must be divisible by n_scales!"

            self.mcr_proj_blocks = nn.ModuleList([
                nn.Sequential(
                    Dense(n_atom_basis, n_hidden, activation=activation),
                    Dense(n_hidden,
                          n_atom_basis // n_scales,
                          activation=None),
                ) for _ in range(n_scales)
            ])

        #################
        # set attributes
        self.n_scales = n_scales
        self.n_interactions = n_interactions

        self.return_intermediate = return_intermediate
        self.charged_systems = charged_systems
        if charged_systems:
            self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
            self.charge.data.normal_(0, 1.0 / n_atom_basis**0.5)
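
The even_mask construction above interleaves sine and cosine channels, i.e. the standard Transformer positional encoding applied to the interaction-step index. An equivalent sketch of what the code appears to intend (the source itself notes it may still have bugs), using index slicing instead of masks:

import torch

def step_embedding(n_steps, d):
    pos = torch.arange(n_steps, dtype=torch.float32).unsqueeze(1)  # [T, 1]
    freq = torch.pow(10000.0, -2.0 * torch.arange(d // 2) / d)     # [d/2]
    emb = torch.zeros(n_steps, d)
    emb[:, 0::2] = torch.sin(pos * freq)  # even channels: sine
    emb[:, 1::2] = torch.cos(pos * freq)  # odd channels: cosine
    return emb

print(step_embedding(4, 128).shape)  # torch.Size([4, 128])
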