def __init__(
    self,
    n_atom_basis=128,
    n_filters=128,
    n_interactions=3,
    cutoff=5.0,
    n_gaussians=25,
    normalize_filter=False,
    coupled_interactions=False,
    return_intermediate=False,
    max_z=100,
    cutoff_network=CosineCutoff,
    trainable_gaussians=False,
    distance_expansion=None,
    charged_systems=False,
):
    """Build the SchNet representation module.

    Sets up the element embedding, the interatomic-distance layer, the
    radial-basis expansion, and ``n_interactions`` interaction blocks.
    When ``coupled_interactions`` is true a single interaction block is
    reused for every pass, so all passes share one set of weights.
    """
    super(SchNet, self).__init__()
    self.n_atom_basis = n_atom_basis

    # Lookup table mapping atomic number (< max_z) to a learned vector of
    # size n_atom_basis; index 0 is reserved as padding.
    self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)

    # Layer computing interatomic distances.
    self.distances = AtomDistances()

    # Expand distances in a radial basis; a caller-supplied expansion
    # overrides the default Gaussian smearing on [0, cutoff].
    if distance_expansion is None:
        self.distance_expansion = GaussianSmearing(
            0.0, cutoff, n_gaussians, trainable=trainable_gaussians
        )
    else:
        self.distance_expansion = distance_expansion

    def build_interaction():
        # One fully-configured interaction block.
        return SchNetInteraction(
            n_atom_basis=n_atom_basis,
            n_spatial_basis=n_gaussians,
            n_filters=n_filters,
            cutoff_network=cutoff_network,
            cutoff=cutoff,
            normalize_filter=normalize_filter,
        )

    if coupled_interactions:
        # The SAME module instance repeated -> tied weights across passes.
        shared_block = build_interaction()
        self.interactions = nn.ModuleList([shared_block] * n_interactions)
    else:
        # Independent weights for every interaction pass.
        self.interactions = nn.ModuleList(
            [build_interaction() for _ in range(n_interactions)]
        )

    # Remaining configuration flags.
    self.return_intermediate = return_intermediate
    self.charged_systems = charged_systems
    if charged_systems:
        # Learnable per-feature charge correction, initialised ~N(0, 1/sqrt(d)).
        self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
        self.charge.data.normal_(0, 1.0 / n_atom_basis**0.5)
def __init__(self, n_atom_basis=128, n_filters=128, n_interactions=3,
             cutoff=5.0, n_gaussians=25, normalize_filter=False,
             coupled_interactions=False, return_intermediate=False,
             max_z=100, cutoff_network=HardCutoff,
             trainable_gaussians=False, distance_expansion=None,
             charged_systems=False, use_noise=False, noise_mean=0,
             noise_std=1, chargeEmbedding=True, ownFeatures=False,
             nFeatures=8, finalFeature=None, finalFeatureStart=7,
             finalFeatureStop=8):
    """Build an extended SchNet representation.

    Beyond the stock SchNet options this variant supports input noise
    (``use_noise``/``noise_mean``/``noise_std``), caller-provided per-atom
    features (``ownFeatures``/``nFeatures``) in place of or alongside the
    atomic-number embedding (``chargeEmbedding``), optional feature-slice
    selection (``finalFeature*``), and a per-layer filter width when
    ``n_filters`` is a list.
    """
    super(SchNet, self).__init__()

    # Feature-slicing / embedding-mode configuration.
    self.finalFeature = finalFeature
    self.finalFeatureStart = finalFeatureStart
    self.finalFeatureStop = finalFeatureStop
    self.chargeEmbedding = chargeEmbedding
    self.ownFeatures = ownFeatures
    self.n_atom_basis = n_atom_basis

    # At least one source of per-atom input features is required, and
    # custom features need a declared width.
    if not (chargeEmbedding or ownFeatures):
        raise NotImplementedError
    if ownFeatures and nFeatures is None:
        raise NotImplementedError

    # Choose how the initial atom representation is produced:
    #  - embedding only: lookup of size n_atom_basis per element (< max_z)
    #  - both: half the basis from the element embedding, half from a
    #    dense projection of the caller's features
    #    (NOTE(review): for odd n_atom_basis the two halves sum to
    #    n_atom_basis - 1 — presumably concatenated downstream; verify)
    #  - own features only: dense projection to the full basis
    if chargeEmbedding and ownFeatures:
        half = int(n_atom_basis / 2)
        self.embedding = nn.Embedding(max_z, half, padding_idx=0)
        self.denseEmbedding = Dense(nFeatures, half)
    elif chargeEmbedding:
        self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)
    else:
        self.denseEmbedding = Dense(nFeatures, n_atom_basis)

    # Layer computing interatomic distances.
    self.distances = AtomDistances()

    # Radial-basis expansion of distances; caller may override the default.
    if distance_expansion is None:
        self.distance_expansion = GaussianSmearing(
            0.0, cutoff, n_gaussians, trainable=trainable_gaussians
        )
    else:
        self.distance_expansion = distance_expansion

    def build_interaction(width):
        # One interaction block with the given filter width.
        return SchNetInteraction(
            n_atom_basis=n_atom_basis,
            n_spatial_basis=n_gaussians,
            n_filters=width,
            cutoff_network=cutoff_network,
            cutoff=cutoff,
            normalize_filter=normalize_filter,
        )

    if isinstance(n_filters, list):
        # Per-layer filter widths. NOTE(review): this branch takes
        # precedence, so coupled_interactions is silently ignored here.
        self.interactions = nn.ModuleList(
            [build_interaction(n_filters[i]) for i in range(n_interactions)]
        )
    elif coupled_interactions:
        # The SAME instance repeated -> tied weights across all passes.
        shared_block = build_interaction(n_filters)
        self.interactions = nn.ModuleList([shared_block] * n_interactions)
    else:
        # Independent weights per interaction pass.
        self.interactions = nn.ModuleList(
            [build_interaction(n_filters) for _ in range(n_interactions)]
        )

    # Noise and remaining configuration flags.
    self.use_noise = use_noise
    self.noise_mean = noise_mean
    self.noise_std = noise_std
    self.return_intermediate = return_intermediate
    self.charged_systems = charged_systems
    if charged_systems:
        # Learnable per-feature charge correction, initialised ~N(0, 1/sqrt(d)).
        self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
        self.charge.data.normal_(0, 1.0 / n_atom_basis**0.5)
def __init__(
    self,
    ### Network hyper-parameters:
    n_atom_basis=128,
    n_gaussians=32,  ### 25
    n_heads=8,
    n_hidden=128,
    activation=swish,
    dropout_rate=0,
    ### Model hyper-parameters:
    n_interactions=4,
    n_scales=1,
    cutoff=5.0,
    apply_transition_function=False,  ### If true, apply Transition function as in Transformer
    use_act=True,  ### Adaptive Computation Time
    use_mcr=False,  ### Multiple-Channel Representation
    return_intermediate=False,
    max_z=100,
    cutoff_network=CosineCutoff,
    trainable_gaussians=False,
    distance_expansion=None,
    charged_systems=False,
    if_cuda=True,
):
    """Build the TDTNet representation.

    Sets up the element embedding, distance layer (with directions), the
    positional embedding, ``n_scales`` transformer-style interaction
    blocks, a fixed sinusoidal time embedding over the ``n_interactions``
    recurrent steps, and optional ACT and MCR sub-modules.

    Fixes vs. previous revision:
      * the second Dense in each MCR projection now takes ``n_hidden``
        inputs (the first Dense's output size) instead of
        ``n_atom_basis`` — the old code crashed whenever
        ``n_hidden != n_atom_basis``;
      * removed a duplicate ``self.use_act`` assignment.
    """
    super(TDTNet, self).__init__()
    self.n_atom_basis = n_atom_basis

    # Lookup table mapping atomic number (< max_z) to a learned vector of
    # size n_atom_basis; index 0 is reserved as padding.
    self.embedding = nn.Embedding(max_z, n_atom_basis, padding_idx=0)

    # Interatomic distances plus unit direction vectors.
    self.distances = AtomDistances(return_directions=True)

    # Expand distances / geometry into the positional embedding.
    self.positional_embedding = Positional_Embedding(
        n_atom_basis=n_atom_basis,
        n_hidden=n_hidden,
        n_gaussians=n_gaussians,
        trainable_gaussians=trainable_gaussians,
        activation=activation,
        cutoff=cutoff,
        cutoff_network=cutoff_network,
        distance_expansion=None,
    )

    # One interaction block per scale.
    self.interaction_blocks = nn.ModuleList([
        TDT_Interaction(
            n_atom_basis=n_atom_basis,
            n_gaussians=n_gaussians,
            n_heads=n_heads,
            n_hidden=n_hidden,
            cutoff_network=cutoff_network,
            cutoff=cutoff,
            activation=activation,
            apply_transition_function=apply_transition_function,
        ) for _ in range(n_scales)
    ])

    ### Time embedding (sinusoidal over the n_interactions steps):
    ### Note by Justin: still some bugs here
    # even_mask interleaves to [1, 0, 1, 0, ...]: sin on even feature
    # indices, cos on odd ones.
    even_mask = torch.cat([
        torch.ones(n_atom_basis // 2, 1),
        torch.zeros(n_atom_basis // 2, 1)
    ], dim=-1)
    even_mask = even_mask.reshape(1, n_atom_basis)
    # period[i] = 10000^(-2i/d), duplicated so the sin/cos pair at feature
    # indices 2i and 2i+1 share one frequency.
    period = torch.pow(
        10000,
        -2. * torch.arange(n_atom_basis // 2) / n_atom_basis).unsqueeze(-1)
    period = torch.cat([period, period], dim=-1)
    period = period.reshape(1, n_atom_basis)
    tt = torch.arange(n_interactions).reshape(n_interactions, 1)
    tt = tt * period  ### [n_interactions, n_atom_basis]
    self.time_embedding = torch.sin(tt) * even_mask + torch.cos(tt) * (
        1. - even_mask)
    if if_cuda:
        # NOTE(review): a plain attribute moved with .cuda() does not follow
        # module .to(device) calls — consider register_buffer; kept as-is.
        self.time_embedding = self.time_embedding.cuda()
    self.time_embedding_list = torch.split(
        self.time_embedding, 1, dim=0)  ### n_interactions * [1, n_atom_basis]

    ### ACT: halting units, one per scale (only when iterating > once).
    self.use_act = use_act
    if self.use_act and n_interactions > 1:
        self.act_blocks = nn.ModuleList([
            AdaptiveComputationTime(
                n_atom_basis=n_atom_basis,
                n_hidden=n_hidden,
                activation=activation,
                dropout_rate=dropout_rate,
            ) for _ in range(n_scales)
        ])

    ### MCR: project each scale's features to a channel slice of the basis.
    self.use_mcr = use_mcr
    if self.use_mcr:
        assert (n_atom_basis % n_scales
                == 0), "n_scales should divide-out n_atom_basis!"
        self.mcr_proj_blocks = nn.ModuleList([
            nn.Sequential(
                Dense(n_atom_basis, n_hidden, activation=activation),
                # BUGFIX: input size must match the previous layer's output
                # (n_hidden), not n_atom_basis.
                Dense(n_hidden, n_atom_basis // n_scales, activation=None),
            ) for _ in range(n_scales)
        ])

    #################
    # set attributes
    self.n_scales = n_scales
    self.n_interactions = n_interactions
    self.return_intermediate = return_intermediate
    self.charged_systems = charged_systems
    if charged_systems:
        # Learnable per-feature charge correction, initialised ~N(0, 1/sqrt(d)).
        self.charge = nn.Parameter(torch.Tensor(1, n_atom_basis))
        self.charge.data.normal_(0, 1.0 / n_atom_basis**0.5)