Example #1
import schnetpack as spk
import schnetpack.atomistic as atm
import schnetpack.representation as rep
from schnetpack.nn.cutoff import CosineCutoff


def make_schnet_model(args):
    label = args.label
    negative_dr = args.derivative is not None
    atomrefs = None
    #if hasattr(args,'atomrefs') and  args.atomrefs is not None:
    #    atomrefs = self.read_atomrefs(args.atomrefs,args.max_z)
    reps = rep.SchNet(n_atom_basis=args.num_filters,
                      n_filters=args.num_filters,
                      n_interactions=args.num_interactions,
                      cutoff=args.cutoff,
                      n_gaussians=args.num_gaussians,
                      max_z=args.max_z,
                      cutoff_network=CosineCutoff)
    output = spk.atomistic.Atomwise(n_in=reps.n_atom_basis,
                                    aggregation_mode='sum',
                                    property=label,
                                    derivative=args.derivative,
                                    negative_dr=negative_dr,
                                    mean=None,
                                    stddev=None,
                                    atomref=atomrefs)
    model = atm.AtomisticModel(reps, output)
    total_params = sum(p.numel() for p in model.parameters()
                       if p.requires_grad)
    print('Number of trainable parameters: {}'.format(total_params))
    return model
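
A minimal usage sketch for the helper above, assuming an argparse-style namespace; the attribute values below are illustrative placeholders, not taken from the original:

from types import SimpleNamespace

# Hypothetical settings for illustration only.
args = SimpleNamespace(label='energy',
                       derivative=None,      # no derivative/force output
                       num_filters=128,
                       num_interactions=6,
                       cutoff=5.0,
                       num_gaussians=25,
                       max_z=100)
model = make_schnet_model(args)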
Example #2
import torch
import torch.nn.functional as F
from torch.optim import Adam

import schnetpack as spk
import schnetpack.atomistic as atm
import schnetpack.representation as rep
from schnetpack.datasets import *

# load qm9 dataset and download if necessary
data = QM9("qm9.db", properties=[QM9.U0])

# split into train, validation, and test sets
train, val, test = data.create_splits(100000, 10000)
loader = spk.data.AtomsLoader(train, batch_size=100, num_workers=4)
val_loader = spk.data.AtomsLoader(val)

# create model
reps = rep.SchNet()
output = atm.Atomwise()
model = atm.AtomisticModel(reps, output)

# create trainer
opt = Adam(model.parameters(), lr=1e-4)
loss = lambda b, p: F.mse_loss(p["y"], b[QM9.U0])
trainer = spk.train.Trainer("output/", model, loss, opt, loader, val_loader)

# start training
trainer.train(torch.device("cpu"))
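
After training, the held-out test split can be evaluated in a few lines. A minimal sketch, assuming the Trainer writes its best checkpoint to "output/best_model"; the checkpoint path, batch size, and MAE metric are assumptions, not part of the original example:

# Hypothetical evaluation loop; reuses the test split created above.
best_model = torch.load("output/best_model")
test_loader = spk.data.AtomsLoader(test, batch_size=100)
errors = []
with torch.no_grad():
    for batch in test_loader:
        pred = best_model(batch)
        errors.append(F.l1_loss(pred["y"], batch[QM9.U0]).item())
print("Test MAE on U0: {:.4f}".format(sum(errors) / len(errors)))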
Example #3

import torch
import torch.nn.functional as F
import schnetpack
import schnetpack.representation as rep
from schnetpack.datasets import QM9

# NOTE: this snippet appears to use a modified SchNet with attention-related
# arguments (n_heads_weights, n_heads_conv, hyperparams, exp), which are not
# part of stock SchNetPack; softmax, division, dropout, layers_w, layers_c,
# and exp are defined elsewhere in the original script.

if torch.cuda.is_available():
    dev = "cuda"
else:
    dev = "cpu"
print('device is %s\n' % dev)
# These hyperparameters control whether the attention layers use a softmax
# and whether the attention scores are normalized by sqrt(dim_k).
# NOTE: during testing on ethanol, energy predictions were lower without the
# softmax and the division.
# 0 means the attention network does not use the corresponding option,
# 1 means it does.
hyperparams = [softmax, division]  # [softmax, division by sqrt(dim_k)]
print(dropout, 'dropout')
# layers_w, layers_c
reps = rep.SchNet(n_heads_weights=layers_w, n_heads_conv=layers_c,
                  device=torch.device(dev), hyperparams=hyperparams,
                  dropout=dropout, exp=exp, n_interactions=3,
                  normalize_filter=True)
# -----------------------#

#
output = schnetpack.atomistic.Atomwise(n_in=reps.n_atom_basis, aggregation_mode='avg')
model = schnetpack.atomistic.AtomisticModel(reps, output)

loss = lambda b, p: F.l1_loss(p["y"], b[QM9.U0])  # alternatives: mse_loss, l1_loss
numepoch = 10000
numepochepoch = 10
for epoch in range(numepoch):
#    if epoch %2 ==0:
#        v = val_loader
#        vp = 'primary'
#    else:
#        v = val2_loader