Example #1
mean, stddev = torch.tensor([-1202.6432]), torch.tensor([12.3304])

print(mean, stddev)
print(mean_forces, stddev_forces)

model = get_model(train_args,
                  atomref=None,
                  mean=mean,
                  stddev=stddev,
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader,
                 val_loader,
                 collate_fn=schnetpack2.data.collate_atoms)

learn = Learner(data, model, model_dir=args.modelpath)

#learn.lr_find(start_lr=1e-6,end_lr=1e0, num_it=300, wd=wd)
#plot_recorder(learn.recorder, 'uncertainties_wd_{0}'.format(wd))

#print(hasattr(list(learn.model.children())[-3].weight, "dict"))
#pdb.set_trace()
try:
    if args.num_inducing_points:
        learn.model.load_state_dict(
            torch.load(
                os.path.join(
                    args.modelpath, "bulkVtrain3200" + "_induced_" +
                    str(sys.argv[3]) + ".pth"))['model'])
    else:
        # truncated in the source; assumed to load the non-induced checkpoint
        learn.model.load_state_dict(
            torch.load(
                os.path.join(
                    args.modelpath,
                    "bulkVtrain3200_" + str(sys.argv[3]) + ".pth"))['model'])
except FileNotFoundError:
    pass  # no checkpoint yet: keep the freshly initialized weights
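
# The ['model'] indexing above follows fastai v1's Learner.save convention,
# which stores the weights in a dict under the 'model' key. A minimal sketch
# of loading such a checkpoint (the path here is hypothetical):
#
#     ckpt = torch.load("models/bulkVtrain3200_induced_10.pth", map_location="cpu")
#     learn.model.load_state_dict(ckpt['model'])  # map_location avoids device-mismatch errors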
Example #2
print(mean_forces, stddev_forces)

model = get_model(train_args,
                  atomref=None,
                  mean=mean,
                  stddev=stddev,
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader,
                 val_loader,
                 collate_fn=schnetpack2.data.collate_atoms)

#pdb.set_trace()
#print(hasattr(list(model.children())[-1].sigma_net[1].out_net[-1].weight, "dict"))

learn = Learner(data, model, model_dir=args.modelpath)

learn.split(split_sigma_net)
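
# learn.split(split_sigma_net) cuts the model into fastai layer groups so that
# discriminative learning rates (and the Adam_sdr variant selected below) can
# be applied per group. split_sigma_net is not shown in this excerpt; a
# hypothetical fastai-v1-style split function would look like:
#
#     def split_sigma_net(model):
#         # first group: representation network; second group: the sigma head
#         return [model.representation, model.output_modules.sigma_net]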

#learn.purge()
if args.sdr:
    learn.opt_func = Adam_sdr
else:
    learn.opt_func = Adam

mean, stddev = torch.tensor([-1202.6432]), torch.tensor([12.3304])

if args.uncertainty_forces:
    #    learn.loss_func = partial(NLLMSEloss_forces,mean=mean.cuda(), std=stddev.cuda(),kf=100.0,ke=1.0)#best before forces rescaling
    learn.loss_func = partial(NLLMSEloss_forces,
                              mean=mean.cuda(),
                              std=stddev.cuda(),
                              kf=100.0, ke=1.0)  # tail truncated in the source; values taken from the commented call above
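
# The partial(...) pattern bakes the normalization statistics and loss weights
# into the loss, so the Learner can later call it with just (pred, target).
# A minimal sketch with a hypothetical two-term loss standing in for
# NLLMSEloss_forces (whose definition is not part of this excerpt):

def weighted_loss_sketch(pred, target, kf=1.0, ke=1.0):
    energy_term = ((pred["energy"] - target["energy"]) ** 2).mean()
    force_term = ((pred["forces"] - target["forces"]) ** 2).mean()
    return ke * energy_term + kf * force_term

# loss_func = partial(weighted_loss_sketch, kf=100.0, ke=1.0)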
Example #3
if args.split_path is not None:
    copyfile(args.split_path, split_path)
    
train_loader = schnetpack2.custom.data.AtomsLoader(
    data_train, batch_size=args.batch_size, sampler=RandomSampler(data_train),
    num_workers=9 * torch.cuda.device_count(), pin_memory=True)
val_loader = schnetpack2.custom.data.AtomsLoader(
    data_val, batch_size=args.batch_size,
    num_workers=9 * torch.cuda.device_count(), pin_memory=True)
mean, stddev = train_loader.get_statistics('energy', False)

print(mean, stddev)
model = get_model(train_args,
                  atomref=None,
                  mean=torch.FloatTensor([mean]),
                  stddev=torch.FloatTensor([stddev]),
                  train_loader=train_loader,
                  parallelize=args.parallel)

data = DataBunch(train_loader, val_loader, collate_fn=schnetpack2.data.collate_atoms)

learn = Learner(data, model, model_dir=args.modelpath)  #, callback_fns=ShowGraph #, callback_fns=SaveAllGrads

learn.opt_func = Adam
learn.loss_func = partial(MSEloss, kf=1, ke=0.1)

learn.metrics = [Emetric, Fmetric]

learn.fit_one_cycle(cyc_len=2, max_lr=3e-3, moms=(0.95, 0.85), div_factor=100.0,
                    pct_start=0.3, wd=1e-7, no_grad_val=False)

learn.to_fp16(loss_scale=100, max_noskip=1000, dynamic=False, clip=None, flat_master=True)

torch.cuda.empty_cache()

#learn.lr_find(start_lr=1e-6,end_lr=1e-2,no_grad_val = False,num_it=63)
#learn.recorder.plot()
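
# fit_one_cycle above follows fastai's one-cycle policy: the learning rate
# climbs from max_lr/div_factor to max_lr over the first pct_start fraction of
# iterations, then anneals back down, while momentum moves inversely between
# the two moms values. A simplified sketch of the learning-rate schedule
# (cosine interpolation as in fastai v1; the exact final value differs slightly):

import math

def one_cycle_lr_sketch(step, total_steps, max_lr=3e-3, div_factor=100.0, pct_start=0.3):
    def cos_interp(start, end, frac):
        return start + (end - start) * (1 - math.cos(math.pi * frac)) / 2
    warmup = max(1, int(total_steps * pct_start))
    if step < warmup:  # warm-up phase
        return cos_interp(max_lr / div_factor, max_lr, step / warmup)
    frac = (step - warmup) / max(1, total_steps - warmup)
    return cos_interp(max_lr, max_lr / (div_factor * 1e4), frac)  # annealing phase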
Example #4
if args.split_path is not None:
    copyfile(args.split_path, split_path)
    
#from sklearn.model_selection import train_test_split
#train,test = train_test_split(df, test_size=0.20, random_state=42,stratify=df['Ebin'].values)
print(args.batch_size)
train_loader = schnetpack2.custom.data.AtomsLoader(
    data_train, batch_size=args.batch_size, sampler=RandomSampler(data_train),
    num_workers=0 * torch.cuda.device_count(),  # 0 disables worker processes; was 9*torch.cuda.device_count()
    pin_memory=True)
val_loader = schnetpack2.custom.data.AtomsLoader(
    data_val, batch_size=args.batch_size,
    num_workers=9 * torch.cuda.device_count(), pin_memory=True)
mean, stddev = train_loader.get_statistics('energy', False)
#mean, stddev = torch.tensor([-1.5115]), torch.tensor([1.2643])
#stddev = 29.6958
print(mean, stddev)

model = get_model(train_args,
                  atomref=None,
                  mean=torch.FloatTensor([mean]),
                  stddev=torch.FloatTensor([stddev]),
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader, val_loader, collate_fn=schnetpack2.custom.data.collate_atoms)
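
# The mean/stddev handed to get_model standardize the energy targets: the
# network predicts in normalized units and the output is rescaled. A sketch of
# the scale-shift output layer this implies (SchNetPack-style; the class here
# is illustrative, not taken from this excerpt):

class ScaleShiftSketch(torch.nn.Module):
    def __init__(self, mean, stddev):
        super().__init__()
        self.register_buffer("mean", mean)
        self.register_buffer("stddev", stddev)

    def forward(self, y_normalized):
        # rescale standardized predictions back to physical units
        return y_normalized * self.stddev + self.mean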


learn = Learner(data, model, model_dir=args.modelpath)

learn.purge()
learn.opt_func = Adam
learn.loss_func = partial(MSEloss, kf=1.0, ke=0.1)
learn.metrics = [Emetric, Fmetric]
#learn.load('trainbulkVsurf128epochs')
torch.cuda.empty_cache()
print(sys.argv)
learn.fit_one_cycle(cyc_len=int(sys.argv[3]), max_lr=float(sys.argv[4]), moms=(0.95, 0.85),
                    div_factor=150.0, pct_start=0.05, wd=1e-2, no_grad_val=False)
print(learn.save(sys.argv[1], return_path=True))
Example #5
print(mean, stddev)
print(mean_forces, stddev_forces)

model = get_model(train_args,
                  atomref=None,
                  mean=mean,
                  stddev=stddev,
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader,
                 val_loader,
                 collate_fn=schnetpack2.data.collate_atoms)

#print(hasattr(list(model.children())[-1].sigma_net[1].out_net[-1].weight, "dict"))

learn = Learner(data, model, model_dir=args.modelpath)

learn.split(split_sigma_net)

#learn.purge()
if args.sdr:
    learn.opt_func = Adam_sdr
else:
    learn.opt_func = Adam

mean, stddev = torch.tensor([-1202.6432]), torch.tensor([12.3304])
#mean, stddev = torch.tensor([-1202.6432,0]), torch.tensor([12.3304,1])

if args.uncertainty_forces:
    #    learn.loss_func = partial(NLLMSEloss_forces,mean=mean.cuda(), std=stddev.cuda(),kf=100.0,ke=1.0)#best before forces rescaling
    learn.loss_func = partial(NLLMSEloss_forces,
                              mean=mean.cuda(),
                              std=stddev.cuda(),
                              kf=100.0, ke=1.0)  # tail truncated in the source; values taken from the commented call above
Example #6
mean, stddev = torch.tensor([-1202.6432]), torch.tensor([12.3304])  # enclosing block truncated in this excerpt

print(mean, stddev)
print(mean_forces, stddev_forces)

model = get_model(train_args,
                  atomref=None,
                  mean=torch.FloatTensor([mean]),
                  stddev=torch.FloatTensor([stddev]),
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader,
                 val_loader,
                 collate_fn=schnetpack2.data.collate_atoms)

learn = Learner(data, model, model_dir=args.modelpath)

#pdb.set_trace()
learn.opt_func = Adam
#learn.loss_func = partial(NLLMSEloss_forces, mean=mean.cuda(), std=stddev.cuda(), mean_forces=mean_forces.cuda(), std_forces=stddev_forces.cuda(), kf=0.001,ke=1)

if args.num_inducing_points:
    learn.loss_func = partial(
        NLLMSEloss,
        mean=mean.cuda(),
        std=stddev.cuda(),
        kf=0.5,
        ke=1.0,
        kgamma=0.0001)  #partial(MSEloss, kf=0.1,ke=1,kef=0)
else:
    learn.loss_func = partial(MSEloss, kf=0.1, ke=1, kef=0)
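
# NLLMSEloss is not defined in this excerpt; judging from its name and the
# mean/std arguments it presumably combines a Gaussian negative log-likelihood
# on the energy (with a predicted variance) and MSE terms. A sketch of the
# NLL ingredient (up to an additive constant):

def gaussian_nll_sketch(mu, log_var, target):
    return 0.5 * (log_var + (target - mu) ** 2 / log_var.exp()).mean()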
mean, stddev = train_loader.get_statistics('energy', False)
#mean, stddev = torch.tensor([-1.5115]), torch.tensor([1.2643])
#stddev = 29.6958
print(mean, stddev)

model = get_model(train_args,
                  atomref=None,
                  mean=torch.FloatTensor([mean]),
                  stddev=torch.FloatTensor([stddev]),
                  train_loader=train_loader,
                  parallelize=args.parallel)
data = DataBunch(train_loader,
                 val_loader,
                 collate_fn=schnetpack2.custom.data.collate_atoms)

learn = Learner(data, model, model_dir=args.modelpath)

learn.purge()
learn.opt_func = Adam
learn.loss_func = partial(MSEloss, kf=1.0, ke=0.1)
learn.metrics = [Emetric, Fmetric]
#learn.load('trainbulkVsurf128epochs')
import matplotlib.pyplot as plt
import numpy as np


def plot_recorder(recorder, save_name, skip_start: int = 10, skip_end: int = 5):
    "Plot learning rate and losses, trimmed between `skip_start` and `skip_end`."
    # Body truncated in the source; reconstructed from fastai v1's Recorder
    # attributes (`lrs`, `losses`); the min-gradient option is omitted here.
    end = -skip_end if skip_end > 0 else None
    lrs, losses = recorder.lrs[skip_start:end], recorder.losses[skip_start:end]
    fig, ax = plt.subplots()
    ax.plot(lrs, [float(l) for l in losses])
    ax.set_xscale('log')
    ax.set_xlabel('Learning rate')
    ax.set_ylabel('Loss')
    fig.savefig('{}.png'.format(save_name))