# Imports assumed from the I-ReaxFF (irff) package layout; adjust to your install.
from irff.reax import ReaxFF
from irff.dingtalk import send_msg


def run(step=50000, convergence=0.99, loss_conver=20.0, writelib=1000,
        opt=None, cons=['val', 'vale', 'lp3', 'cutoff'],
        print_step=10, lr=1.0e-4, maxcycle=20, batch=50):
    # Restart training until the accuracy target is reached or maxcycle is exhausted.
    # 'cons' lists force-field parameters held fixed during fitting.
    accu, loss = 0.0, 100.0
    c = 0
    while accu < convergence and c < maxcycle:
        loss, accu, accMax, p, zpe, i = train_reax(step=step, opt=opt, cons=cons,
                                                   convergence=convergence, lr=lr,
                                                   writelib=writelib,
                                                   print_step=print_step,
                                                   batch=batch)
        if loss > loss_conver and accu > convergence:
            # Accuracy target met while the loss is still high: raise the
            # target slightly so the next cycle keeps optimizing.
            convergence = accu + 0.0003
            # system('cp ffield.json ffield%dt%de%df%d.json' %(int(loss),messages,ef,fm))
        c += 1
    if loss > 0.0:
        send_msg('- Convergence reached, the loss %7.4f and accuracy %7.4f.' % (loss, accu))

def train_reax(writelib=1000, print_step=10, step=500, opt=None, cons=None,
               lr=1.0e-4, convergence=0.97, lossConvergence=100.0, batch=50):
    # One training cycle: build the ReaxFF graph, optimize, and return results.
    rn = ReaxFF(libfile='ffield.json',
                direcs=direcs,           # structure label -> trajectory directory, module-level
                dft='siesta',
                optword='nocoul',
                opt=opt,                 # forward the caller's optimization choices
                cons=cons,               # and parameter constraints
                batch_size=batch,
                losFunc='n2',            # loss function: n2, abs, mse, or huber
                lambda_bd=10000.0,
                lambda_me=0.01,
                weight={'al4': 2.0, 'others': 2.0},
                convergence=convergence,
                lossConvergence=lossConvergence)
    # Optimizers: GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, RMSPropOptimizer
    loss, accu, accMax, i, zpe = rn.run(learning_rate=lr, step=step,
                                        print_step=print_step, writelib=writelib)

    # Locate the most recent force-field library written to disk.
    libstep = int(i - i % writelib)
    if i == libstep:
        libstep = libstep - writelib
    if libstep <= 0:
        ffd = 'ffield.json'
    else:
        ffd = 'ffield_' + str(libstep) + '.json'

    if loss == 0.0 and accu == 0.0:
        send_msg('- Warning: the loss is NaN, parameters from %s changed automatically ...' % ffd)
        return 0.0, 1.0, 1.0, None, None, i
        # with open(ffd, 'r') as fj:
        #     j = js.load(fj)
        # ic = Init_Check(nanv=nanv)
        # j['p'] = ic.auto(j['p'])
        # ic.close()
        # with open('ffield.json', 'w') as fj:
        #     js.dump(j, fj, sort_keys=True, indent=2)

    p = rn.p_
    ME = rn.MolEnergy_
    rn.close()
    return loss, accu, accMax, p, ME, i
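
# A minimal usage sketch, not part of the original script: `direcs` maps
# structure labels to directories holding the SIESTA training trajectories
# and must exist at module level before ReaxFF is constructed. The entry
# below is a hypothetical placeholder; point it at your own data.
direcs = {'al4': 'al4-siesta'}   # hypothetical training-set entry

if __name__ == '__main__':
    # Fit with the default constrained parameters ('val', 'vale', 'lp3',
    # 'cutoff') held fixed, restarting up to maxcycle=20 training cycles.
    run(step=50000, convergence=0.99, lr=1.0e-4, batch=50)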