# Example 1
def train_reax(writelib=1000,
               print_step=10,
               step=500,
               opt=None,
               cons=None,
               lr=1.0e-4,
               convergence=0.97,
               lossConvergence=100.0,
               batch=50):
    """Train a ReaxFF force field and return the final training statistics.

    Args:
        writelib: interval (in steps) at which parameter libraries are written.
        print_step: interval (in steps) at which progress is printed.
        step: total number of training steps.
        opt: parameters to optimize, forwarded to ReaxFF (None = its default).
        cons: parameters to hold constant, forwarded to ReaxFF.
        lr: learning rate passed to the optimizer.
        convergence: accuracy convergence criterion for ReaxFF.
        lossConvergence: loss convergence criterion for ReaxFF.
        batch: training batch size.

    Returns:
        (loss, accu, accMax, p, ME, i): final loss, accuracy, maximum
        accuracy, optimized parameters, molecular energies, and last step
        index. If the loss went NaN (reported as loss == accu == 0.0),
        returns (0.0, 1.0, 1.0, None, None, i) after sending a warning.
    """
    # NOTE(review): `direcs` and `send_msg` are module-level names defined
    # elsewhere in this project.
    rn = ReaxFF(libfile='ffield.json',
                direcs=direcs,
                dft='siesta',
                optword='nocoul',
                opt=opt,      # fixed: was hard-coded None, ignoring the parameter
                cons=cons,    # fixed: was hard-coded None, ignoring the parameter
                batch_size=batch,
                losFunc='n2',  # loss function: one of n2, abs, mse, huber
                lambda_bd=10000.0,
                lambda_me=0.01,
                weight={
                    'al4': 2.0,
                    'others': 2.0
                },
                convergence=convergence,
                lossConvergence=lossConvergence
                )

    # Optimizer choices: GradientDescentOptimizer, AdamOptimizer,
    # AdagradOptimizer, RMSPropOptimizer.
    loss, accu, accMax, i, zpe = rn.run(learning_rate=lr,
                                        step=step,
                                        print_step=print_step,
                                        writelib=writelib)

    # Locate the most recently *completed* library dump: libraries are
    # written every `writelib` steps as ffield_<step>.json; step 0 (or
    # earlier) falls back to the base ffield.json.
    libstep = int(i - i % writelib)
    if i == libstep:
        libstep = libstep - writelib
    if libstep <= 0:
        ffd = 'ffield.json'
    else:
        ffd = 'ffield_' + str(libstep) + '.json'

    if loss == 0.0 and accu == 0.0:
        # loss == accu == 0.0 is the sentinel ReaxFF.run uses for a NaN loss.
        send_msg(
            '-  Warning: the loss is NaN, parameters from %s changed auomatically ...'
            % ffd)
        return 0.0, 1.0, 1.0, None, None, i

    p = rn.p_
    ME = rn.MolEnergy_

    rn.close()
    return loss, accu, accMax, p, ME, i
# Example 2
def z():
    """Fit only the atomic-energy parameters of a ReaxFF force field.

    Relies on the module-level names `direcs` and `batch`. Runs 1000
    optimization steps, writing the library at step 1000, then closes
    the session. Returns None.
    """
    # Removed an unused local `opt = [...],` whose trailing comma silently
    # made it a one-element tuple; only ['atomic'] is optimized below.
    rn = ReaxFF(libfile='ffield',
                direcs=direcs,
                dft='siesta',
                atomic=True,
                optword='nocoul',
                opt=['atomic'],   # optimize atomic-energy terms only
                nn=False,
                cons=None,
                pkl=True,
                batch_size=batch,
                losFunc='n2',
                bo_penalty=10000.0)

    # Optimizer choices: GradientDescentOptimizer, AdamOptimizer,
    # AdagradOptimizer, RMSPropOptimizer.
    # NOTE(review): learning_rate=100 is unusually large — presumably
    # intentional for this coarse atomic-energy fit; confirm.
    rn.run(learning_rate=100, step=1000, print_step=10, writelib=1000)
    rn.close()
# Example 3
def r():
    """Optimize the variables named in the module-level `vto` list.

    Builds a ReaxFF session over `direcs` with a small bond-order penalty,
    runs 10000 Adam steps, and closes the session. Returns None.
    """
    settings = dict(libfile='ffield',
                    direcs=direcs,
                    dft='siesta',
                    atomic=True,
                    InitCheck=False,
                    optword='nocoul',
                    VariablesToOpt=vto,
                    pkl=True,
                    batch_size=batch,
                    losFunc='n2',
                    bo_penalty=10.0)
    reax = ReaxFF(**settings)

    # Optimizer choices: GradientDescentOptimizer, AdamOptimizer,
    # AdagradOptimizer, RMSPropOptimizer (method='SGD' also accepted).
    reax.run(learning_rate=1.0e-4,
             step=10000,
             method='AdamOptimizer',
             print_step=10,
             writelib=1000)
    reax.close()