def train_data(images, setup_only=False):
    label = 'nodeplot_test/calc'
    train_images = images
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(5, 5)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02,
                                     'force_rmse': 0.02})
    calc.model.lossfunction = loss
    if not setup_only:
        calc.train(images=train_images,)
        for image in train_images:
            print("energy =", calc.get_potential_energy(image))
            print("forces =", calc.get_forces(image))
    else:
        images = hash_images(train_images)
        calc.descriptor.calculate_fingerprints(images=images,
                                               log=calc.log,
                                               cores=1,
                                               calculate_derivatives=False)
        calc.model.fit(trainingimages=images,
                       descriptor=calc.descriptor,
                       log=calc.log,
                       cores=1,
                       only_setup=True)
    return calc
def test_gaussian():
    label = 'noforce_test'
    if not os.path.exists(label):
        os.mkdir(label)
    print('Generating data.')
    images = generate_data(10)
    print('Training Gaussian-neural network.')
    calc = Amp(descriptor=Gaussian(),
               label=os.path.join(label, 'Gaussian'),
               regression=NeuralNetwork(hiddenlayers=(5, 5)))
    calc.train(images, force_goal=None)
def calc_train_images(images, HL, E_conv, f_conv, f_coeff, ncore):
    Hidden_Layer = tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    cores = {'localhost': ncore}  # 'localhost' suppresses SSH communication between nodes
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=Hidden_Layer),
               cores=cores)
    if f_conv <= 0.0:
        convergence = {'energy_rmse': E_conv}
    else:
        convergence = {'energy_rmse': E_conv, 'force_rmse': f_conv}
    calc.model.lossfunction = LossFunction(convergence=convergence,
                                           force_coefficient=f_coeff)
    # calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
    return
def train_test():
    label = 'train_test/calc'
    train_images = generate_data(2)
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(3, 3)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02,
                                     'force_rmse': 0.02})
    calc.model.lossfunction = loss
    calc.train(images=train_images,)
    for image in train_images:
        print("energy =", calc.get_potential_energy(image))
        print("forces =", calc.get_forces(image))
def test(): "FingerprintPlot test.""" generate_data(2, filename='fpplot-training.traj') calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(), label='fpplot-test' ) calc.model.lossfunction = LossFunction(convergence={'energy_rmse': 1.00, 'force_rmse': 1.00}) calc.train(images='fpplot-training.traj') images = ase.io.Trajectory('fpplot-training.traj') fpplot = FingerprintPlot(calc) fpplot(images) fpplot(images, overlay=images[0]) fpplot(images, overlay=[images[1][2], images[0][-1]])
def calc_train_images(images, HL, E_conv, f_conv, ncore):
    Hidden_Layer = tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=Hidden_Layer),
               cores=ncore)
    if f_conv <= 0.0:
        calc.model.lossfunction = LossFunction(
            convergence={'energy_rmse': E_conv})
    else:
        calc.model.lossfunction = LossFunction(convergence={
            'energy_rmse': E_conv,
            'force_rmse': f_conv
        })
    # calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
    return
def test_none():
    label = 'force_test'
    if not os.path.exists(label):
        os.mkdir(label)
    print('Generating data.')
    all_images = generate_data(4)
    train_images, test_images = randomize_images(all_images)

    print('Training none-neural network.')
    calc1 = Amp(descriptor=None,
                label=os.path.join(label, 'none'),
                regression=NeuralNetwork(hiddenlayers=(5, 5)))
    calc1.train(train_images, energy_goal=0.01, force_goal=0.05,
                global_search=None)

    print('Testing none-neural network.')
    energies1 = []
    for image in all_images:
        energies1.append(calc1.get_potential_energy(atoms=image))

    print('Verify making new calc works.')
    params = calc1.todict()
    calc2 = Amp(**params)
    energies2 = []
    for image in all_images:
        energies2.append(calc2.get_potential_energy(atoms=image))
    assert energies1 == energies2

    print('Verifying can move an atom and get new energy.')
    image = all_images[0]
    image.set_calculator(calc2)
    e1 = image.get_potential_energy(apply_constraint=False)
    f1 = image.get_forces(apply_constraint=False)
    image[0].x += 0.5  # perturb
    e2 = image.get_potential_energy(apply_constraint=False)
    f2 = image.get_forces(apply_constraint=False)
    assert e1 != e2
    assert not (f1 == f2).all()
def calc_train_images(images, HL, E_conv, f_conv, f_coeff, ncore, amp_pot=None):
    Hidden_Layer = tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    cores = {'localhost': ncore}  # 'localhost' suppresses SSH communication between nodes
    ### Load "amp.amp" if an existing potential is supplied;
    ### otherwise build a new calculator (the else prevents the loaded
    ### potential from being discarded).
    if amp_pot:
        calc = Amp.load(amp_pot)
    else:
        calc = Amp(descriptor=Gaussian(),
                   model=NeuralNetwork(hiddenlayers=Hidden_Layer),
                   cores=cores)
    ### Global Search in Param Space
    Annealer(calc=calc, images=images, Tmax=20, Tmin=1, steps=4000)
    if f_conv <= 0.0:
        E_maxresid = E_conv * 3
        # convergence = {'energy_rmse': E_conv}
        convergence = {'energy_rmse': E_conv, 'energy_maxresid': E_maxresid}
    else:
        convergence = {'energy_rmse': E_conv, 'force_rmse': f_conv}
    calc.model.lossfunction = LossFunction(convergence=convergence,
                                           force_coefficient=f_coeff)  # setting
    calc.train(images=images, overwrite=True)
    return
def train_test():
    """Gaussian/tflow train test."""
    perform, reason = check_perform()
    if not perform:
        print('Skipping this test because {}.'.format(reason))
        return

    from amp.model.tflow import NeuralNetwork
    label = 'train_test/calc'
    train_images = generate_data(2)
    convergence = {'energy_rmse': 0.02, 'force_rmse': 0.02}

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(3, 3),
                                   convergenceCriteria=convergence),
               label=label,
               cores=1)

    calc.train(images=train_images,)
    for image in train_images:
        print("energy =", calc.get_potential_energy(image))
        print("forces =", calc.get_forces(image))
def train_test():
    """Gaussian/Neural train test."""
    label = 'train_test/calc'
    train_images = generate_data(2)

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(3, 3)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02,
                                     'force_rmse': 0.03})
    calc.model.lossfunction = loss

    calc.train(images=train_images,)
    for image in train_images:
        print("energy = %s" % str(calc.get_potential_energy(image)))
        print("forces = %s" % str(calc.get_forces(image)))

    # Test that we can re-load this calculator and call it again.
    del calc
    calc2 = Amp.load(label + '.amp')
    for image in train_images:
        print("energy = %s" % str(calc2.get_potential_energy(image)))
        print("forces = %s" % str(calc2.get_forces(image)))
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 21:54:34 2020

@author: srava
"""
from ase.io import read
from ase.io.trajectory import Trajectory
import numpy as np
from ase.calculators.emt import EMT

from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.utilities import hash_images
from amp.model.neuralnetwork import NeuralNetwork

import matplotlib.pyplot as plt

calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(), label='calc')
calc.model.lossfunction.parameters['convergence'].update(
    {'energy_rmse': 0.05})
calc.train(images='training_data.traj')
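# Once training converges, the saved potential can be reused in a separate
# script as an ordinary ASE calculator. A minimal sketch, assuming the run
# above wrote 'calc.amp' (from label='calc') and that 'training_data.traj'
# holds the structures to re-evaluate; the frame index is only illustrative.
from ase.io import read
from amp import Amp

calc = Amp.load('calc.amp')
atoms = read('training_data.traj', index=0)
atoms.set_calculator(calc)
print('AMP energy:', atoms.get_potential_energy())
print('AMP forces:', atoms.get_forces())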
def test(): pwd = os.getcwd() os.mkdir(os.path.join(pwd, 'consistnone')) images = generate_images() count = 0 for global_search in [None, 'SA']: for fortran in [False, True]: for extend_variables in [False, True]: for data_format in ['db', 'json']: for save_memory in [False]: for cores in range(1, 7): string = 'consistnone/%s-%s-%s-%s-%s-%i' label = string % (global_search, fortran, extend_variables, data_format, save_memory, cores) if global_search is 'SA': global_search = \ SimulatedAnnealing(temperature=10, steps=5) calc = Amp(descriptor=None, regression=NeuralNetwork( hiddenlayers=(2, 1), activation='tanh', weights=weights, scalings=scalings, ), fortran=fortran, label=label) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10., cores=cores, data_format=data_format, save_memory=save_memory, global_search=global_search, extend_variables=extend_variables) if count == 0: reference_cost_function = calc.cost_function reference_energy_rmse = \ calc.energy_per_atom_rmse reference_force_rmse = calc.force_rmse ref_cost_fxn_variable_derivatives = \ calc.der_variables_cost_function else: assert (abs(calc.cost_function - reference_cost_function) < 10.**(-10.)), \ '''Cost function value for %r fortran, %r data format, %r save_memory, and %i cores is not consistent with the value of python version on single core.''' % (fortran, data_format, save_memory, cores) assert (abs(calc.energy_per_atom_rmse - reference_energy_rmse) < 10.**(-10.)), \ '''Energy rmse value for %r fortran, %r data format, %r save_memory, and %i cores is not consistent with the value of python version on single core.''' % (fortran, data_format, save_memory, cores) assert (abs(calc.force_rmse - reference_force_rmse) < 10.**(-10.)), \ '''Force rmse value for %r fortran, %r data format, %r save_memory, and %i cores is not consistent with the value of python version on single core.''' % (fortran, data_format, save_memory, cores) for _ in range( len(ref_cost_fxn_variable_derivatives)): assert (calc.der_variables_cost_function[_] - ref_cost_fxn_variable_derivatives[_] < 10.**(-10.)) '''Derivative of the cost function for %r fortran, %r data format, %r save_memory, and %i cores is not consistent with the value of python version on single core. ''' % (fortran, data_format, save_memory, cores) count = count + 1
#!/usr/bin/env python
from amp import Amp
from amp.descriptor import Behler
from amp.regression import NeuralNetwork

calc = Amp(label="./",
           descriptor=Behler(cutoff=6.0),
           regression=NeuralNetwork(hiddenlayers=(2, 16)))

calc.train("../train.db",
           cores=12,
           force_goal=None,
           extend_variables=False)
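# The script above uses the older Amp interface (amp.descriptor.Behler,
# amp.regression.NeuralNetwork, force_goal). A rough sketch of a comparable
# setup in the newer interface used by the other scripts in this collection;
# the translation and the energy_rmse value are assumptions, not a verified
# drop-in replacement.
from ase.db import connect
from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.model.neuralnetwork import NeuralNetwork
from amp.model import LossFunction

images = [row.toatoms() for row in connect('../train.db').select()]
calc = Amp(label='./',
           descriptor=Gaussian(cutoff=6.0),
           model=NeuralNetwork(hiddenlayers=(2, 16)),
           cores=12)
# Energy-only fit, mirroring force_goal=None above; 0.001 eV is illustrative.
calc.model.lossfunction = LossFunction(convergence={'energy_rmse': 0.001})
calc.train(images=images)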
def test(): """Gaussian/Neural numeric-analytic consistency.""" images = generate_data() regressor = Regressor(optimizer='BFGS') _G = make_symmetry_functions(type='G2', etas=[0.05, 5.], elements=['Cu', 'Pt']) _G += make_symmetry_functions(type='G4', etas=[0.005], zetas=[1., 4.], gammas=[1.], elements=['Cu', 'Pt']) Gs = {'Cu': _G, 'Pt': _G} calc = Amp(descriptor=Gaussian(Gs=Gs), model=NeuralNetwork( hiddenlayers=(2, 1), regressor=regressor, randomseed=42, ), cores=1) step = 0 for d in [None, 0.00001]: for fortran in [True, False]: for cores in [1, 2]: step += 1 label = \ 'numeric_analytic_test/analytic-%s-%i' % (fortran, cores) \ if d is None \ else 'numeric_analytic_test/numeric-%s-%i' \ % (fortran, cores) print(label) loss = LossFunction(convergence={ 'energy_rmse': 10**10, 'force_rmse': 10**10 }, d=d) calc.set_label(label) calc.dblabel = 'numeric_analytic_test/analytic-True-1' calc.model.lossfunction = loss calc.descriptor.fortran = fortran calc.model.fortran = fortran calc.cores = cores calc.train(images=images, ) if step == 1: ref_energies = [] ref_forces = [] for image in images: ref_energies += [calc.get_potential_energy(image)] ref_forces += [calc.get_forces(image)] ref_dloss_dparameters = \ calc.model.lossfunction.dloss_dparameters else: energies = [] forces = [] for image in images: energies += [calc.get_potential_energy(image)] forces += [calc.get_forces(image)] dloss_dparameters = \ calc.model.lossfunction.dloss_dparameters for image_no in range(2): diff = abs(energies[image_no] - ref_energies[image_no]) assert (diff < 10.**(-13.)), \ 'The calculated value of energy of image %i is ' \ 'wrong!' % (image_no + 1) for atom_no in range(len(images[0])): for i in range(3): diff = abs(forces[image_no][atom_no][i] - ref_forces[image_no][atom_no][i]) assert (diff < 10.**(-10.)), \ 'The calculated %i force of atom %i of ' \ 'image %i is wrong!' \ % (i, atom_no, image_no + 1) # Checks analytical and numerical dloss_dparameters for _ in range(len(ref_dloss_dparameters)): diff = abs(dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'The calculated value of loss function ' \ 'derivative is wrong!' # Checks analytical and numerical forces forces = [] for image in images: image.set_calculator(calc) forces += [calc.calculate_numerical_forces(image, d=d)] for atom_no in range(len(images[0])): for i in range(3): diff = abs(forces[image_no][atom_no][i] - ref_forces[image_no][atom_no][i]) print('{:3d} {:1d} {:7.1e}'.format(atom_no, i, diff)) assert (diff < 10.**(-6.)), \ 'The calculated %i force of atom %i of ' \ 'image %i is wrong! (Diff = %f)' \ % (i, atom_no, image_no + 1, diff)
from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.model.neuralnetwork import NeuralNetwork
from amp.model import LossFunction
from amp import analysis
import ase
from ase import io
import sys

calc = Amp(descriptor=Gaussian(),
           model=NeuralNetwork(),
           label="calc")
calc.model.lossfunction = LossFunction(convergence={
    'energy_rmse': 0.02,
    'force_rmse': 0.02
})

images = ase.io.read("water.extxyz", ":")
# images = ase.io.read("../datasets/COCu/COCu_pbc_500.traj", ":")
IMAGES = []
for i in range(100):
    IMAGES.append(images[i])

calc.train(IMAGES)
# analysis.plot_parity("calc.amp", IMAGES, overwrite=True)
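# A quick sanity check after training: compare fitted energies against the
# reference energies for frames outside the training set. A minimal sketch,
# assuming the run above saved 'calc.amp', that 'water.extxyz' contains more
# than the 100 training frames, and that each frame carries a reference energy.
calc_trained = Amp.load("calc.amp")
for image in images[100:105]:
    e_ref = image.get_potential_energy()   # reference energy stored in the file
    image.set_calculator(calc_trained)
    e_amp = image.get_potential_energy()   # energy from the trained network
    print("E_ref = %.4f eV   E_amp = %.4f eV" % (e_ref, e_amp))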
def train_amp(baseframe=200, traj='ethane.traj', convergence={ 'energy_rmse': 0.25, 'force_rmse': 0.5 }, elements=['C', 'H', 'O'], cores=4): """Gaussian/tflow train test.""" p = ple() label = 'amp' all_images = Trajectory(traj) nimg, mean_e = get_mean_energy(all_images) G = make_symmetry_functions(elements=elements, type='G2', etas=np.logspace(np.log10(0.05), np.log10(5.), num=4)) G += make_symmetry_functions(elements=elements, type='G5', etas=[0.005], zetas=[1., 4.], gammas=[+1., -1.]) G = {element: G for element in elements} # Gs=G if not isfile('amp.amp'): # print('\nset up calculator ...\n') calc = Amp(descriptor=Gaussian(mode='atom-centered', Gs=G), model=NeuralNetwork(hiddenlayers=(1024, 1024, 1024, 512, 512, 256, 256, 256, 256, 128, 128), convergenceCriteria=convergence, activation='tanh', energy_coefficient=1.0, force_coefficient=None, optimizationMethod='ADAM', parameters={'energyMeanScale': mean_e}, maxTrainingEpochs=100000), label=label, cores=cores) # 'l-BFGS-b' or 'ADAM' trained_images = [all_images[j] for j in range(0, baseframe)] calc.train(overwrite=True, images=trained_images) del calc else: calc = Amp.load('amp.amp') calc.model.parameters['convergence'] = convergence calc.model.lossfunction = LossFunction(convergence=convergence) trained_images = [all_images[j] for j in range(0, baseframe)] calc.train(overwrite=True, images=trained_images) del calc edfts, eamps, eamps_ = [], [], [] dolabel = True basestep = int(baseframe / tframe) system('epstopdf energies.eps') p.scatter(x, edft, eamp, eamp_, dolabel=dolabel) p.plot() plot_energies(edfts, eamps, eamp_=eamps_) system('epstopdf energies_scatter.eps')
images = [atoms]
g2_etas = [0.005]
g2_rs_s = [0] * 4
g4_etas = [0.005]
g4_zetas = [1., 4.]
g4_gammas = [1., -1.]
cutoff = 4
make_amp_descriptors_simple_nn(images, g2_etas, g2_rs_s,
                               g4_etas, g4_zetas, g4_gammas, cutoff)

G = make_symmetry_functions(elements=elements, type='G2', etas=g2_etas)
# Add Rs parameter (0.0 for default) to comply with my version of AMP
# for g in G:
#     g['Rs'] = 0.0
G += make_symmetry_functions(elements=elements, type='G4',
                             etas=g4_etas,
                             zetas=g4_zetas,
                             gammas=g4_gammas)

calc = Amp(descriptor=Gaussian(Gs=G, cutoff=4.),
           cores=ncores,
           model=NeuralNetwork(hiddenlayers=hiddenlayers))
calc.model.lossfunction = LossFunction(convergence=convergence,
                                       force_coefficient=0.001)
calc.train(images=images)
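# The fragment above relies on names defined elsewhere (atoms, elements,
# hiddenlayers, ncores, convergence). Illustrative values, consistent with the
# other scripts in this collection but assumed rather than taken from the
# original source, might look like this:
from ase.build import molecule

atoms = molecule('H2O')              # any ASE Atoms object; reference energies
                                     # and forces would be attached before training
elements = ['H', 'O']                # species present in the images
hiddenlayers = (10, 10)              # network architecture
ncores = 4                           # parallel processes for Amp
convergence = {'energy_rmse': 0.02,  # loss-function convergence criteria
               'force_rmse': 0.03}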
#!/usr/bin/env python
from amp import Amp
from amp.descriptor import Behler
from amp.regression import NeuralNetwork

calc = Amp(label="./",
           descriptor=Behler(cutoff=6.5),
           regression=NeuralNetwork(hiddenlayers=(2, 7)))

calc.train("../train.db",
           cores=16,
           energy_goal=0.0005,
           force_goal=None,
           extend_variables=False)
    {"type": "G2", "element": "Pd", "eta": 400., "Rs": 4.0},
    {"type": "G2", "element": "Pd", "eta": 400., "Rs": 5.0},
    {"type": "G2", "element": "Pd", "eta": 400., "Rs": 6.0},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 100.0, "theta_s": -1.0},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 100.0, "theta_s": -1.5},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 100.0, "theta_s": -1.75},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 100.0, "theta_s": -2.0},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 300.0, "theta_s": -2.15},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": -1.0, "zeta": 300.0, "theta_s": -2.25},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": 1.0, "zeta": 160.0, "theta_s": 0.0},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": 1.0, "zeta": 320.0, "theta_s": 0.25},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": 1.0, "zeta": 640.0, "theta_s": 0.35},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": 1.0, "zeta": 160.0, "theta_s": 0.5},
    {"type": "G4", "elements": ["Pd", "Pd"], "eta": 0.005, "gamma": 1.0, "zeta": 160.0, "theta_s": 0.75},
]}

calc = Amp(descriptor=Gaussian(Gs=Gs, cutoff=Cosine(6.0)),
           cores=24,
           model=NeuralNetwork(hiddenlayers=(50,),
                               activation='sigmoid',
                               maxTrainingEpochs=20000,
                               energy_coefficient=1.0,
                               force_coefficient=0.1,
                               optimizationMethod='l-BFGS-b',
                               convergenceCriteria={'energy_rmse': 0.05,
                                                    'force_rmse': 0.05}))

calc.train(images='train.traj', overwrite=True)
def test(): pwd = os.getcwd() os.mkdir(os.path.join(pwd, 'rotation_test')) for descriptor in [Gaussian(), Bispectrum(jmax=2.), Zernike(nmax=5)]: pwd = os.getcwd() os.mkdir(os.path.join(pwd, 'rotation_test/before_rot')) os.mkdir(os.path.join(pwd, 'rotation_test/after_rot')) # Non-rotated atomic configuration atoms = Atoms([ Atom('Pt', (0., 0., 0.)), Atom('Pt', (0., 0., 1.)), Atom('Pt', (0., 2., 1.)) ]) atoms.set_constraint(FixAtoms(indices=[0])) atoms.set_calculator(EMT()) atoms.get_potential_energy() atoms.get_forces(apply_constraint=False) calc = Amp(descriptor=descriptor, fortran=False, label='rotation_test/before_rot/') calc.train(images=[atoms], force_goal=None, energy_goal=10.**10., extend_variables=False, global_search=None, data_format='json') ########################################################################### # Randomly Rotated (and translated) atomic configuration rot = [random.random(), random.random(), random.random()] for i in range(1, len(atoms)): (atoms[i].x, atoms[i].y, atoms[i].z) = rotate_atom(atoms[i].x, atoms[i].y, atoms[i].z, rot[0] * np.pi, rot[1] * np.pi, rot[2] * np.pi) disp = [random.random(), random.random(), random.random()] for atom in atoms: atom.x += disp[0] atom.y += disp[1] atom.z += disp[2] calc = Amp(descriptor=descriptor, fortran=False, label='rotation_test/after_rot/') calc.train(images=[atoms], force_goal=None, energy_goal=10.**10., extend_variables=False, global_search=None, data_format='json') ########################################################################### fp1 = paropen('rotation_test/before_rot/fingerprints.json', 'rb') nonrotated_data = json.load(fp1) fp2 = paropen('rotation_test/after_rot/fingerprints.json', 'rb') rotated_data = json.load(fp2) for hash1, hash2 in zip(nonrotated_data.keys(), rotated_data.keys()): for index in nonrotated_data[hash1]: for _ in range(len(nonrotated_data[hash1][index])): assert abs(nonrotated_data[hash1][index][_] - rotated_data[hash2][index][_]) < 10.**(-7.) shutil.rmtree('rotation_test/before_rot') shutil.rmtree('rotation_test/after_rot')
else:
    label = label
    dblabel = dblabel

desc = Gaussian(cutoff=cutoff)
model = NeuralNetwork(hiddenlayers=framework)
calc = Amp(label=label,
           dblabel=dblabel,
           descriptor=desc,
           regression=model)

{% if optimizer %}
# Change optimization algorithm
from amp.regression import Regressor
from scipy.optimize import {{ optimizer }}
regressor = Regressor(optimizer={{ optimizer }})
calc.model.regressor = regressor
{% endif %}

# Train the network
calc.train(images=images,
           data_format='db',
           cores=cores,
           energy_goal=energy_rmse,
           force_goal=force_rmse,
           global_search=SimulatedAnnealing(temperature=70, steps=50),
           extend_variables=False)
def train_images(images):
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(10, 10, 10)))
    # Set convergence and force weighting in a single loss function; assigning
    # two separate LossFunction objects would discard the first one.
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': 0.001},
                                           force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
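# A minimal way to call the helper above, assuming a trajectory of reference
# calculations named 'training.traj' (the file name is illustrative).
import ase.io

images = ase.io.read('training.traj', index=':')
train_images(images)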
os.chdir(work_dir)

# Get atoms from the database
images = []
db = connect(db_path)
for d in db.select(['iteration<={}'.format(iteration), 'train_set=True']):
    atoms = db.get_atoms(d.id)
    del atoms.constraints
    images.append(atoms)

# Build Amp object
framework_str = "-".join([str(f) for f in framework])
label = label
dblabel = dblabel
desc = Gaussian(cutoff=cutoff)
model = NeuralNetwork(hiddenlayers=framework)
calc = Amp(label=label,
           dblabel=dblabel,
           descriptor=desc,
           model=model,
           cores=cores)
loss = LossFunction(convergence={'energy_rmse': energy_rmse,
                                 'force_rmse': force_rmse})
calc.model.lossfunction = loss

# Perform simulated annealing for global search
Annealer(calc=calc, images=images)

# Train the network
calc.train(images=images)
# import os
# from ase import Atoms, Atom, units
# import ase.io
from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.model.neuralnetwork import NeuralNetwork
from amp.model import LossFunction

calc = Amp(descriptor=Gaussian(),
           model=NeuralNetwork(hiddenlayers=(10, 10, 10)))
calc.model.lossfunction = LossFunction(convergence={
    'energy_rmse': 0.02,
    'force_rmse': 0.03
})
calc.train(images='geoopt_LCAO.traj')
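# The trained potential can then drive a new geometry optimization in ASE.
# A minimal sketch: 'geoopt_LCAO.traj' comes from the script above, 'amp.amp'
# is assumed to be the file Amp writes under its default label, and the fmax
# threshold and output file name are only illustrative.
from ase.io import read
from ase.optimize import BFGS
from amp import Amp

atoms = read('geoopt_LCAO.traj', index=-1)   # start from the last reference frame
atoms.set_calculator(Amp.load('amp.amp'))
BFGS(atoms, trajectory='geoopt_amp.traj').run(fmax=0.05)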
def non_periodic_0th_bfgs_step_test(): pwd = os.getcwd() os.mkdir(os.path.join(pwd, 'CuOPdbp')) os.mkdir(os.path.join(pwd, '_CuOPdbp')) os.mkdir(os.path.join(pwd, 'CuOPdbp/0')) os.mkdir(os.path.join(pwd, '_CuOPdbp/0')) os.mkdir(os.path.join(pwd, 'CuOPdbp/1')) os.mkdir(os.path.join(pwd, '_CuOPdbp/1')) os.mkdir(os.path.join(pwd, 'CuOPdbp/2')) os.mkdir(os.path.join(pwd, '_CuOPdbp/2')) ########################################################################### # Making the list of periodic image images = [ Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 0., 0.], [0., 2., 0.], [0., 0., 3.], [1., 0., 0.]])), Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 1., 0.], [1., 2., 1.], [-1., 1., 2.], [1., 3., 2.]])), Atoms(symbols='PdO', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[2., 1., -1.], [1., 2., 1.]])), Atoms(symbols='Pd2O', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[-2., -1., -1.], [1., 2., 1.], [3., 4., 4.]])), Atoms(symbols='Cu', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 0., 0.]])) ] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) ########################################################################### # Parameters Gs = { 'O': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.2, 'gamma': 0.3, 'zeta': 1 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }], 'Pd': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.2 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.4, 'gamma': 0.3, 'zeta': 4 }], 'Cu': [{ 'type': 'G2', 'element': 'Cu', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Cu', 'O'], 'eta': 0.2, 'gamma': 0.3, 'zeta': 1 }, { 'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }] } hiddenlayers = {'O': (2), 'Pd': (2), 'Cu': (2)} weights = OrderedDict([ ('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9], [-2.5, -1.5]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7], [-3.0, 2.0]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9], [-3.5, 0.5]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))])) ]) scalings = OrderedDict([ ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)])) ]) ########################################################################### # Correct values correct_cost = 7144.810783950215 correct_energy_rmse = 24.318837496017185 correct_force_rmse = 144.70282475062052 correct_der_cost_fxn = [ 0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656, 0.028312691567496464, 0.6012336354445753, 0.9659002689921986, -1.2897770059416218, -0.5718960935176884, -2.6425667221503035, -1.1960399246712894, 0, 0, -2.7256379713943852, -0.9080181026559658, -0.7739948323247023, 
-0.2915789426043727, -2.05998290443513, -0.6156374289747903, -0.0060865174621348985, -0.8296785483640939, 0.0008092646748983969, 0.041613027034688874, 0.003426469079592851, -0.9578004568876517, -0.006281929608090211, -0.28835884773094056, -4.2457774110285245, -4.317412094174614, -8.02385959091948, -3.240512651984099, -27.289862194996896, -26.8177742762254, -82.45107056053345, -80.6816768350809 ] ########################################################################### # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for global_search in [None, 'SA']: for fortran in [False, True]: for extend_variables in [False, True]: for data_format in ['db', 'json']: for save_memory in [False]: for cores in range(1, 7): string = 'CuOPdbp/0/%s-%s-%s-%s-%s-%i' label = string % (global_search, fortran, extend_variables, data_format, save_memory, cores) if global_search is 'SA': gs = \ SimulatedAnnealing(temperature=10, steps=5) elif global_search is None: gs = None print label calc = Amp(descriptor=Gaussian(cutoff=6.5, Gs=Gs), regression=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', ), fortran=fortran, label=label) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10., force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) assert (abs(calc.cost_function - correct_cost) < 10.**(-5.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - correct_energy_rmse) < 10.**(-10.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - correct_force_rmse) < 10 ** (-7)), \ 'The calculated value of force RMSE is wrong!' for _ in range(len(correct_der_cost_fxn)): assert(abs(calc.der_variables_cost_function[ _] - correct_der_cost_fxn[_]) < 10 ** (-9)), \ 'The calculated value of cost function \ derivative is wrong!' dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian(cutoff=6.5, Gs=Gs), regression=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', ), fortran=fortran, label=secondlabel, dblabel=dblabel) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10., force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) assert (abs(calc.cost_function - correct_cost) < 10.**(-5.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - correct_energy_rmse) < 10.**(-10.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - correct_force_rmse) < 10 ** (-7)), \ 'The calculated value of force RMSE is wrong!' for _ in range(len(correct_der_cost_fxn)): assert(abs(calc.der_variables_cost_function[ _] - correct_der_cost_fxn[_] < 10 ** (-9))), \ 'The calculated value of cost function \
def test(): """Guassian/Neural training. Checks consistency of pure-python and fortran versions. """ images = make_images() convergence = {'energy_rmse': 10.**10., 'energy_maxresid': 10.**10., 'force_rmse': 10.**10., 'force_maxresid': 10.**10., } regressor = Regressor(optimizer='BFGS') count = 0 for fortran in [False, True]: for cores in range(1, 2): string = 'consistgauss/%s-%i' label = string % (fortran, cores) calc = Amp(descriptor=Gaussian(cutoff=cutoff, Gs=Gs, fortran=fortran,), model=NeuralNetwork(hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation=activation, fprange=fingerprints_range, regressor=regressor,), label=label, cores=1) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images,) if count == 0: ref_loss = calc.model.lossfunction.loss ref_energy_loss = calc.model.lossfunction.energy_loss ref_force_loss = calc.model.lossfunction.force_loss ref_dloss_dparameters = \ calc.model.lossfunction.dloss_dparameters else: assert (abs(calc.model.lossfunction.loss - ref_loss) < 10.**(-10.)), \ '''Loss function value for %r fortran, and %i cores is not consistent with the value of python version on single core.''' % (fortran, cores) assert (abs(calc.model.lossfunction.energy_loss - ref_energy_loss) < 10.**(-9.)), \ '''Energy rmse value for %r fortran, and %i cores is not consistent with the value of python version on single core.''' % (fortran, cores) assert (abs(calc.model.lossfunction.force_loss - ref_force_loss) < 10.**(-9.)), \ '''Force rmse value for %r fortran, and %i cores is not consistent with the value of python version on single core.''' % (fortran, cores) for _ in range(len(ref_dloss_dparameters)): assert (calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_] < 10.**(-10.)) '''Derivative of the cost function for %r fortran, and %i cores is not consistent with the value of python version on single core. ''' % (fortran, cores) count = count + 1
def non_periodic_0th_bfgs_step_test(): # Making the list of periodic image images = [Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array( [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array( [[0., 0., 0.], [0., 2., 0.], [0., 0., 3.], [1., 0., 0.]])), Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array( [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array( [[0., 1., 0.], [1., 2., 1.], [-1., 1., 2.], [1., 3., 2.]])), Atoms(symbols='PdO', pbc=np.array([False, False, False], dtype=bool), cell=np.array( [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array( [[2., 1., -1.], [1., 2., 1.]])), Atoms(symbols='Pd2O', pbc=np.array([False, False, False], dtype=bool), cell=np.array( [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array( [[-2., -1., -1.], [1., 2., 1.], [3., 4., 4.]])), Atoms(symbols='Cu', pbc=np.array([False, False, False], dtype=bool), cell=np.array( [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array( [[0., 0., 0.]]))] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) # Parameters Gs = {'O': [{'type': 'G2', 'element': 'Pd', 'eta': 0.8}, {'type': 'G4', 'elements': [ 'Pd', 'Pd'], 'eta':0.2, 'gamma':0.3, 'zeta':1}, {'type': 'G4', 'elements': ['O', 'Pd'], 'eta':0.3, 'gamma':0.6, 'zeta':0.5}], 'Pd': [{'type': 'G2', 'element': 'Pd', 'eta': 0.2}, {'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta':0.9, 'gamma':0.75, 'zeta':1.5}, {'type': 'G4', 'elements': ['O', 'Pd'], 'eta':0.4, 'gamma':0.3, 'zeta':4}], 'Cu': [{'type': 'G2', 'element': 'Cu', 'eta': 0.8}, {'type': 'G4', 'elements': ['Cu', 'O'], 'eta':0.2, 'gamma':0.3, 'zeta':1}, {'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta':0.3, 'gamma':0.6, 'zeta':0.5}]} hiddenlayers = {'O': (2,), 'Pd': (2,), 'Cu': (2,)} weights = OrderedDict([('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9], [-2.5, -1.5]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7], [-3.0, 2.0]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9], [-3.5, 0.5]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))]))]) scalings = OrderedDict([('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)]))]) # Correct values ref_loss = 7144.8107853579895 ref_energyloss = (24.318837496016506 ** 2.) * 5 ref_forceloss = (144.70282477494519 ** 2.) 
* 5 ref_dloss_dparameters = np.array([0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656, 0.028312691567496464, 0.6012336354445753, 0.9659002689921986, -1.289777005924742, -0.5718960934643078, -2.642566722179569, -1.196039924610482, 0, 0, -2.72563797131018, -0.9080181024866707, -0.7739948323226851, -0.29157894253717415, -2.0599829042717404, -0.6156374289895887, -0.006086517460749253, -0.829678548408266, 0.0008092646745710161, 0.04161302703491613, 0.0034264690790135606, -0.957800456897051, -0.006281929606579444, -0.2883588477371198, -4.245777410962108, -4.3174120941045535, -8.02385959091948, -3.240512651984099, -27.289862194988853, -26.8177742762544, -82.45107056051073, -80.68167683508715]) ref_energy_maxresid = 54.21915548269209 ref_force_maxresid = 791.6736436232306 # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for fortran in [False, True]: for cores in range(1, 6): label = 'train-nonperiodic/%s-%i' % (fortran, cores) print label calc = Amp(descriptor=Gaussian(cutoff=6.5, Gs=Gs, fortran=fortran,), model=NeuralNetwork(hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', regressor=regressor, fortran=fortran,), label=label, dblabel=label, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images,) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-10.)), \ 'Calculated value of force RMSE is wrong!' diff = abs(calc.model.lossfunction.energy_maxresid - ref_energy_maxresid) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom max residual is wrong!' diff = abs(calc.model.lossfunction.force_maxresid - ref_force_maxresid) assert (diff < 10 ** (-10.)), \ 'Calculated value of force max residual is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-12.)), \ "Calculated value of loss function derivative is wrong!" dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian(cutoff=6.5, Gs=Gs, fortran=fortran,), model=NeuralNetwork(hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', regressor=regressor, fortran=fortran,), label=secondlabel, dblabel=dblabel, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images,) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-10.)), \ 'Calculated value of force RMSE is wrong!' diff = abs(calc.model.lossfunction.energy_maxresid - ref_energy_maxresid) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom max residual is wrong!' diff = abs(calc.model.lossfunction.force_maxresid - ref_force_maxresid) assert (diff < 10 ** (-10.)), \ 'Calculated value of force max residual is wrong!' 
for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-12.)), \ 'Calculated value of loss function derivative is wrong!'
#!/usr/bin/env python
from amp import Amp
from amp.descriptor import Gaussian
from amp.regression import NeuralNetwork
from ase.db import connect
from amp import SimulatedAnnealing

db = connect('../../../database/master.db')
images = []
for d in db.select('train_set=True'):
    atoms = db.get_atoms(d.id)
    del atoms.constraints
    images += [atoms]

for n in [2, 3]:
    calc = Amp(label='./',
               dblabel='../../',
               descriptor=Gaussian(cutoff=6.5),
               regression=NeuralNetwork(hiddenlayers=(2, n)))
    calc.train(images=images,
               data_format='db',
               cores=4,
               energy_goal=1e-2,
               force_goal=1e-1,
               global_search=SimulatedAnnealing(temperature=70, steps=50),
               extend_variables=False)
def non_periodic_0th_bfgs_step_test(): """Gaussian/Neural training non-periodic standard test. Compares results to that expected from separate mathematica calculations. """ images = [ Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 0., 0.], [0., 2., 0.], [0., 0., 3.], [1., 0., 0.]])), Atoms(symbols='PdOPd2', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 1., 0.], [1., 2., 1.], [-1., 1., 2.], [1., 3., 2.]])), Atoms(symbols='PdO', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[2., 1., -1.], [1., 2., 1.]])), Atoms(symbols='Pd2O', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[-2., -1., -1.], [1., 2., 1.], [3., 4., 4.]])), Atoms(symbols='Cu', pbc=np.array([False, False, False], dtype=bool), cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]), positions=np.array([[0., 0., 0.]])) ] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) # Parameters Gs = { 'O': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.2, 'gamma': 0.3, 'zeta': 1 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }], 'Pd': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.2 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.4, 'gamma': 0.3, 'zeta': 4 }], 'Cu': [{ 'type': 'G2', 'element': 'Cu', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Cu', 'O'], 'eta': 0.2, 'gamma': 0.3, 'zeta': 1 }, { 'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }] } hiddenlayers = {'O': (2, ), 'Pd': (2, ), 'Cu': (2, )} weights = OrderedDict([ ('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9], [-2.5, -1.5]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7], [-3.0, 2.0]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9], [-3.5, 0.5]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))])) ]) scalings = OrderedDict([ ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)])) ]) # Correct values if aseversion < 12: # EMT values have changed from 3.12.0 version ref_loss = 7144.8107853579895 ref_energyloss = (24.318837496016506**2.) * 5 ref_forceloss = (144.70282477494519**2.) 
* 5 ref_dloss_dparameters = np.array([ 0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656, 0.028312691567496464, 0.6012336354445753, 0.9659002689921986, -1.289777005924742, -0.5718960934643078, -2.642566722179569, -1.196039924610482, 0, 0, -2.72563797131018, -0.9080181024866707, -0.7739948323226851, -0.29157894253717415, -2.0599829042717404, -0.6156374289895887, -0.006086517460749253, -0.829678548408266, 0.0008092646745710161, 0.04161302703491613, 0.0034264690790135606, -0.957800456897051, -0.006281929606579444, -0.2883588477371198, -4.245777410962108, -4.3174120941045535, -8.02385959091948, -3.240512651984099, -27.289862194988853, -26.8177742762544, -82.45107056051073, -80.68167683508715 ]) ref_energy_maxresid = 54.21915548269209 ref_force_maxresid = 791.6736436232306 else: ref_loss = 7144.807220773296 ref_energyloss = (24.318829702548342**2.) * 5 ref_forceloss = (144.70279593472887**2.) * 5 ref_dloss_dparameters = np.array([ 0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656, 0.028312691567496464, 0.6012336354445753, 0.9659002689921986, -1.2897765357544038, -0.5718958286530584, -2.642565840915077, -1.1960394346870424, 0, 0, -2.7256370964673238, -0.9080177898160631, -0.7739945904033205, -0.29157882294526083, -2.0599825024556027, -0.6156371996742152, -0.006086514109432934, -0.8296782839032163, 0.0008092653341775424, 0.04161306816722683, 0.0034264692325982156, -0.9578001030483714, -0.006281927374160914, -0.28835874344086, -4.245775886469167, -4.317410633818672, -8.02385959091948, -3.240512651984099, -27.289853042932705, -26.81776520493048, -82.45104200076496, -80.68164887277251 ]) ref_energy_maxresid = 54.21913802238612 ref_force_maxresid = 791.6734866205463 # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for fortran in [False, True]: for cores in range(1, 6): label = 'train-nonperiodic/%s-%i' % (fortran, cores) print(label) calc = Amp(descriptor=Gaussian( cutoff=6.5, Gs=Gs, fortran=fortran, ), model=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', regressor=regressor, fortran=fortran, ), label=label, dblabel=label, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images, ) diff = abs(calc.model.lossfunction.loss - ref_loss) print("diff at 204 =", diff) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-10.)), \ 'Calculated value of force RMSE is wrong!' diff = abs(calc.model.lossfunction.energy_maxresid - ref_energy_maxresid) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom max residual is wrong!' diff = abs(calc.model.lossfunction.force_maxresid - ref_force_maxresid) assert (diff < 10 ** (-10.)), \ 'Calculated value of force max residual is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-12.)), \ "Calculated value of loss function derivative is wrong!" 
dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian( cutoff=6.5, Gs=Gs, fortran=fortran, ), model=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='sigmoid', regressor=regressor, fortran=fortran, ), label=secondlabel, dblabel=dblabel, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images, ) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-10.)), \ 'Calculated value of force RMSE is wrong!' diff = abs(calc.model.lossfunction.energy_maxresid - ref_energy_maxresid) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom max residual is wrong!' diff = abs(calc.model.lossfunction.force_maxresid - ref_force_maxresid) assert (diff < 10 ** (-10.)), \ 'Calculated value of force max residual is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-12.)), \ 'Calculated value of loss function derivative is wrong!'
def test(): pwd = os.getcwd() os.mkdir(os.path.join(pwd, 'CuOPdnone')) os.mkdir(os.path.join(pwd, '_CuOPdnone')) ########################################################################### # Parameters weights = OrderedDict([(1, np.array([[1., 2.5], [0., 1.5], [0., -1.5], [3., 9.], [1., -2.5], [2., 3.], [2., 2.5], [3., 0.], [-3.5, 1.], [5., 3.], [-2., 2.5], [-4., 4.], [0., 0.]])), (2, np.array([[1.], [2.], [0.]])), (3, np.array([[3.5], [0.]]))]) scalings = OrderedDict([('intercept', 3.), ('slope', 2.)]) images = generate_images() ########################################################################### # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for global_search in [None, 'SA']: for fortran in [False, True]: for extend_variables in [False, True]: for data_format in ['db', 'json']: for save_memory in [False]: for cores in range(1, 7): string = 'CuOPdnone/%s-%s-%s-%s-%s-%i' label = string % (global_search, fortran, extend_variables, data_format, save_memory, cores) if global_search is 'SA': gs = \ SimulatedAnnealing(temperature=10, steps=5) elif global_search is None: gs = None print label calc = Amp( descriptor=None, regression=NeuralNetwork( hiddenlayers=(2, 1), activation='tanh', weights=weights, scalings=scalings, ), fortran=fortran, label=label, ) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10., force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) # Check for consistency between the two models assert (abs(calc.cost_function - cost_function) < 10.**(-5.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - energy_rmse) < 10.**(-5.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - force_rmse) < 10 ** (-5)), \ 'The calculated value of force RMSE is wrong!' dblabel = label secondlabel = '_' + label calc = Amp(descriptor=None, regression=NeuralNetwork( hiddenlayers=(2, 1), activation='tanh', weights=weights, scalings=scalings, ), fortran=fortran, label=secondlabel, dblabel=dblabel) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10., force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) # Check for consistency between the two models assert (abs(calc.cost_function - cost_function) < 10.**(-5.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - energy_rmse) < 10.**(-5.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - force_rmse) < 10 ** (-5)), \ 'The calculated value of force RMSE is wrong!'
#!/usr/bin/env python
from amp import Amp
from amp.descriptor import *
from amp.regression import *

calc = Amp(label="./",
           descriptor=Behler(cutoff=6.0),
           regression=NeuralNetwork(hiddenlayers=(2, 3)))

calc.train("../train.db",       # The training data
           cores=1,
           global_search=None,  # have not found the simulated annealing feature useful
           extend_variables=False)  # feature does not work properly and will crash
def train_rnn(baseframe=100, tframe=8, total_step=10, traj='ethane.traj', convergence={ 'energy_rmse': 0.25, 'force_rmse': 0.5 }, elements=['C', 'H', 'O'], hiddenlayers=(64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64), optim='ADAM', cores=4): """Gaussian/tflow train test.""" p = ple() label = 'amp' all_images = Trajectory(traj) nimg, mean_e = get_mean_energy(all_images) G = make_symmetry_functions(elements=elements, type='G2', etas=np.logspace(np.log10(0.05), np.log10(5.), num=4)) G += make_symmetry_functions(elements=elements, type='G5', etas=[0.005], zetas=[1., 4.], gammas=[+1., -1.]) G = {element: G for element in elements} # Gs=G if not isfile('amp.amp'): print('\nset up calculator ...\n') calc = Amp(descriptor=Gaussian(mode='atom-centered', Gs=G), model=NeuralNetwork(hiddenlayers=hiddenlayers, convergenceCriteria=convergence, activation='tanh', energy_coefficient=1.0, force_coefficient=None, optimizationMethod=optim, parameters={'energyMeanScale': mean_e}, maxTrainingEpochs=100000), label=label, cores=cores) # 'l-BFGS-b' or 'ADAM' trained_images = [all_images[j] for j in range(0, baseframe)] calc.train(overwrite=True, images=trained_images) del calc else: calc = Amp.load('amp.amp') calc.model.parameters['convergence'] = convergence calc.model.lossfunction = LossFunction(convergence=convergence) trained_images = [all_images[j] for j in range(0, baseframe)] calc.train(overwrite=True, images=trained_images) del calc tstep = int((nimg - baseframe) / tframe) if total_step > tstep: total_step = tstep print('Max train cycle of %d is allowed.' % total_step) edfts, eamps, eamps_ = [], [], [] dolabel = True basestep = int(baseframe / tframe) for step in range(basestep, total_step + basestep): new_images = [ all_images[j] for j in range(0 + step * tframe, tframe + step * tframe) ] trained_images.extend(new_images) x, edft, eamp, eamp_ = [], [], [], [] ii = step * tframe # ----- test ----- calc1 = Amp.load('amp.amp') for i, image in enumerate(new_images): x.append(ii) eamp_.append(calc1.get_potential_energy(image)) eamps_.append(eamp_[-1]) edft.append(image.get_potential_energy()) edfts.append(edft[-1]) ii += 1 del calc1 # ----- train ----- calc = Amp.load('amp.amp') calc.model.lossfunction = LossFunction(convergence=convergence) # calc.model.convergenceCriteria=convergence calc.train(overwrite=True, images=trained_images) del calc # ----- test ----- calc2 = Amp.load('amp.amp') print('\n---- current training result ---- \n') for i, image in enumerate(new_images): eamp.append(calc2.get_potential_energy(image)) eamps.append(eamp[-1]) print("energy(AMP) = %f energy(DFT) = %f" % (eamp[-1], edft[i])) # print("forces = %s" % str(calc2.get_forces(image))) del calc2 plot_energies(edfts, eamps, eamp_=None) system('epstopdf energies.eps') p.scatter(x, edft, eamp, eamp_, dolabel=dolabel) if dolabel: dolabel = False p.plot() system('epstopdf energies_scatter.eps')
def train_images(images, HL, E_conv):
    Hidden_Layer = tuple(HL)
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=Hidden_Layer))
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': E_conv})
    # calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
def test(): images = generate_data(2) regressor = Regressor(optimizer='BFGS') calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(hiddenlayers=(3, 3), regressor=regressor,), cores=1) step = 0 for d in [None, 0.00001]: for fortran in [True, False]: for cores in [1, 2]: step += 1 label = \ 'numeric_analytic_test/analytic-%s-%i' % (fortran, cores) \ if d is None \ else 'numeric_analytic_test/numeric-%s-%i' \ % (fortran, cores) print(label) loss = LossFunction(convergence={'energy_rmse': 10 ** 10, 'force_rmse': 10 ** 10}, d=d) calc.set_label(label) calc.dblabel = 'numeric_analytic_test/analytic-True-1' calc.model.lossfunction = loss calc.descriptor.fortran = fortran calc.model.fortran = fortran calc.cores = cores calc.train(images=images,) if step == 1: ref_energies = [] ref_forces = [] for image in images: ref_energies += [calc.get_potential_energy(image)] ref_forces += [calc.get_forces(image)] ref_dloss_dparameters = \ calc.model.lossfunction.dloss_dparameters else: energies = [] forces = [] for image in images: energies += [calc.get_potential_energy(image)] forces += [calc.get_forces(image)] dloss_dparameters = \ calc.model.lossfunction.dloss_dparameters for image_no in range(2): diff = abs(energies[image_no] - ref_energies[image_no]) assert (diff < 10.**(-13.)), \ 'The calculated value of energy of image %i is ' \ 'wrong!' % (image_no + 1) for atom_no in range(6): for i in range(3): diff = abs(forces[image_no][atom_no][i] - ref_forces[image_no][atom_no][i]) assert (diff < 10.**(-10.)), \ 'The calculated %i force of atom %i of ' \ 'image %i is wrong!' \ % (i, atom_no, image_no + 1) # Checks analytical and numerical dloss_dparameters for _ in range(len(ref_dloss_dparameters)): diff = abs(dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'The calculated value of loss function ' \ 'derivative is wrong!' # Checks analytical and numerical forces forces = [] for image in images: image.set_calculator(calc) forces += [calc.calculate_numerical_forces(image, d=d)] for atom_no in range(6): for i in range(3): diff = abs(forces[image_no][atom_no][i] - ref_forces[image_no][atom_no][i]) assert (diff < 10.**(-9.)), \ 'The calculated %i force of atom %i of ' \ 'image %i is wrong!' % (i, atom_no, image_no + 1)
def periodic_0th_bfgs_step_test(): """Gaussian/Neural training periodic standard test. Compares results to that expected from separate mathematica calculations. """ # Making the list of images images = [ Atoms(symbols='PdOPd', pbc=np.array([True, False, False], dtype=bool), cell=np.array([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array([[0.5, 1., 0.5], [1., 0.5, 1.], [1.5, 1.5, 1.5]])), Atoms(symbols='PdO', pbc=np.array([True, True, False], dtype=bool), cell=np.array([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array([[0.5, 1., 0.5], [1., 0.5, 1.]])), Atoms(symbols='Cu', pbc=np.array([True, True, False], dtype=bool), cell=np.array([[1.8, 0., 0.], [0., 1.8, 0.], [0., 0., 1.8]]), positions=np.array([[0., 0., 0.]])) ] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) # Parameters Gs = { 'O': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }], 'Pd': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.2 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5 }], 'Cu': [{ 'type': 'G2', 'element': 'Cu', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }] } hiddenlayers = {'O': (2, ), 'Pd': (2, ), 'Cu': (2, )} weights = OrderedDict([ ('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))])) ]) scalings = OrderedDict([ ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)])) ]) # Correct values if aseversion < 12: # EMT values have changed from 3.12.0 version ref_loss = 8004.292841411172 ref_energyloss = (43.7360019403031**2.) * 3 ref_forceloss = (137.40994760947325**2.) * 3 ref_dloss_dparameters = np.array([ 0.08141668748130322, 0.03231235582925534, 0.04388650395738586, 0.017417514465922313, 0.028431276597563077, 0.011283700608814465, 0.0941695726576061, -0.12322258890990219, 0.12679918754154568, 63.53960075374332, 0.01624770019548904, -86.6263955859162, -0.01777752828707744, 86.22415217526024, 0.017745913074496918, 104.58358033298292, -96.73280209888215, -99.09843648905876, -8.302880631972338, -1.2590007162074357, 8.302877346883133, 1.25875988418134, -8.302866610678247, -1.2563833805675353, 28.324298392680998, 28.093155094726413, -29.37874455931869, -11.247473567044866, 11.119951466664787, -87.08582317481387, -20.939485239182346, -125.73267675705365, -35.138524407482116 ]) else: ref_loss = 8004.287750978173 ref_energyloss = (43.73598563177581**2.) * 3 ref_forceloss = (137.409923023214**2.) 
* 3 ref_dloss_dparameters = np.array([ 0.08141663280688925, 0.03231233413027478, 0.043886474485922956, 0.01741750276939638, 0.02843125750487539, 0.011283693031378718, 0.09416950941914284, -0.12322250616122936, 0.1267991023910503, 63.53958764057119, 0.016247696749304368, -86.62637753054923, -0.01777752451341436, 86.22413420485914, 0.01774590930723711, 104.58353326982777, -96.73275667196937, -99.09839026204304, -8.302877823431269, -1.2590002903842232, 8.302874538343092, 1.2587594584335775, -8.302863802141216, -1.2563829555383859, 28.32428881173613, 28.093145591893936, -29.37873462156934, -11.24746601393696, 11.11994399919284, -87.08579155328007, -20.93947792122797, -125.73262989900473, -35.13850819392253 ]) # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for fortran in [False, True]: for cores in range(1, 4): label = 'train-periodic/%s-%i' % (fortran, cores) print(label) calc = Amp(descriptor=Gaussian( cutoff=4., Gs=Gs, fortran=fortran, ), model=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', regressor=regressor, fortran=fortran, ), label=label, dblabel=label, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images, ) diff = abs(calc.model.lossfunction.loss - ref_loss) print("diff at 414 =", diff) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-9.)), \ 'Calculated value of force RMSE is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'Calculated value of loss function derivative is wrong!' dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian(cutoff=4., Gs=Gs, fortran=fortran), model=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', regressor=regressor, fortran=fortran, ), label=secondlabel, dblabel=dblabel, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images, ) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-9.)), \ 'Calculated value of force RMSE is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'Calculated value of loss function derivative is wrong!'
def test(): images = make_training_images() for descriptor in [None, Gaussian()]: for global_search in [ None, SimulatedAnnealing(temperature=10, steps=5) ]: for data_format in ['json', 'db']: for save_memory in [ False, ]: for fortran in [False, True]: for cores in range(1, 4): print(descriptor, global_search, data_format, save_memory, fortran, cores) pwd = os.getcwd() testdir = 'read_write_test' os.mkdir(testdir) os.chdir(testdir) regression = NeuralNetwork(hiddenlayers=( 5, 5, )) calc = Amp( label='calc', descriptor=descriptor, regression=regression, fortran=fortran, ) # Should start with new variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) # Test that we cannot overwrite. (Strange code # here because we *want* it to raise an # exception...) try: calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) except IOError: pass else: raise RuntimeError( 'Code allowed to overwrite!') # Test that we can manually overwrite. # Should start with existing variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, overwrite=True, cores=cores, ) label = 'testdir' if not os.path.exists(label): os.mkdir(label) # New directory calculator. calc = Amp( label='testdir/calc', descriptor=descriptor, regression=regression, fortran=fortran, ) # Should start with new variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) # Open existing, save under new name. calc = Amp( load='calc', label='calc2', descriptor=descriptor, regression=regression, fortran=fortran, ) # Should start with existing variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) label = 'calc_new' if not os.path.exists(label): os.mkdir(label) # Change label and re-train calc.set_label('calc_new/calc') # Should start with existing variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) # Open existing without specifying new name. calc = Amp( load='calc', descriptor=descriptor, regression=regression, fortran=fortran, ) # Should start with existing variables calc.train( images, energy_goal=0.01, force_goal=10., global_search=global_search, extend_variables=True, data_format=data_format, save_memory=save_memory, cores=cores, ) os.chdir(pwd) shutil.rmtree(testdir, ignore_errors=True) del calc, regression
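# The try/except/else block above asserts that retraining onto an existing
# label raises IOError unless overwrite=True is passed.  If this suite were
# driven by pytest (an assumption; these are plain test functions), the same
# expectation could be stated with pytest.raises, as in this sketch:
import pytest

def assert_refuses_overwrite(calc, images):
    """Hypothetical helper: a second train() on the same label must raise."""
    calc.train(images, energy_goal=0.01, force_goal=10.)
    with pytest.raises(IOError):
        calc.train(images, energy_goal=0.01, force_goal=10.)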
def periodic_0th_bfgs_step_test(): ########################################################################### # Making the list of images images = [ Atoms(symbols='PdOPd', pbc=np.array([True, False, False], dtype=bool), cell=np.array([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array([[0.5, 1., 0.5], [1., 0.5, 1.], [1.5, 1.5, 1.5]])), Atoms(symbols='PdO', pbc=np.array([True, True, False], dtype=bool), cell=np.array([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array([[0.5, 1., 0.5], [1., 0.5, 1.]])), Atoms(symbols='Cu', pbc=np.array([True, True, False], dtype=bool), cell=np.array([[1.8, 0., 0.], [0., 1.8, 0.], [0., 0., 1.8]]), positions=np.array([[0., 0., 0.]])) ] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) ########################################################################### # Parameters Gs = { 'O': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['O', 'Pd'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }], 'Pd': [{ 'type': 'G2', 'element': 'Pd', 'eta': 0.2 }, { 'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5 }], 'Cu': [{ 'type': 'G2', 'element': 'Cu', 'eta': 0.8 }, { 'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5 }] } hiddenlayers = {'O': (2,), 'Pd': (2,), 'Cu': (2,)} weights = OrderedDict([ ('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))])) ]) scalings = OrderedDict([ ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)])) ]) ########################################################################### # Correct values correct_cost = 8004.292841472513 correct_energy_rmse = 43.736001940333836 correct_force_rmse = 137.4099476110887 correct_der_cost_fxn = [ 0.0814166874813534, 0.03231235582927526, 0.04388650395741291, 0.017417514465933048, 0.0284312765975806, 0.011283700608821421, 0.09416957265766414, -0.12322258890997816, 0.12679918754162384, 63.5396007548815, 0.016247700195771732, -86.62639558745185, -0.017777528287386473, 86.22415217678898, 0.017745913074805372, 104.58358033260711, -96.7328020983672, -99.09843648854351, -8.302880631971407, -1.2590007162073242, 8.3028773468822, 1.258759884181224, -8.302866610677315, -1.2563833805673688, 28.324298392677846, 28.09315509472324, -29.378744559315365, -11.247473567051799, 11.119951466671642, -87.08582317485761, -20.93948523898559, -125.73267675714658, -35.13852440758523 ] ########################################################################### # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for global_search in [None, 'SA']: for fortran in [False, True]: for extend_variables in [False, True]: for data_format in ['db', 'json']: for save_memory in [False]: for cores in range(1, 5): string = 'CuOPdbp/2/%s-%s-%s-%s-%s-%i' label = string % (global_search, fortran, extend_variables, data_format, save_memory, cores) if global_search == 'SA': gs = \ SimulatedAnnealing(temperature=10, steps=5) elif global_search is None: gs = None print label calc = 
Amp(descriptor=Gaussian(cutoff=4., Gs=Gs), regression=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', ), fortran=fortran, label=label) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10, force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) assert (abs(calc.cost_function - correct_cost) < 10.**(-7.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - correct_energy_rmse) < 10.**(-10.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - correct_force_rmse) < 10 ** (-8.)), \ 'The calculated value of force RMSE is wrong!' for _ in range(len(correct_der_cost_fxn)): assert(abs(calc.der_variables_cost_function[ _] - correct_der_cost_fxn[_]) < 10 ** (-8)), \ 'The calculated value of cost function \ derivative is wrong!' dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian(cutoff=4., Gs=Gs), regression=NeuralNetwork( hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', ), fortran=fortran, label=secondlabel, dblabel=dblabel) calc.train(images=images, energy_goal=10.**10., force_goal=10.**10, force_coefficient=0.04, cores=cores, data_format=data_format, save_memory=save_memory, global_search=gs, extend_variables=extend_variables) assert (abs(calc.cost_function - correct_cost) < 10.**(-7.)), \ 'The calculated value of cost function is \ wrong!' assert (abs(calc.energy_per_atom_rmse - correct_energy_rmse) < 10.**(-10.)), \ 'The calculated value of energy per atom RMSE \ is wrong!' assert (abs(calc.force_rmse - correct_force_rmse) < 10 ** (-8.)), \ 'The calculated value of force RMSE is wrong!' for _ in range(len(correct_der_cost_fxn)): assert(abs(calc.der_variables_cost_function[ _] - correct_der_cost_fxn[_]) < 10 ** (-8)), \ 'The calculated value of cost function \ derivative is wrong!'
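# Note on the hiddenlayers dictionary above: a one-element tuple needs a
# trailing comma, so (2,) denotes a single hidden layer of two nodes, whereas
# (2) without the comma is just the integer 2.  A minimal illustration in
# plain Python (no Amp involved):
assert (2) == 2            # parentheses alone do not make a tuple
assert (2,) == tuple([2])  # the trailing comma does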
def periodic_0th_bfgs_step_test(): # Making the list of images images = [Atoms(symbols='PdOPd', pbc=np.array([True, False, False], dtype=bool), cell=np.array( [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array( [[0.5, 1., 0.5], [1., 0.5, 1.], [1.5, 1.5, 1.5]])), Atoms(symbols='PdO', pbc=np.array([True, True, False], dtype=bool), cell=np.array( [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]), positions=np.array( [[0.5, 1., 0.5], [1., 0.5, 1.]])), Atoms(symbols='Cu', pbc=np.array([True, True, False], dtype=bool), cell=np.array( [[1.8, 0., 0.], [0., 1.8, 0.], [0., 0., 1.8]]), positions=np.array( [[0., 0., 0.]]))] for image in images: image.set_calculator(EMT()) image.get_potential_energy(apply_constraint=False) image.get_forces(apply_constraint=False) # Parameters Gs = {'O': [{'type': 'G2', 'element': 'Pd', 'eta': 0.8}, {'type': 'G4', 'elements': ['O', 'Pd'], 'eta':0.3, 'gamma':0.6, 'zeta':0.5}], 'Pd': [{'type': 'G2', 'element': 'Pd', 'eta': 0.2}, {'type': 'G4', 'elements': ['Pd', 'Pd'], 'eta':0.9, 'gamma':0.75, 'zeta':1.5}], 'Cu': [{'type': 'G2', 'element': 'Cu', 'eta': 0.8}, {'type': 'G4', 'elements': ['Cu', 'Cu'], 'eta':0.3, 'gamma':0.6, 'zeta':0.5}]} hiddenlayers = {'O': (2,), 'Pd': (2,), 'Cu': (2,)} weights = OrderedDict([('O', OrderedDict([(1, np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9]])), (2, np.matrix([[5.5], [3.6], [1.4]]))])), ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7]])), (2, np.matrix([[4.0], [0.5], [3.0]]))])), ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9]])), (2, np.matrix([[0.5], [1.6], [-1.4]]))]))]) scalings = OrderedDict([('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])), ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])), ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)]))]) # Correct values ref_loss = 8004.292841411172 ref_energyloss = (43.7360019403031 ** 2.) * 3 ref_forceloss = (137.40994760947325 ** 2.) * 3 ref_dloss_dparameters = np.array([0.08141668748130322, 0.03231235582925534, 0.04388650395738586, 0.017417514465922313, 0.028431276597563077, 0.011283700608814465, 0.0941695726576061, -0.12322258890990219, 0.12679918754154568, 63.53960075374332, 0.01624770019548904, -86.6263955859162, -0.01777752828707744, 86.22415217526024, 0.017745913074496918, 104.58358033298292, -96.73280209888215, -99.09843648905876, -8.302880631972338, -1.2590007162074357, 8.302877346883133, 1.25875988418134, -8.302866610678247, -1.2563833805675353, 28.324298392680998, 28.093155094726413, -29.37874455931869, -11.247473567044866, 11.119951466664787, -87.08582317481387, -20.939485239182346, -125.73267675705365, -35.138524407482116]) # Testing pure-python and fortran versions of Gaussian-neural on different # number of processes for fortran in [False, True]: for cores in range(1, 4): label = 'train-periodic/%s-%i' % (fortran, cores) print label calc = Amp(descriptor=Gaussian(cutoff=4., Gs=Gs, fortran=fortran,), model=NeuralNetwork(hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', regressor=regressor, fortran=fortran,), label=label, dblabel=label, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images,) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' 
diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-9.)), \ 'Calculated value of force RMSE is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'Calculated value of loss function derivative is wrong!' dblabel = label secondlabel = '_' + label calc = Amp(descriptor=Gaussian(cutoff=4., Gs=Gs, fortran=fortran), model=NeuralNetwork(hiddenlayers=hiddenlayers, weights=weights, scalings=scalings, activation='tanh', regressor=regressor, fortran=fortran,), label=secondlabel, dblabel=dblabel, cores=cores) lossfunction = LossFunction(convergence=convergence) calc.model.lossfunction = lossfunction calc.train(images=images,) diff = abs(calc.model.lossfunction.loss - ref_loss) assert (diff < 10.**(-10.)), \ 'Calculated value of loss function is wrong!' diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss) assert (diff < 10.**(-10.)), \ 'Calculated value of energy per atom RMSE is wrong!' diff = abs(calc.model.lossfunction.force_loss - ref_forceloss) assert (diff < 10 ** (-9.)), \ 'Calculated value of force RMSE is wrong!' for _ in range(len(ref_dloss_dparameters)): diff = abs(calc.model.lossfunction.dloss_dparameters[_] - ref_dloss_dparameters[_]) assert(diff < 10 ** (-10.)), \ 'Calculated value of loss function derivative is wrong!'
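# The labels above write Amp output under 'train-periodic/', and the earlier
# read/write test shows that training refuses to overwrite existing files
# unless explicitly asked to.  The sketch below is a hypothetical driver (an
# assumption about how a file like this might be invoked; it is not part of
# the original source): clear leftover output, then run the test directly.
import os
import shutil

def _clean_previous_output(prefix='train-periodic'):
    """Remove output directories left behind by an earlier run."""
    if os.path.isdir(prefix):
        shutil.rmtree(prefix, ignore_errors=True)

if __name__ == '__main__':
    _clean_previous_output()
    periodic_0th_bfgs_step_test()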