def create_Gs(self, elements, num_radial_etas, num_angular_etas, num_zetas, angular_type):
    """Build the symmetry-function set and store it on ``self.Gs``.

    Three families are combined:
    * origin-centered radial (G2) functions with a spread of widths,
    * fixed-width radial (G2) functions shifted across the cutoff range,
    * angular functions of the requested type (e.g. "G4" or "G5").
    """
    rc = self.cutoff.Rc
    # Radial functions centered at the origin, widths from narrow to broad.
    origin_g2 = make_symmetry_functions(
        elements=elements,
        type="G2",
        etas=np.linspace(1.0, 20.0, num_radial_etas),
        centers=np.zeros(num_radial_etas),
    )
    # Radial functions of fixed width, centers marching out toward the cutoff.
    shifted_g2 = make_symmetry_functions(
        elements=elements,
        type="G2",
        etas=5.0 * np.ones(num_radial_etas),
        centers=np.linspace(0.5, rc - 0.5, num_radial_etas),
    )
    # Angular functions; zetas are successive powers of two.
    angular = make_symmetry_functions(
        elements=elements,
        type=angular_type,
        etas=np.linspace(0.01, 3.0, num_angular_etas),
        zetas=[2 ** power for power in range(num_zetas)],
        gammas=[1.0, -1.0],
    )
    self.Gs = origin_g2 + shifted_g2 + angular
def train_test():
    """Train a small Gaussian/G5 neural-network calculator, print its
    predictions, then verify the saved calculator reloads and runs."""
    label = 'train_test_g5/calc'
    train_images = generate_data(2)
    elements = ['Pt', 'Cu']
    # Radial (G2) plus angular (G5) symmetry functions, shared per element.
    fingerprint_set = make_symmetry_functions(
        elements=elements,
        type='G2',
        etas=np.logspace(np.log10(0.05), np.log10(5.), num=4))
    fingerprint_set += make_symmetry_functions(
        elements=elements,
        type='G5',
        etas=[0.005],
        zetas=[1., 4.],
        gammas=[+1., -1.])
    Gs = {element: fingerprint_set for element in elements}
    calc = Amp(descriptor=Gaussian(Gs=Gs),
               model=NeuralNetwork(hiddenlayers=(3, 3)),
               label=label,
               cores=1)
    calc.model.lossfunction = LossFunction(
        convergence={'energy_rmse': 0.02, 'force_rmse': 0.03})
    calc.train(images=train_images, )
    for image in train_images:
        print("energy = %s" % str(calc.get_potential_energy(image)))
        print("forces = %s" % str(calc.get_forces(image)))
    # Test that we can re-load this calculator and call it again.
    del calc
    calc2 = Amp.load(label + '.amp')
    for image in train_images:
        print("energy = %s" % str(calc2.get_potential_energy(image)))
        print("forces = %s" % str(calc2.get_forces(image)))
def test_fp_match():
    """Test whether the generated fingerprints are consistent with Amp's.

    Repeats the comparison 100 times on a fixed H2O molecule; each pass
    regenerates fingerprints via SimpleNN and via Amp's Gaussian descriptor
    and asserts element-wise agreement to within 1e-5.
    """
    for _ in range(100):
        atoms = molecule("H2O")
        atoms.set_cell([10, 10, 10])
        # BUG FIX: the original assigned a list to the attribute
        # (``atoms.set_pbc = [True] * 3``), which clobbered ASE's bound
        # method and left pbc unset.  Call the method instead.
        atoms.set_pbc([True] * 3)
        # NOTE(review): H2O has 3 atoms but only 2 force rows are supplied
        # here -- confirm against SinglePointCalculator's expectations.
        atoms.set_calculator(
            sp(atoms=atoms, energy=-1, forces=np.array([[-1, -1, -1], [-1, -1, -1]]))
        )
        Gs = {}
        images = [atoms]
        # Symmetry-function hyperparameters shared by both code paths.
        Gs["G2_etas"] = [0.005] * 2
        Gs["G2_rs_s"] = [0] * 2
        Gs["G4_etas"] = [0.005] * 2
        Gs["G4_zetas"] = [1.0, 4.0]
        Gs["G4_gammas"] = [1.0, -1.0]
        Gs["cutoff"] = 6.5
        elements = list(
            sorted(set([atom.symbol for atoms in images for atom in atoms]))
        )
        G = make_symmetry_functions(elements=elements, type="G2",
                                    etas=Gs["G2_etas"])
        G += make_symmetry_functions(
            elements=elements,
            type="G4",
            etas=Gs["G4_etas"],
            zetas=Gs["G4_zetas"],
            gammas=Gs["G4_gammas"],
        )
        G = {"O": G, "H": G}
        hashes = stock_hash(images)
        amp_hash = list(hashes.keys())[0]
        make_amp_descriptors_simple_nn(images, Gs, cores=1, label='test',
                                       elements=elements)
        s_nn_hash = list(new_hash(images, Gs).keys())[0]
        # Load the SimpleNN fingerprints from disk, then remove the file.
        with open("amp-data-fingerprints.ampdb/loose/" + s_nn_hash, "rb") as f:
            simple_nn = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + s_nn_hash)
        descriptor = Gaussian(elements=elements, Gs=G,
                              cutoff=Cosine(Gs["cutoff"]))
        descriptor.calculate_fingerprints(hashes, calculate_derivatives=True)
        # Load the Amp fingerprints from disk, then remove the file.
        with open("amp-data-fingerprints.ampdb/loose/" + amp_hash, "rb") as f:
            amp = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + amp_hash)
        # Compare per-atom fingerprint vectors component by component.
        for s, am in zip(simple_nn, amp):
            for i, j in zip(s[1], am[1]):
                assert abs(i - j) <= 1e-5, "Fingerprints do not match!"
def test():
    """Gaussian/Neural numeric-analytic consistency."""
    images = generate_data()
    regressor = Regressor(optimizer='BFGS')
    # One shared symmetry-function set (G2 radial + G4 angular) for Cu and Pt.
    _G = make_symmetry_functions(type='G2', etas=[0.05, 5.],
                                 elements=['Cu', 'Pt'])
    _G += make_symmetry_functions(type='G4', etas=[0.005],
                                  zetas=[1., 4.], gammas=[1.],
                                  elements=['Cu', 'Pt'])
    Gs = {'Cu': _G, 'Pt': _G}
    calc = Amp(descriptor=Gaussian(Gs=Gs),
               model=NeuralNetwork(
                   hiddenlayers=(2, 1),
                   regressor=regressor,
                   randomseed=42,
               ),
               cores=1)
    step = 0
    # Sweep analytic (d=None) vs numeric (d=1e-5) loss derivatives, fortran
    # on/off, and 1 or 2 cores.  The first combination records reference
    # energies/forces/derivatives; every later combination must match them.
    for d in [None, 0.00001]:
        for fortran in [True, False]:
            for cores in [1, 2]:
                step += 1
                label = \
                    'numeric_analytic_test/analytic-%s-%i' % (fortran, cores) \
                    if d is None \
                    else 'numeric_analytic_test/numeric-%s-%i' \
                    % (fortran, cores)
                print(label)
                # Huge convergence thresholds: one training pass is enough;
                # this test needs the loss derivatives, not a converged fit.
                loss = LossFunction(convergence={
                    'energy_rmse': 10**10,
                    'force_rmse': 10**10
                }, d=d)
                calc.set_label(label)
                # Reuse the fingerprint database from the first run.
                calc.dblabel = 'numeric_analytic_test/analytic-True-1'
                calc.model.lossfunction = loss
                calc.descriptor.fortran = fortran
                calc.model.fortran = fortran
                calc.cores = cores
                calc.train(images=images, )
                if step == 1:
                    # First configuration: record the reference values.
                    ref_energies = []
                    ref_forces = []
                    for image in images:
                        ref_energies += [calc.get_potential_energy(image)]
                        ref_forces += [calc.get_forces(image)]
                    ref_dloss_dparameters = \
                        calc.model.lossfunction.dloss_dparameters
                else:
                    # Later configurations must reproduce the reference.
                    energies = []
                    forces = []
                    for image in images:
                        energies += [calc.get_potential_energy(image)]
                        forces += [calc.get_forces(image)]
                    dloss_dparameters = \
                        calc.model.lossfunction.dloss_dparameters
                    for image_no in range(2):
                        diff = abs(energies[image_no] - ref_energies[image_no])
                        assert (diff < 10.**(-13.)), \
                            'The calculated value of energy of image %i is ' \
                            'wrong!' % (image_no + 1)
                        for atom_no in range(len(images[0])):
                            for i in range(3):
                                diff = abs(forces[image_no][atom_no][i] -
                                           ref_forces[image_no][atom_no][i])
                                assert (diff < 10.**(-10.)), \
                                    'The calculated %i force of atom %i of ' \
                                    'image %i is wrong!' \
                                    % (i, atom_no, image_no + 1)
                    # Checks analytical and numerical dloss_dparameters
                    for _ in range(len(ref_dloss_dparameters)):
                        diff = abs(dloss_dparameters[_] -
                                   ref_dloss_dparameters[_])
                        assert(diff < 10 ** (-10.)), \
                            'The calculated value of loss function ' \
                            'derivative is wrong!'
                    # Checks analytical and numerical forces
                    # NOTE(review): when d is None this passes None to
                    # calculate_numerical_forces -- confirm this branch is
                    # intended to run in the analytic sweep as well.
                    forces = []
                    for image in images:
                        image.set_calculator(calc)
                        forces += [calc.calculate_numerical_forces(image,
                                                                   d=d)]
                    # image_no is still bound from the energy loop above.
                    for atom_no in range(len(images[0])):
                        for i in range(3):
                            diff = abs(forces[image_no][atom_no][i] -
                                       ref_forces[image_no][atom_no][i])
                            print('{:3d} {:1d} {:7.1e}'.format(atom_no, i,
                                                               diff))
                            assert (diff < 10.**(-6.)), \
                                'The calculated %i force of atom %i of ' \
                                'image %i is wrong! (Diff = %f)' \
                                % (i, atom_no, image_no + 1, diff)
def test_fps_memory():
    """Check TestDataset fingerprints/primes against the SimpleNN-backed
    AtomsDataset for a Cu(100) slab with an adsorbed CO molecule.

    Fingerprints and their derivatives ("primes") must agree to within
    1e-4 component-wise.
    """
    slab = fcc100("Cu", size=(3, 3, 3))
    ads = molecule("CO")
    add_adsorbate(slab, ads, 4, offset=(1, 1))
    # Freeze the slab atoms tagged as layers 2 and 3.
    cons = FixAtoms(indices=[
        atom.index for atom in slab if (atom.tag == 2 or atom.tag == 3)
    ])
    slab.set_constraint(cons)
    slab.center(vacuum=13.0, axis=2)
    slab.set_pbc(True)
    slab.wrap(pbc=[True] * 3)
    slab.set_calculator(EMT())
    images = [slab]
    # Symmetry-function hyperparameters (G2 radial, G4 angular).
    Gs = {}
    Gs["G2_etas"] = [2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [1.0]
    Gs["cutoff"] = 6.5
    # Unique element symbols kept in first-appearance order.
    elements = np.array([atom.symbol for atoms in images for atom in atoms])
    _, idx = np.unique(elements, return_index=True)
    elements = list(elements[np.sort(idx)])
    G = make_symmetry_functions(elements=elements, type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    G = {"O": G, "C": G, "Cu": G}
    snn_hashes = new_hash(images, Gs=Gs)
    base = AtomsDataset(images, SNN_Gaussian, Gs, forcetraining=True,
                        label="test", cores=10)
    # NOTE(review): the loop variable below shadows the ``idx`` array from
    # np.unique above and is shadowed again by the key loop at the end;
    # harmless as written, but worth renaming.
    for idx in range(len(images)):
        s_nn_hash = list(snn_hashes.keys())[idx]
        # SimpleNN
        # Load the SimpleNN fingerprints and primes, removing each file
        # after it is read.
        with open("amp-data-fingerprints.ampdb/loose/" + s_nn_hash, "rb") as f:
            simple_nn = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + s_nn_hash)
        with open("amp-data-fingerprint-primes.ampdb/loose/" + s_nn_hash,
                  "rb") as f:
            simple_nn_prime = load(f)
        os.system("rm amp-data-fingerprint-primes.ampdb/loose/" + s_nn_hash)
        test = TestDataset(images[idx], base.elements, base.base_descriptor,
                           Gs, base.fprange, 'test2', cores=2)
        test_fp = test.fps
        test_prime = test.fp_primes
        key = simple_nn_prime.keys()
        # Fingerprints must agree per atom, component by component.
        for s, am in zip(simple_nn, test_fp):
            for i, j in zip(s[1], am[1]):
                assert abs(i - j) <= 1e-4, "Fingerprints do not match! %s, %s" % (
                    i, j)
        # Fingerprint derivatives must agree for every prime key.
        for idx in key:
            for s, am in zip(simple_nn_prime[idx], test_prime[idx]):
                assert abs(s - am) <= 1e-4, "Fingerprint primes do not match!"
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous
    Mathematica calculations.

    Builds a fixed-weight Amp neural network and the equivalent torch
    FullNN (all weights/biases 0.5, linear activation) and asserts that
    their energy and force predictions agree.
    """
    #: Making the list of non-periodic images
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]
    # Evaluate EMT once so every image carries cached energies.
    [a.get_potential_energy() for a in images]
    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}
    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5
    elements = ["O", "Pd", "Cu"]
    G = make_symmetry_functions(elements=elements, type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    hashed_images = hash_images(images, Gs)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(hashed_images,
                                      calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor,
                                                      hashed_images)
    # All weights fixed at 0.5 so Amp and the torch model are directly
    # comparable.  FIX: the O output layer previously used the deprecated
    # np.matrix; use np.array for consistency with the Pd and Cu entries.
    weights = OrderedDict([
        (
            "O",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Pd",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Cu",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
    ])
    # Identity output scaling so raw network outputs are compared.
    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])
    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="linear",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )
    # Reference predictions from Amp.
    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)
    device = "cpu"
    dataset = AtomsDataset(images, descriptor=DummyGaussian, cores=1,
                           label='test', Gs=Gs, forcetraining=True)
    fp_length = dataset.fp_length
    batch_size = len(dataset)
    dataloader = DataLoader(dataset, batch_size, collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
    # Mirror the Amp weights: every Dense layer becomes linear with
    # weights and biases fixed at 0.5.
    for name, layer in model.named_modules():
        if isinstance(layer, Dense):
            layer.activation = None
            init.constant_(layer.weight, 0.5)
            init.constant_(layer.bias, 0.5)
    for batch in dataloader:
        input_data = [batch[0], len(batch[1]), batch[3]]
        for element in elements:
            input_data[0][element][0] = (
                input_data[0][element][0].to(device).requires_grad_(True))
        fp_primes = batch[4]
        energy_pred, force_pred = model(input_data, fp_primes)
    # Energies must match Amp's to 4 decimal places.
    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    # Forces must match Amp's component-wise to 1e-5.
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.00001, (
                "The predicted force of image % i, direction % i is wrong! Values: %s vs %s"
                % (idx + 1, idx_d, value, force_pred.tolist()[idx][idx_d]))
    print("Force predictions are correct!")
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous
    Mathematica calculations.

    Uses seeded random weights with a tanh activation; Amp and the torch
    FullNN are initialized from the same arrays and must agree.
    """
    #: Making the list of non-periodic images
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]
    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}
    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5
    elements = ["O", "Pd", "Cu"]
    G = make_symmetry_functions(elements=elements, type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    amp_images = amp_hash(images)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(amp_images, calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor, amp_images)
    # Seeded per-element random weights so the run is reproducible.
    np.random.seed(1)
    O_weights_1 = np.random.rand(10, 2)
    O_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(2)
    Pd_weights_1 = np.random.rand(10, 2)
    Pd_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(3)
    Cu_weights_1 = np.random.rand(10, 2)
    Cu_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    weights = OrderedDict([
        ("O", OrderedDict([(1, O_weights_1), (2, O_weights_2)])),
        ("Pd", OrderedDict([(1, Pd_weights_1), (2, Pd_weights_2)])),
        ("Cu", OrderedDict([(1, Cu_weights_1), (2, Cu_weights_2)])),
    ])
    # Identity output scaling so raw network outputs are compared.
    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])
    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="tanh",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )
    # Reference predictions from Amp.
    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)
    # Convert each Amp weight array to torch form: the last row is treated
    # as the bias, the rest is transposed into the Linear weight matrix.
    torch_O_weights_1 = torch.FloatTensor(O_weights_1[:-1, :]).t()
    torch_O_bias_1 = torch.FloatTensor(O_weights_1[-1, :])
    torch_O_weights_2 = torch.FloatTensor(O_weights_2[:-1, :]).t()
    torch_O_bias_2 = torch.FloatTensor(O_weights_2[-1, :])
    torch_Pd_weights_1 = torch.FloatTensor(Pd_weights_1[:-1, :]).t()
    torch_Pd_bias_1 = torch.FloatTensor(Pd_weights_1[-1, :])
    torch_Pd_weights_2 = torch.FloatTensor(Pd_weights_2[:-1, :]).t()
    torch_Pd_bias_2 = torch.FloatTensor(Pd_weights_2[-1, :])
    torch_Cu_weights_1 = torch.FloatTensor(Cu_weights_1[:-1, :]).t()
    torch_Cu_bias_1 = torch.FloatTensor(Cu_weights_1[-1, :])
    torch_Cu_weights_2 = torch.FloatTensor(Cu_weights_2[:-1, :]).t()
    torch_Cu_bias_2 = torch.FloatTensor(Cu_weights_2[-1, :])
    device = "cpu"
    dataset = AtomsDataset(
        images,
        descriptor=Gaussian,
        cores=1,
        label="consistency",
        Gs=Gs,
        forcetraining=True,
    )
    fp_length = dataset.fp_length
    batch_size = len(dataset)
    dataloader = DataLoader(dataset, batch_size, collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
    # Copy the converted weights/biases into the torch model in place.
    model.state_dict()["elementwise_models.O.model_net.0.weight"].copy_(
        torch_O_weights_1)
    model.state_dict()["elementwise_models.O.model_net.0.bias"].copy_(
        torch_O_bias_1)
    model.state_dict()["elementwise_models.O.model_net.2.weight"].copy_(
        torch_O_weights_2)
    model.state_dict()["elementwise_models.O.model_net.2.bias"].copy_(
        torch_O_bias_2)
    model.state_dict()["elementwise_models.Pd.model_net.0.weight"].copy_(
        torch_Pd_weights_1)
    model.state_dict()["elementwise_models.Pd.model_net.0.bias"].copy_(
        torch_Pd_bias_1)
    model.state_dict()["elementwise_models.Pd.model_net.2.weight"].copy_(
        torch_Pd_weights_2)
    model.state_dict()["elementwise_models.Pd.model_net.2.bias"].copy_(
        torch_Pd_bias_2)
    model.state_dict()["elementwise_models.Cu.model_net.0.weight"].copy_(
        torch_Cu_weights_1)
    model.state_dict()["elementwise_models.Cu.model_net.0.bias"].copy_(
        torch_Cu_bias_1)
    model.state_dict()["elementwise_models.Cu.model_net.2.weight"].copy_(
        torch_Cu_weights_2)
    model.state_dict()["elementwise_models.Cu.model_net.2.bias"].copy_(
        torch_Cu_bias_2)
    import torch.nn as nn
    # Append a Tanh to every per-element MLP to mirror Amp's "tanh"
    # activation setting above.
    for name, layer in model.named_modules():
        if isinstance(layer, MLP):
            layer.model_net = nn.Sequential(layer.model_net, Tanh())
    # One full-size batch: predictions come from a single forward pass.
    for batch in dataloader:
        x = to_tensor(batch[0], device)
        y = to_tensor(batch[1], device)
        energy_pred, force_pred = model(x)
    # Energies must match Amp's to 4 decimal places.
    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    # Forces must match Amp's component-wise to 1e-4.
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.0001, (
                "The predicted force of image % i, direction % i is wrong! Values: %s vs %s"
                % (idx + 1, idx_d, value, force_pred.tolist()[idx][idx_d]))
    print("Force predictions are correct!")
train_traj = "trajs/training.traj" cutoff = Polynomial(6.0, gamma=5.0) elements = ["Cu"] num_radial_etas = 8 num_angular_etas = 10 num_zetas = 1 angular_type = "G4" symm_funcs = {} trn = Trainer(cutoff=cutoff) trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas, angular_type) symm_funcs["Selected"] = trn.Gs G2 = make_symmetry_functions(elements=elements, type="G2", etas=[0.05, 0.23, 1.0, 5.0], centers=np.zeros(4)) G4 = make_symmetry_functions( elements=elements, type="G4", etas=0.005 * np.ones(1), zetas=[1.0, 4.0], gammas=[1.0, -1.0], ) symm_funcs["Default"] = G2 + G4 anl = Analyzer() plter = Plotter() r, rdf = anl.calculate_rdf(train_traj, r_max=cutoff.Rc) for label, symm_func in symm_funcs.items():
# Build an Amp calculator: SimpleNN pre-generates the descriptor files,
# then a (5, 5) neural-network model is attached.
# NOTE(review): ``atoms`` is expected to be defined earlier in this script;
# it is not created here -- confirm against the surrounding file.
ncores = 1
hiddenlayers = (5, 5)
elements = atoms.get_chemical_symbols()
images = [atoms]
# Symmetry-function hyperparameters (G2 radial, G4 angular).
g2_etas = [0.005]
g2_rs_s = [0] * 4
g4_etas = [0.005]
g4_zetas = [1., 4.]
g4_gammas = [1., -1.]
cutoff = 4
# Pre-generate AMP-format descriptor files via SimpleNN.
make_amp_descriptors_simple_nn(images, g2_etas, g2_rs_s, g4_etas, g4_zetas,
                               g4_gammas, cutoff)
G = make_symmetry_functions(elements=elements, type='G2', etas=g2_etas)
#Add Rs parameter (0.0 for default) to comply with my version of AMP
#for g in G:
#    g['Rs'] = 0.0
G += make_symmetry_functions(elements=elements, type='G4', etas=g4_etas,
                             zetas=g4_zetas, gammas=g4_gammas)
# Cutoff here (4.) matches the ``cutoff`` passed to SimpleNN above.
calc = Amp(descriptor=Gaussian(Gs=G, cutoff=4.), cores=ncores,
           model=NeuralNetwork(hiddenlayers=hiddenlayers))
def train_amp(baseframe=200,
              traj='ethane.traj',
              convergence={
                  'energy_rmse': 0.25,
                  'force_rmse': 0.5
              },
              elements=['C', 'H', 'O'],
              cores=4):
    """Gaussian/tflow train test.

    Trains (or resumes training of) an Amp calculator on the first
    ``baseframe`` images of ``traj``.

    NOTE(review): ``convergence`` and ``elements`` are mutable default
    arguments -- shared across calls; consider None sentinels.
    """
    p = ple()
    label = 'amp'
    all_images = Trajectory(traj)
    nimg, mean_e = get_mean_energy(all_images)
    # Radial (G2) plus angular (G5) symmetry functions, shared per element.
    G = make_symmetry_functions(elements=elements, type='G2',
                                etas=np.logspace(np.log10(0.05),
                                                 np.log10(5.), num=4))
    G += make_symmetry_functions(elements=elements, type='G5',
                                 etas=[0.005],
                                 zetas=[1., 4.],
                                 gammas=[+1., -1.])
    G = {element: G for element in elements}
    # Gs=G
    if not isfile('amp.amp'):
        # Fresh start: build a deep tensorflow-backed network.
        # print('\nset up calculator ...\n')
        calc = Amp(descriptor=Gaussian(mode='atom-centered', Gs=G),
                   model=NeuralNetwork(hiddenlayers=(1024, 1024, 1024, 512,
                                                     512, 256, 256, 256, 256,
                                                     128, 128),
                                       convergenceCriteria=convergence,
                                       activation='tanh',
                                       energy_coefficient=1.0,
                                       force_coefficient=None,
                                       optimizationMethod='ADAM',
                                       parameters={'energyMeanScale': mean_e},
                                       maxTrainingEpochs=100000),
                   label=label,
                   cores=cores)  # 'l-BFGS-b' or 'ADAM'
        trained_images = [all_images[j] for j in range(0, baseframe)]
        calc.train(overwrite=True, images=trained_images)
        del calc
    else:
        # Resume: reload the saved calculator and retrain with the new
        # convergence settings.
        calc = Amp.load('amp.amp')
        calc.model.parameters['convergence'] = convergence
        calc.model.lossfunction = LossFunction(convergence=convergence)
        trained_images = [all_images[j] for j in range(0, baseframe)]
        calc.train(overwrite=True, images=trained_images)
        del calc
    edfts, eamps, eamps_ = [], [], []
    dolabel = True
    # NOTE(review): ``tframe`` is not defined in this function (it is a
    # parameter of train_rnn) -- this line raises NameError when reached.
    basestep = int(baseframe / tframe)
    system('epstopdf energies.eps')
    # NOTE(review): ``x``, ``edft``, ``eamp`` and ``eamp_`` are also
    # undefined here; this tail looks copied from train_rnn -- confirm.
    p.scatter(x, edft, eamp, eamp_, dolabel=dolabel)
    p.plot()
    plot_energies(edfts, eamps, eamp_=eamps_)
    system('epstopdf energies_scatter.eps')
def train_rnn(baseframe=100,
              tframe=8,
              total_step=10,
              traj='ethane.traj',
              convergence={
                  'energy_rmse': 0.25,
                  'force_rmse': 0.5
              },
              elements=['C', 'H', 'O'],
              hiddenlayers=(64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64),
              optim='ADAM',
              cores=4):
    """Gaussian/tflow train test.

    Incremental training loop: seed an Amp model on the first ``baseframe``
    images of ``traj``, then repeatedly (``total_step`` times) pull in the
    next ``tframe`` images, evaluate the current model on them, retrain on
    the accumulated set, and plot DFT-vs-AMP energies.

    NOTE(review): ``convergence`` and ``elements`` are mutable default
    arguments -- shared across calls; consider None sentinels.
    """
    p = ple()
    label = 'amp'
    all_images = Trajectory(traj)
    nimg, mean_e = get_mean_energy(all_images)
    # Radial (G2) plus angular (G5) symmetry functions, shared per element.
    G = make_symmetry_functions(elements=elements, type='G2',
                                etas=np.logspace(np.log10(0.05),
                                                 np.log10(5.), num=4))
    G += make_symmetry_functions(elements=elements, type='G5',
                                 etas=[0.005],
                                 zetas=[1., 4.],
                                 gammas=[+1., -1.])
    G = {element: G for element in elements}
    # Gs=G
    if not isfile('amp.amp'):
        # Fresh start: build the tensorflow-backed network.
        print('\nset up calculator ...\n')
        calc = Amp(descriptor=Gaussian(mode='atom-centered', Gs=G),
                   model=NeuralNetwork(hiddenlayers=hiddenlayers,
                                       convergenceCriteria=convergence,
                                       activation='tanh',
                                       energy_coefficient=1.0,
                                       force_coefficient=None,
                                       optimizationMethod=optim,
                                       parameters={'energyMeanScale': mean_e},
                                       maxTrainingEpochs=100000),
                   label=label,
                   cores=cores)  # 'l-BFGS-b' or 'ADAM'
        trained_images = [all_images[j] for j in range(0, baseframe)]
        calc.train(overwrite=True, images=trained_images)
        del calc
    else:
        # Resume: reload the saved calculator and retrain with the new
        # convergence settings.
        calc = Amp.load('amp.amp')
        calc.model.parameters['convergence'] = convergence
        calc.model.lossfunction = LossFunction(convergence=convergence)
        trained_images = [all_images[j] for j in range(0, baseframe)]
        calc.train(overwrite=True, images=trained_images)
        del calc
    # Cap total_step so we never index past the end of the trajectory.
    tstep = int((nimg - baseframe) / tframe)
    if total_step > tstep:
        total_step = tstep
        print('Max train cycle of %d is allowed.' % total_step)
    edfts, eamps, eamps_ = [], [], []
    dolabel = True
    basestep = int(baseframe / tframe)
    for step in range(basestep, total_step + basestep):
        # Next window of tframe images from the trajectory.
        new_images = [
            all_images[j]
            for j in range(0 + step * tframe, tframe + step * tframe)
        ]
        trained_images.extend(new_images)
        x, edft, eamp, eamp_ = [], [], [], []
        ii = step * tframe
        # ----- test -----
        # Evaluate the model from the previous cycle on the new images.
        calc1 = Amp.load('amp.amp')
        for i, image in enumerate(new_images):
            x.append(ii)
            eamp_.append(calc1.get_potential_energy(image))
            eamps_.append(eamp_[-1])
            edft.append(image.get_potential_energy())
            edfts.append(edft[-1])
            ii += 1
        del calc1
        # ----- train -----
        # Retrain on everything accumulated so far.
        calc = Amp.load('amp.amp')
        calc.model.lossfunction = LossFunction(convergence=convergence)
        # calc.model.convergenceCriteria=convergence
        calc.train(overwrite=True, images=trained_images)
        del calc
        # ----- test -----
        # Evaluate the freshly retrained model on the same new images.
        calc2 = Amp.load('amp.amp')
        print('\n---- current training result ---- \n')
        for i, image in enumerate(new_images):
            eamp.append(calc2.get_potential_energy(image))
            eamps.append(eamp[-1])
            print("energy(AMP) = %f energy(DFT) = %f" % (eamp[-1], edft[i]))
            # print("forces = %s" % str(calc2.get_forces(image)))
        del calc2
        # Plot progress for this cycle.
        # NOTE(review): the collapsed source makes it ambiguous whether the
        # plotting below sits inside or after the step loop -- confirm.
        plot_energies(edfts, eamps, eamp_=None)
        system('epstopdf energies.eps')
        p.scatter(x, edft, eamp, eamp_, dolabel=dolabel)
        if dolabel:
            dolabel = False
        p.plot()
        system('epstopdf energies_scatter.eps')
from amp.descriptor.gaussian import Gaussian from amp.descriptor.zernike import Zernike from amp.descriptor.bispectrum import Bispectrum from amp.descriptor.cutoffs import Cosine, Polynomial from amp.descriptor.gaussian import make_symmetry_functions from amp.model.kernelridge import KernelRidge import numpy as np print("Training New Potential") #cutoff=Polynomial(cut_off_radius,gamma = 4) cutoff = Cosine(cut_off_radius) sigmas = np.logspace(np.log10(0.03), np.log10(0.8), num=9) etas = 1.0 / (2.0 * sigmas**2) Gf = make_symmetry_functions(elements=elements, type='G2', etas=etas) sigmas = np.logspace(np.log10(0.03), np.log10(0.8), num=7) zetas = np.logspace(np.log10(1.0), np.log10(32), num=4) etas = 1.0 / (2.0 * sigmas**2) Gf += make_symmetry_functions(elements=elements, type='G4', etas=etas, zetas=zetas, gammas=[1.0, -1.0]) #lambda on the AMP docs layer_config = (16, 8, 4) G = {} hiddenlayers = {} for el in elements: