Example #1
def test_fp_match():
    """Tests whether the generated fingerprints are consistent with those of AMP."""
    for i in range(100):
        atoms = molecule("H2O")
        atoms.set_cell([10, 10, 10])
        atoms.set_pbc([True] * 3)

        atoms.set_calculator(
            sp(atoms=atoms, energy=-1, forces=np.array([[-1, -1, -1], [-1, -1, -1]]))
        )

        Gs = {}
        images = [atoms]
        Gs["G2_etas"] = [0.005] * 2
        Gs["G2_rs_s"] = [0] * 2
        Gs["G4_etas"] = [0.005] * 2
        Gs["G4_zetas"] = [1.0, 4.0]
        Gs["G4_gammas"] = [1.0, -1.0]
        Gs["cutoff"] = 6.5

        elements = list(
            sorted(set([atom.symbol for atoms in images for atom in atoms]))
        )
        G = make_symmetry_functions(elements=elements, type="G2", etas=Gs["G2_etas"])
        G += make_symmetry_functions(
            elements=elements,
            type="G4",
            etas=Gs["G4_etas"],
            zetas=Gs["G4_zetas"],
            gammas=Gs["G4_gammas"],
        )
        G = {"O": G, "H": G}

        hashes = stock_hash(images)
        amp_hash = list(hashes.keys())[0]

        make_amp_descriptors_simple_nn(images, Gs, cores=1, label='test', elements=elements)
        s_nn_hash = list(new_hash(images, Gs).keys())[0]

        with open("amp-data-fingerprints.ampdb/loose/" + s_nn_hash, "rb") as f:
            simple_nn = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + s_nn_hash)

        descriptor = Gaussian(elements=elements, Gs=G, cutoff=Cosine(Gs["cutoff"]))
        descriptor.calculate_fingerprints(hashes, calculate_derivatives=True)
        with open("amp-data-fingerprints.ampdb/loose/" + amp_hash, "rb") as f:
            amp = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + amp_hash)

        for s, am in zip(simple_nn, amp):
            for i, j in zip(s[1], am[1]):
                assert abs(i - j) <= 1e-5, "Fingerprints do not match!"
Example #2
def train_test():
    label = 'train_test_g5/calc'
    train_images = generate_data(2)
    elements = ['Pt', 'Cu']
    G = make_symmetry_functions(elements=elements,
                                type='G2',
                                etas=np.logspace(np.log10(0.05),
                                                 np.log10(5.),
                                                 num=4))
    G += make_symmetry_functions(elements=elements,
                                 type='G5',
                                 etas=[0.005],
                                 zetas=[1., 4.],
                                 gammas=[+1., -1.])

    G = {element: G for element in elements}

    calc = Amp(descriptor=Gaussian(Gs=G),
               model=NeuralNetwork(hiddenlayers=(3, 3)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02, 'force_rmse': 0.03})
    calc.model.lossfunction = loss

    calc.train(images=train_images, )
    for image in train_images:
        print("energy = %s" % str(calc.get_potential_energy(image)))
        print("forces = %s" % str(calc.get_forces(image)))

    # Test that we can re-load this calculator and call it again.
    del calc
    calc2 = Amp.load(label + '.amp')
    for image in train_images:
        print("energy = %s" % str(calc2.get_potential_energy(image)))
        print("forces = %s" % str(calc2.get_forces(image)))
Example #3
    def create_calc(self, label, dblabel):
        amp_label = os.path.join(self.calc_dir, label)
        amp_dblabel = os.path.join(self.calc_dir, dblabel)
        amp_name = amp_label + ".amp"
        if not os.path.exists(amp_name):
            print("Creating calculator {}...".format(amp_name))
            loss_function = LossFunction(
                convergence=self.convergence,
                energy_coefficient=self.energy_coefficient,
                force_coefficient=self.force_coefficient,
                overfit=self.overfit,
            )
            model = NeuralNetwork(
                hiddenlayers=self.hidden_layers,
                activation=self.activation,
                lossfunction=loss_function,
                weights=None,
                scalings=None,
                prescale=True,
            )
            descriptor = Gaussian(cutoff=self.cutoff, Gs=self.Gs, fortran=True)
            calc = Amp(descriptor=descriptor,
                       model=model,
                       label=amp_label,
                       dblabel=amp_dblabel)

            return calc
        else:
            print("Calculator {} already exists!".format(amp_name))
            calc = Amp.load(amp_name, label=amp_label, dblabel=amp_dblabel)

            return calc
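create_calc either builds a fresh Amp calculator or reloads an existing .amp file. A hedged usage sketch; the trainer instance and its training images are assumptions, not part of the excerpt:

# Hedged usage sketch (trainer and train_images are illustrative assumptions).
calc = trainer.create_calc(label='calc', dblabel='calc-db')
calc.train(images=train_images)   # writes the trained parameters to <calc_dir>/calc.amp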
Example #4
def train_test():
    """Gaussian/KRR train test."""
    label = 'train_test/calc'
    train_images = generate_data(2)
    traj = Trajectory('trainingset.traj', mode='w')

    for image in train_images:
        traj.write(image)

    calc = Amp(descriptor=Gaussian(),
               model=KernelRidge(forcetraining=True, trainingimages='trainingset.traj'),
               label=label,
               cores=1)

    calc.train(images=train_images,)
    for image in train_images:
        print("energy = %s" % str(calc.get_potential_energy(image)))
        print("forces = %s" % str(calc.get_forces(image)))

    # Test that we can re-load this calculator and call it again.
    del calc
    calc2 = Amp.load(label + '.amp')
    for image in train_images:
        print("energy = %s" % str(calc2.get_potential_energy(image)))
        print("forces = %s" % str(calc2.get_forces(image)))
Example #5
def train_images(images, HL, E_conv):
    Hidden_Layer = tuple(HL)
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=Hidden_Layer))
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': E_conv})
    #calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
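A hedged usage sketch for train_images; the trajectory name, layer sizes and convergence target are illustrative:

# Hedged usage sketch (filename and values are illustrative).
import ase.io
images = ase.io.read('training_data.traj', index=':')   # list of Atoms with stored energies
train_images(images, HL=[10, 10], E_conv=0.001)
# Amp writes the trained parameters to "amp.amp" (the default label),
# which can later be restored with Amp.load("amp.amp").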
Example #6
def train_data(images, setup_only=False):
    label = 'nodeplot_test/calc'
    train_images = images

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(5, 5)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02, 'force_rmse': 0.02})
    calc.model.lossfunction = loss

    if not setup_only:
        calc.train(images=train_images, )
        for image in train_images:
            print("energy =", calc.get_potential_energy(image))
            print("forces =", calc.get_forces(image))
    else:
        images = hash_images(train_images)
        calc.descriptor.calculate_fingerprints(images=images,
                                               log=calc._log,
                                               parallel={'cores': 1},
                                               calculate_derivatives=False)
        calc.model.fit(trainingimages=images,
                       descriptor=calc.descriptor,
                       log=calc._log,
                       parallel={'cores': 1},
                       only_setup=True)
        return calc
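train_data has two modes: a full train/predict run and a fingerprint-only setup that returns the untrained calculator. A hedged sketch of calling both, assuming images is a list of ASE Atoms with reference data:

# Hedged usage sketch.
calc = train_data(images, setup_only=True)   # fingerprints only; returns the untrained calculator
train_data(images)                           # full training followed by energy/force predictions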
Example #7
def compute_population_performance(population, reference):

    for individual in population:

        # Construct NNP
        nnp = Amp(descriptor=Gaussian(),
                  model=NeuralNetwork(hiddenlayers=(5, 5)),
                  cores=8)
Example #8
def test():
    """Guassian/Neural force call.

    Checks consistency of pure-python and fortran versions.
    """

    images = make_images()

    for fortran in [False, True]:
        label = 'forcecall/%s' % fortran
        calc = Amp(
            descriptor=Gaussian(
                cutoff=cutoff,
                Gs=Gs,
                fortran=fortran,
            ),
            model=NeuralNetwork(
                hiddenlayers=hiddenlayers,
                weights=weights,
                scalings=scalings,
                activation=activation,
                mode='atom-centered',
                fprange=fingerprints_range,
                fortran=fortran,
            ),
            label=label,
        )

        if fortran is False:
            reference_energies = [
                calc.get_potential_energy(image) for image in images
            ]
        else:
            predicted_energies = [
                calc.get_potential_energy(image) for image in images
            ]
            for image_no in range(len(predicted_energies)):
                assert (abs(predicted_energies[image_no] -
                            reference_energies[image_no]) < 10.**(-5.)), \
                    'Calculated energy value of image %i by fortran version ' \
                    'is not consistent with the value of python version.' \
                    % (image_no + 1)

        if fortran is False:
            reference_forces = [calc.get_forces(image) for image in images]
        else:
            predicted_forces = [calc.get_forces(image) for image in images]

            for image_no in range(len(predicted_forces)):
                for index in range(np.shape(predicted_forces[image_no])[0]):
                    for k in range(np.shape(predicted_forces[image_no])[1]):
                        assert (abs(predicted_forces[image_no][index][k] -
                                    reference_forces[image_no][index][k]) <
                                10.**(-5.)), \
                            'Calculated %i force of atom %i of image %i by ' \
                            'fortran version is not consistent with the ' \
                            'value of python version.' % (k, index, image_no + 1)
Example #9
def test():
    """Gaussian fingerprints consistency.

    Tests that pure-python and fortran, plus different number of cores
    give same results.
    """

    images = make_images()
    images = hash_images(images, ordered=True)

    ref_fps = {}
    ref_fp_primes = {}
    count = 0
    for fortran in [False, True]:
        for cores in range(1, 2):
            descriptor = Gaussian(fortran=fortran,
                                  dblabel='Gaussian-%s-%d' % (fortran, cores))
            descriptor.calculate_fingerprints(images,
                                              parallel={'cores': cores},
                                              log=None,
                                              calculate_derivatives=True)
            for hash, image in images.items():
                if count == 0:
                    ref_fps[hash] = descriptor.fingerprints[hash]
                    ref_fp_primes[hash] = descriptor.fingerprintprimes[hash]
                else:
                    fps = descriptor.fingerprints[hash]
                    # Checking consistency between fingerprints
                    for (element1, afp1), \
                            (element2, afp2) in zip(ref_fps[hash], fps):
                        assert element1 == element2, \
                            'fortran-python consistency for Gaussian ' \
                            'fingerprints broken!'
                        for _, __ in zip(afp1, afp2):
                            assert (abs(_ - __) < 10 ** (-15.)), \
                                'fortran-python consistency for Gaussian ' \
                                'fingerprints broken!'
                    # Checking consistency between fingerprint primes
                    fpprime = descriptor.fingerprintprimes[hash]
                    for key, value in ref_fp_primes[hash].items():
                        for _, __ in zip(value, fpprime[key]):
                            assert (abs(_ - __) < 10 ** (-15.)), \
                                'fortran-python consistency for Gaussian ' \
                                'fingerprint primes broken!'
            count += 1
Example #10
def run_amp(fin):
    images = ase.io.read(fin, index=':')

    print(len(images))

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(10, 10, 10)))
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': 0.001},
                                           force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
Example #11
def exe_train_images(images, HL, E_conv):
    Hidden_Layer = tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=Hidden_Layer))
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': E_conv})
    #calc.model.lossfunction = LossFunction(convergence={'energy_rmse': E_conv},force_coefficient=-0.1)
    #calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
    return
def calc_train_images(images, HL, E_conv, f_conv, f_coeff, ncore):
    Hidden_Layer=tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    cores={'localhost':ncore}   # using 'localhost' avoids SSH communication between nodes
    calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(hiddenlayers=Hidden_Layer), cores=cores)
    if f_conv <= 0.0:
        convergence={'energy_rmse': E_conv}
    else:
        convergence={'energy_rmse': E_conv, 'force_rmse':f_conv}
    calc.model.lossfunction = LossFunction(convergence=convergence, force_coefficient=f_coeff)
        
    #calc.model.lossfunction = LossFunction(force_coefficient=-0.1)
    calc.train(images=images, overwrite=True)
    return
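A hedged usage sketch for calc_train_images, enabling force training on several local cores; the filename and numerical targets are illustrative:

# Hedged usage sketch (filename and targets are illustrative).
import ase.io
images = ase.io.read('training_data.traj', index=':')
calc_train_images(images, HL=[10, 10], E_conv=0.001,
                  f_conv=0.05, f_coeff=0.04, ncore=4)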
Example #13
def test():
    "FingerprintPlot test."""
    generate_data(2, filename='fpplot-training.traj')

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(),
               label='fpplot-test'
               )
    calc.model.lossfunction = LossFunction(convergence={'energy_rmse': 1.00,
                                                        'force_rmse': 1.00})
    calc.train(images='fpplot-training.traj')

    images = ase.io.Trajectory('fpplot-training.traj')
    fpplot = FingerprintPlot(calc)
    fpplot(images)
    fpplot(images, overlay=images[0])
    fpplot(images, overlay=[images[1][2], images[0][-1]])
Example #14
def test():
    """Rotational/translational invariance."""

    for descriptor in [
            Gaussian(fortran=False),
    ]:

        # Non-rotated atomic configuration
        atoms = Atoms([
            Atom('Pt', (0., 0., 0.)),
            Atom('Pt', (0., 0., 1.)),
            Atom('Pt', (0., 2., 1.))
        ])

        images = hash_images([atoms], ordered=True)
        descriptor1 = descriptor
        descriptor1.calculate_fingerprints(images)
        fp1 = descriptor1.fingerprints[list(images.keys())[0]]

        # Randomly Rotated (and translated) atomic configuration
        rot = [random.random(), random.random(), random.random()]
        for i in range(1, len(atoms)):
            (atoms[i].x, atoms[i].y,
             atoms[i].z) = rotate_atom(atoms[i].x, atoms[i].y, atoms[i].z,
                                       rot[0] * np.pi, rot[1] * np.pi,
                                       rot[2] * np.pi)
        disp = [random.random(), random.random(), random.random()]
        for atom in atoms:
            atom.x += disp[0]
            atom.y += disp[1]
            atom.z += disp[2]

        images = hash_images([atoms], ordered=True)
        descriptor2 = descriptor
        descriptor2.calculate_fingerprints(images)
        fp2 = descriptor2.fingerprints[list(images.keys())[0]]

        for (element1, afp1), (element2, afp2) in zip(fp1, fp2):
            assert element1 == element2, 'rotated atoms test broken!'
            for _, __ in zip(afp1, afp2):
                assert (abs(_ - __) < 10 ** (-10.)), \
                    'rotated atoms test broken!'
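rotate_atom is not defined in this excerpt; a minimal hedged sketch of a compatible helper, assuming successive rotations about the x, y and z axes (any proper rotation works for the invariance check):

import numpy as np

def rotate_atom(x, y, z, phi, theta, psi):
    # Hedged sketch: rotate the point (x, y, z) by phi, theta, psi (radians)
    # about the x, y and z axes in turn; the exact convention is an assumption.
    cx, sx = np.cos(phi), np.sin(phi)
    cy, sy = np.cos(theta), np.sin(theta)
    cz, sz = np.cos(psi), np.sin(psi)
    rot_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return tuple(rot_z @ rot_y @ rot_x @ np.array([x, y, z]))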
Example #15
def calc_train_images(images, HL, E_conv, f_conv, f_coeff, ncore, amp_pot=None):
    Hidden_Layer=tuple(HL)
    print("Hidden Layer: {}".format(Hidden_Layer))
    print("Energy convergence: {}".format(E_conv))
    cores={'localhost':ncore}   # using 'localhost' avoids SSH communication between nodes
    ### load "amp.amp" if a pre-trained potential is given
    if amp_pot:
        calc = Amp.load(amp_pot)
    else:
        calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(hiddenlayers=Hidden_Layer), cores=cores)
    ### Global Search in Param Space
    Annealer(calc=calc, images=images, Tmax=20, Tmin=1, steps=4000)

    if f_conv <= 0.0:
        E_maxresid = E_conv*3
        #convergence={'energy_rmse': E_conv}
        convergence={'energy_rmse': E_conv, 'energy_maxresid': E_maxresid}
    else:
        convergence={'energy_rmse': E_conv, 'force_rmse':f_conv}
    calc.model.lossfunction = LossFunction(convergence=convergence, force_coefficient=f_coeff)  # setting
    calc.train(images=images, overwrite=True)
    return
Example #16
def train_test():
    """Gaussian/tflow train test."""
    perform, reason = check_perform()
    if not perform:
        print('Skipping this test because {}.'.format(reason))
        return

    from amp.model.tflow import NeuralNetwork
    label = 'train_test/calc'
    train_images = generate_data(2)
    convergence = {'energy_rmse': 0.02, 'force_rmse': 0.02}

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(3, 3),
                                   convergenceCriteria=convergence),
               label=label,
               cores=1)

    calc.train(images=train_images, )
    for image in train_images:
        print("energy =", calc.get_potential_energy(image))
        print("forces =", calc.get_forces(image))
Example #17
def train_test():
    """Gaussian/Neural train test."""
    label = 'train_test/calc'
    train_images = generate_data(2)

    calc = Amp(descriptor=Gaussian(),
               model=NeuralNetwork(hiddenlayers=(3, 3)),
               label=label,
               cores=1)
    loss = LossFunction(convergence={'energy_rmse': 0.02, 'force_rmse': 0.03})
    calc.model.lossfunction = loss

    calc.train(images=train_images, )
    for image in train_images:
        print("energy = %s" % str(calc.get_potential_energy(image)))
        print("forces = %s" % str(calc.get_forces(image)))

    # Test that we can re-load this calculator and call it again.
    del calc
    calc2 = Amp.load(label + '.amp')
    for image in train_images:
        print("energy = %s" % str(calc2.get_potential_energy(image)))
        print("forces = %s" % str(calc2.get_forces(image)))
Example #18
def test():
    """Gaussian/Neural numeric-analytic consistency."""
    images = generate_data()
    regressor = Regressor(optimizer='BFGS')

    _G = make_symmetry_functions(type='G2',
                                 etas=[0.05, 5.],
                                 elements=['Cu', 'Pt'])
    _G += make_symmetry_functions(type='G4',
                                  etas=[0.005],
                                  zetas=[1., 4.],
                                  gammas=[1.],
                                  elements=['Cu', 'Pt'])
    Gs = {'Cu': _G, 'Pt': _G}
    calc = Amp(descriptor=Gaussian(Gs=Gs),
               model=NeuralNetwork(
                   hiddenlayers=(2, 1),
                   regressor=regressor,
                   randomseed=42,
               ),
               cores=1)

    step = 0
    for d in [None, 0.00001]:
        for fortran in [True, False]:
            for cores in [1, 2]:
                step += 1
                label = \
                    'numeric_analytic_test/analytic-%s-%i' % (fortran, cores) \
                    if d is None \
                    else 'numeric_analytic_test/numeric-%s-%i' \
                    % (fortran, cores)
                print(label)

                loss = LossFunction(convergence={
                    'energy_rmse': 10**10,
                    'force_rmse': 10**10
                },
                                    d=d)
                calc.set_label(label)
                calc.dblabel = 'numeric_analytic_test/analytic-True-1'
                calc.model.lossfunction = loss
                calc.descriptor.fortran = fortran
                calc.model.fortran = fortran
                calc.cores = cores

                calc.train(images=images, )

                if step == 1:
                    ref_energies = []
                    ref_forces = []
                    for image in images:
                        ref_energies += [calc.get_potential_energy(image)]
                        ref_forces += [calc.get_forces(image)]
                        ref_dloss_dparameters = \
                            calc.model.lossfunction.dloss_dparameters
                else:
                    energies = []
                    forces = []
                    for image in images:
                        energies += [calc.get_potential_energy(image)]
                        forces += [calc.get_forces(image)]
                        dloss_dparameters = \
                            calc.model.lossfunction.dloss_dparameters

                    for image_no in range(2):

                        diff = abs(energies[image_no] - ref_energies[image_no])
                        assert (diff < 10.**(-13.)), \
                            'The calculated value of energy of image %i is ' \
                            'wrong!' % (image_no + 1)

                        for atom_no in range(len(images[0])):
                            for i in range(3):
                                diff = abs(forces[image_no][atom_no][i] -
                                           ref_forces[image_no][atom_no][i])
                                assert (diff < 10.**(-10.)), \
                                    'The calculated %i force of atom %i of ' \
                                    'image %i is wrong!' \
                                    % (i, atom_no, image_no + 1)
                        # Checks analytical and numerical dloss_dparameters
                        for _ in range(len(ref_dloss_dparameters)):
                            diff = abs(dloss_dparameters[_] -
                                       ref_dloss_dparameters[_])
                            assert(diff < 10 ** (-10.)), \
                                'The calculated value of loss function ' \
                                'derivative is wrong!'
    # Checks analytical and numerical forces
    forces = []
    for image in images:
        image.set_calculator(calc)
        forces += [calc.calculate_numerical_forces(image, d=d)]
    for atom_no in range(len(images[0])):
        for i in range(3):
            diff = abs(forces[image_no][atom_no][i] -
                       ref_forces[image_no][atom_no][i])
            print('{:3d} {:1d} {:7.1e}'.format(atom_no, i, diff))
            assert (diff < 10.**(-6.)), \
                'The calculated %i force of atom %i of ' \
                'image %i is wrong! (Diff = %f)' \
                % (i, atom_no, image_no + 1, diff)
Example #19
def non_periodic_0th_bfgs_step_test():
    """Gaussian/Neural training non-periodic standard test.

    Compares results to that expected from separate mathematica
    calculations.
    """

    images = [
        Atoms(symbols='PdOPd2',
              pbc=np.array([False, False, False], dtype=bool),
              cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
              positions=np.array([[0., 0., 0.], [0., 2., 0.], [0., 0., 3.],
                                  [1., 0., 0.]])),
        Atoms(symbols='PdOPd2',
              pbc=np.array([False, False, False], dtype=bool),
              cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
              positions=np.array([[0., 1., 0.], [1., 2., 1.], [-1., 1., 2.],
                                  [1., 3., 2.]])),
        Atoms(symbols='PdO',
              pbc=np.array([False, False, False], dtype=bool),
              cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
              positions=np.array([[2., 1., -1.], [1., 2., 1.]])),
        Atoms(symbols='Pd2O',
              pbc=np.array([False, False, False], dtype=bool),
              cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
              positions=np.array([[-2., -1., -1.], [1., 2., 1.], [3., 4.,
                                                                  4.]])),
        Atoms(symbols='Cu',
              pbc=np.array([False, False, False], dtype=bool),
              cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
              positions=np.array([[0., 0., 0.]]))
    ]

    for image in images:
        image.set_calculator(EMT())
        image.get_potential_energy(apply_constraint=False)
        image.get_forces(apply_constraint=False)

    # Parameters

    Gs = {
        'O': [{
            'type': 'G2',
            'element': 'Pd',
            'eta': 0.8
        }, {
            'type': 'G4',
            'elements': ['Pd', 'Pd'],
            'eta': 0.2,
            'gamma': 0.3,
            'zeta': 1
        }, {
            'type': 'G4',
            'elements': ['O', 'Pd'],
            'eta': 0.3,
            'gamma': 0.6,
            'zeta': 0.5
        }],
        'Pd': [{
            'type': 'G2',
            'element': 'Pd',
            'eta': 0.2
        }, {
            'type': 'G4',
            'elements': ['Pd', 'Pd'],
            'eta': 0.9,
            'gamma': 0.75,
            'zeta': 1.5
        }, {
            'type': 'G4',
            'elements': ['O', 'Pd'],
            'eta': 0.4,
            'gamma': 0.3,
            'zeta': 4
        }],
        'Cu': [{
            'type': 'G2',
            'element': 'Cu',
            'eta': 0.8
        }, {
            'type': 'G4',
            'elements': ['Cu', 'O'],
            'eta': 0.2,
            'gamma': 0.3,
            'zeta': 1
        }, {
            'type': 'G4',
            'elements': ['Cu', 'Cu'],
            'eta': 0.3,
            'gamma': 0.6,
            'zeta': 0.5
        }]
    }

    hiddenlayers = {'O': (2, ), 'Pd': (2, ), 'Cu': (2, )}

    weights = OrderedDict([
        ('O',
         OrderedDict([(1,
                       np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9],
                                  [-2.5, -1.5]])),
                      (2, np.matrix([[5.5], [3.6], [1.4]]))])),
        ('Pd',
         OrderedDict([(1,
                       np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7],
                                  [-3.0, 2.0]])),
                      (2, np.matrix([[4.0], [0.5], [3.0]]))])),
        ('Cu',
         OrderedDict([(1,
                       np.matrix([[0.0, 1.0], [-1.0, -2.0], [2.5, -1.9],
                                  [-3.5, 0.5]])),
                      (2, np.matrix([[0.5], [1.6], [-1.4]]))]))
    ])

    scalings = OrderedDict([
        ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])),
        ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)])),
        ('Cu', OrderedDict([('intercept', -0.3), ('slope', -0.5)]))
    ])

    # Correct values
    if aseversion < 12:  # EMT values have changed from 3.12.0 version
        ref_loss = 7144.8107853579895
        ref_energyloss = (24.318837496016506**2.) * 5
        ref_forceloss = (144.70282477494519**2.) * 5
        ref_dloss_dparameters = np.array([
            0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656,
            0.028312691567496464, 0.6012336354445753, 0.9659002689921986,
            -1.289777005924742, -0.5718960934643078, -2.642566722179569,
            -1.196039924610482, 0, 0, -2.72563797131018, -0.9080181024866707,
            -0.7739948323226851, -0.29157894253717415, -2.0599829042717404,
            -0.6156374289895887, -0.006086517460749253, -0.829678548408266,
            0.0008092646745710161, 0.04161302703491613, 0.0034264690790135606,
            -0.957800456897051, -0.006281929606579444, -0.2883588477371198,
            -4.245777410962108, -4.3174120941045535, -8.02385959091948,
            -3.240512651984099, -27.289862194988853, -26.8177742762544,
            -82.45107056051073, -80.68167683508715
        ])
        ref_energy_maxresid = 54.21915548269209
        ref_force_maxresid = 791.6736436232306
    else:
        ref_loss = 7144.807220773296
        ref_energyloss = (24.318829702548342**2.) * 5
        ref_forceloss = (144.70279593472887**2.) * 5
        ref_dloss_dparameters = np.array([
            0, 0, 0, 0, 0, 0, 0.01374139170953901, 0.36318423812749656,
            0.028312691567496464, 0.6012336354445753, 0.9659002689921986,
            -1.2897765357544038, -0.5718958286530584, -2.642565840915077,
            -1.1960394346870424, 0, 0, -2.7256370964673238,
            -0.9080177898160631, -0.7739945904033205, -0.29157882294526083,
            -2.0599825024556027, -0.6156371996742152, -0.006086514109432934,
            -0.8296782839032163, 0.0008092653341775424, 0.04161306816722683,
            0.0034264692325982156, -0.9578001030483714, -0.006281927374160914,
            -0.28835874344086, -4.245775886469167, -4.317410633818672,
            -8.02385959091948, -3.240512651984099, -27.289853042932705,
            -26.81776520493048, -82.45104200076496, -80.68164887277251
        ])
        ref_energy_maxresid = 54.21913802238612
        ref_force_maxresid = 791.6734866205463

    # Testing pure-python and fortran versions of Gaussian-neural on different
    # number of processes

    for fortran in [False, True]:
        for cores in range(1, 6):
            label = 'train-nonperiodic/%s-%i' % (fortran, cores)
            print(label)
            calc = Amp(descriptor=Gaussian(
                cutoff=6.5,
                Gs=Gs,
                fortran=fortran,
            ),
                       model=NeuralNetwork(
                           hiddenlayers=hiddenlayers,
                           weights=weights,
                           scalings=scalings,
                           activation='sigmoid',
                           regressor=regressor,
                           fortran=fortran,
                       ),
                       label=label,
                       dblabel=label,
                       cores=cores)

            lossfunction = LossFunction(convergence=convergence)
            calc.model.lossfunction = lossfunction
            calc.train(images=images, )
            diff = abs(calc.model.lossfunction.loss - ref_loss)
            print("diff at 204 =", diff)
            assert (diff < 10.**(-10.)), \
                'Calculated value of loss function is wrong!'
            diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss)
            assert (diff < 10.**(-10.)), \
                'Calculated value of energy per atom RMSE is wrong!'
            diff = abs(calc.model.lossfunction.force_loss - ref_forceloss)
            assert (diff < 10 ** (-10.)), \
                'Calculated value of force RMSE is wrong!'
            diff = abs(calc.model.lossfunction.energy_maxresid -
                       ref_energy_maxresid)
            assert (diff < 10.**(-10.)), \
                'Calculated value of energy per atom max residual is wrong!'
            diff = abs(calc.model.lossfunction.force_maxresid -
                       ref_force_maxresid)
            assert (diff < 10 ** (-10.)), \
                'Calculated value of force max residual is wrong!'

            for _ in range(len(ref_dloss_dparameters)):
                diff = abs(calc.model.lossfunction.dloss_dparameters[_] -
                           ref_dloss_dparameters[_])
                assert(diff < 10 ** (-12.)), \
                    "Calculated value of loss function derivative is wrong!"

            dblabel = label
            secondlabel = '_' + label

            calc = Amp(descriptor=Gaussian(
                cutoff=6.5,
                Gs=Gs,
                fortran=fortran,
            ),
                       model=NeuralNetwork(
                           hiddenlayers=hiddenlayers,
                           weights=weights,
                           scalings=scalings,
                           activation='sigmoid',
                           regressor=regressor,
                           fortran=fortran,
                       ),
                       label=secondlabel,
                       dblabel=dblabel,
                       cores=cores)

            lossfunction = LossFunction(convergence=convergence)
            calc.model.lossfunction = lossfunction
            calc.train(images=images, )
            diff = abs(calc.model.lossfunction.loss - ref_loss)
            assert (diff < 10.**(-10.)), \
                'Calculated value of loss function is wrong!'
            diff = abs(calc.model.lossfunction.energy_loss - ref_energyloss)
            assert (diff < 10.**(-10.)), \
                'Calculated value of energy per atom RMSE is wrong!'
            diff = abs(calc.model.lossfunction.force_loss - ref_forceloss)
            assert (diff < 10 ** (-10.)), \
                'Calculated value of force RMSE is wrong!'
            diff = abs(calc.model.lossfunction.energy_maxresid -
                       ref_energy_maxresid)
            assert (diff < 10.**(-10.)), \
                'Calculated value of energy per atom max residual is wrong!'
            diff = abs(calc.model.lossfunction.force_maxresid -
                       ref_force_maxresid)
            assert (diff < 10 ** (-10.)), \
                'Calculated value of force max residual is wrong!'

            for _ in range(len(ref_dloss_dparameters)):
                diff = abs(calc.model.lossfunction.dloss_dparameters[_] -
                           ref_dloss_dparameters[_])
                assert(diff < 10 ** (-12.)), \
                    'Calculated value of loss function derivative is wrong!'
def test_fp_match():
    slab = fcc100("Cu", size=(3, 3, 3))
    ads = molecule("CO")
    add_adsorbate(slab, ads, 4, offset=(1, 1))
    cons = FixAtoms(indices=[
        atom.index for atom in slab if (atom.tag == 2 or atom.tag == 3)
    ])
    slab.set_constraint(cons)
    slab.center(vacuum=13.0, axis=2)
    slab.set_pbc(True)
    slab.wrap(pbc=[True] * 3)
    slab.set_calculator(EMT())

    images = [slab]

    Gs = {}
    Gs["G2_etas"] = [2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [1.0]
    Gs["cutoff"] = 6.5

    elements = np.array([atom.symbol for atoms in images for atom in atoms])
    _, idx = np.unique(elements, return_index=True)
    elements = list(elements[np.sort(idx)])

    G = make_symmetry_functions(elements=elements,
                                type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    G = {"O": G, "C": G, "Cu": G}

    hashes = stock_hash(images)
    snn_hashes = new_hash(images, Gs=Gs)
    AtomsDataset(images,
                 SNN_Gaussian,
                 Gs,
                 forcetraining=True,
                 label="test",
                 cores=10)

    descriptor = Gaussian(elements=elements,
                          Gs=G,
                          cutoff=Gs["cutoff"],
                          dblabel='amp')
    descriptor.calculate_fingerprints(hashes, calculate_derivatives=True)

    for idx in range(len(images)):
        amp_hash = list(hashes.keys())[idx]
        s_nn_hash = list(snn_hashes.keys())[idx]
        # SimpleNN
        with open("amp-data-fingerprints.ampdb/loose/" + s_nn_hash, "rb") as f:
            simple_nn = load(f)
        os.system("rm amp-data-fingerprints.ampdb/loose/" + s_nn_hash)

        with open("amp-data-fingerprint-primes.ampdb/loose/" + s_nn_hash,
                  "rb") as f:
            simple_nn_prime = load(f)
        os.system("rm amp-data-fingerprint-primes.ampdb/loose/" + s_nn_hash)

        # AMP
        with open("amp-fingerprints.ampdb/loose/" + amp_hash, "rb") as f:
            amp = load(f)
        os.system("rm amp-fingerprints.ampdb/loose/" + amp_hash)

        with open("amp-fingerprint-primes.ampdb/loose/" + amp_hash, "rb") as f:
            amp_prime = load(f)
        os.system("rm amp-fingerprint-primes.ampdb/loose/" + amp_hash)

        key = amp_prime.keys()

        for s, am in zip(simple_nn, amp):
            for i, j in zip(s[1], am[1]):
                assert abs(i -
                           j) <= 1e-4, "Fingerprints do not match! %s, %s" % (
                               i, j)
        for idx in key:
            for s, am in zip(simple_nn_prime[idx], amp_prime[idx]):
                assert abs(s - am) <= 1e-4, "Fingerprint primes do not match!"
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous Mathematica
    calculations.
    """

    #: Making the list of non-periodic images
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]

    [a.get_potential_energy() for a in images]
    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}

    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5

    elements = ["O", "Pd", "Cu"]

    G = make_symmetry_functions(elements=elements,
                                type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    hashed_images = hash_images(images, Gs)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(hashed_images,
                                      calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor,
                                                      hashed_images)

    weights = OrderedDict([
        (
            "O",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.matrix([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Pd",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Cu",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
    ])

    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])

    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="linear",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )

    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)

    device = "cpu"
    dataset = AtomsDataset(images,
                           descriptor=DummyGaussian,
                           cores=1,
                           label='test',
                           Gs=Gs,
                           forcetraining=True)
    fp_length = dataset.fp_length
    batch_size = len(dataset)
    dataloader = DataLoader(dataset,
                            batch_size,
                            collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
    for name, layer in model.named_modules():
        if isinstance(layer, Dense):
            layer.activation = None
            init.constant_(layer.weight, 0.5)
            init.constant_(layer.bias, 0.5)
    for batch in dataloader:
        input_data = [batch[0], len(batch[1]), batch[3]]
        for element in elements:
            input_data[0][element][0] = (
                input_data[0][element][0].to(device).requires_grad_(True))
        fp_primes = batch[4]
        energy_pred, force_pred = model(input_data, fp_primes)

    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.00001, (
                "The predicted force of image % i, direction % i is wrong! Values: %s vs %s"
                % (idx + 1, idx_d, value, force_pred.tolist()[idx][idx_d]))
    print("Force predictions are correct!")
Example #22
def test():
    """Guassian/Neural training.

    Checks consistency of pure-python and fortran versions.
    """

    images = make_images()

    convergence = {'energy_rmse': 10.**10.,
                   'energy_maxresid': 10.**10.,
                   'force_rmse': 10.**10.,
                   'force_maxresid': 10.**10., }

    regressor = Regressor(optimizer='BFGS')

    count = 0
    for fortran in [False, True]:
        for cores in range(1, 2):
            string = 'consistgauss/%s-%i'
            label = string % (fortran, cores)
            calc = Amp(descriptor=Gaussian(cutoff=cutoff,
                                           Gs=Gs,
                                           fortran=fortran,),
                       model=NeuralNetwork(hiddenlayers=hiddenlayers,
                                           weights=weights,
                                           scalings=scalings,
                                           activation=activation,
                                           fprange=fingerprints_range,
                                           regressor=regressor,),
                       label=label,
                       cores=1)

            lossfunction = LossFunction(convergence=convergence)
            calc.model.lossfunction = lossfunction
            calc.train(images=images,)

            if count == 0:
                ref_loss = calc.model.lossfunction.loss
                ref_energy_loss = calc.model.lossfunction.energy_loss
                ref_force_loss = calc.model.lossfunction.force_loss
                ref_dloss_dparameters = \
                    calc.model.lossfunction.dloss_dparameters
            else:
                assert (abs(calc.model.lossfunction.loss -
                            ref_loss) < 10.**(-10.)), \
                    '''Loss function value for %r fortran, and %i cores is
                not consistent with the value of python version
                on single core.''' % (fortran, cores)

                assert (abs(calc.model.lossfunction.energy_loss -
                            ref_energy_loss) <
                        10.**(-9.)), \
                    '''Energy rmse value for %r fortran, and %i cores is not
                consistent with the value of python version on
                single core.''' % (fortran, cores)

                assert (abs(calc.model.lossfunction.force_loss -
                            ref_force_loss) <
                        10.**(-9.)), \
                    '''Force rmse value for %r fortran, and %i cores is not
                consistent with the value of python version on
                single core.''' % (fortran, cores)

                for _ in range(len(ref_dloss_dparameters)):
                    assert (abs(calc.model.lossfunction.dloss_dparameters[_] -
                                ref_dloss_dparameters[_]) < 10.**(-10.)), \
                        '''Derivative of the cost function for %r fortran,
                    and %i cores is not consistent with the value of
                    python version on single core.''' % (fortran, cores)

            count = count + 1
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 21:54:34 2020

@author: srava
"""
from ase.io import read
from ase.io.trajectory import Trajectory
import numpy as np
from ase.calculators.emt import EMT

from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.utilities import hash_images
from amp.model.neuralnetwork import NeuralNetwork

import matplotlib.pyplot as plt

calc = Amp(descriptor=Gaussian(), model=NeuralNetwork(),
           label='calc')
calc.model.lossfunction.parameters['convergence'].update(
    {'energy_rmse': 0.05,})
calc.train(images='training_data.traj')
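matplotlib and the trajectory reader are imported above but unused in the excerpt; a hedged sketch of the parity plot they presumably serve, with the filename taken from the training call:

# Hedged sketch of a parity plot with the trained calculator.
images = read('training_data.traj', index=':')
targets = [atoms.get_potential_energy() for atoms in images]          # reference energies stored in the trajectory
predictions = [calc.get_potential_energy(atoms) for atoms in images]  # Amp predictions
plt.scatter(targets, predictions)
plt.xlabel('Reference energy (eV)')
plt.ylabel('Amp energy (eV)')
plt.savefig('parity.png')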
Example #24
def test():
    """Displaced atom test."""

    ###########################################################################
    # Parameters

    atoms = Atoms(symbols='PdOPd2',
                  pbc=np.array([False, False, False], dtype=bool),
                  cell=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
                  positions=np.array([[0., 1., 0.], [1., 2., 1.],
                                      [-1., 1., 2.], [1., 3., 2.]]))

    ###########################################################################
    # Parameters

    Gs = {
        'O': [{
            'type': 'G2',
            'element': 'Pd',
            'eta': 0.8
        }, {
            'type': 'G4',
            'elements': ['Pd', 'Pd'],
            'eta': 0.2,
            'gamma': 0.3,
            'zeta': 1
        }, {
            'type': 'G4',
            'elements': ['O', 'Pd'],
            'eta': 0.3,
            'gamma': 0.6,
            'zeta': 0.5
        }],
        'Pd': [{
            'type': 'G2',
            'element': 'Pd',
            'eta': 0.2
        }, {
            'type': 'G4',
            'elements': ['Pd', 'Pd'],
            'eta': 0.9,
            'gamma': 0.75,
            'zeta': 1.5
        }, {
            'type': 'G4',
            'elements': ['O', 'Pd'],
            'eta': 0.4,
            'gamma': 0.3,
            'zeta': 4
        }]
    }

    hiddenlayers = {'O': (2, ), 'Pd': (2, )}

    weights = OrderedDict([
        ('O',
         OrderedDict([(1,
                       np.matrix([[-2.0, 6.0], [3.0, -3.0], [1.5, -0.9],
                                  [-2.5, -1.5]])),
                      (2, np.matrix([[5.5], [3.6], [1.4]]))])),
        ('Pd',
         OrderedDict([(1,
                       np.matrix([[-1.0, 3.0], [2.0, 4.2], [1.0, -0.7],
                                  [-3.0, 2.0]])),
                      (2, np.matrix([[4.0], [0.5], [3.0]]))]))
    ])

    scalings = OrderedDict([
        ('O', OrderedDict([('intercept', -2.3), ('slope', 4.5)])),
        ('Pd', OrderedDict([('intercept', 1.6), ('slope', 2.5)]))
    ])

    fingerprints_range = {
        "O":
        np.array([[0.21396177208585404, 2.258090276328769],
                  [0.0, 2.1579067008202975], [0.0, 0.0]]),
        "Pd":
        np.array([[0.0, 1.4751761770313006], [0.0, 0.697686078889583],
                  [0.0, 0.37848964715610417]])
    }

    ###########################################################################

    calc = Amp(descriptor=Gaussian(
        cutoff=6.5,
        Gs=Gs,
        fortran=False,
    ),
               model=NeuralNetwork(hiddenlayers=hiddenlayers,
                                   weights=weights,
                                   scalings=scalings,
                                   fprange=fingerprints_range,
                                   mode='atom-centered'),
               cores=1)

    atoms.set_calculator(calc)

    e1 = atoms.get_potential_energy(apply_constraint=False)
    e2 = calc.get_potential_energy(atoms)
    f1 = atoms.get_forces(apply_constraint=False)

    atoms[0].x += 0.5

    boolean = atoms.calc.calculation_required(atoms, properties=['energy'])

    e3 = atoms.get_potential_energy(apply_constraint=False)
    e4 = calc.get_potential_energy(atoms)
    f2 = atoms.get_forces(apply_constraint=False)

    assert (e1 == e2 and e3 == e4 and abs(e1 - e3) > 10.**(-3.)
            and (boolean is True)
            and (not (f1 == f2).all())), 'Displaced-atom test broken!'
    dataset_size = len(num_atoms)
    raw_force_target = force_target * sd_scaling
    num_atoms_force = torch.cat([idx.repeat(int(idx)) for idx in num_atoms])
    num_atoms_force = torch.sqrt(num_atoms_force).reshape(
        len(num_atoms_force), 1)
    force_pred_per_atom = torch.div(force_pred, num_atoms_force)
    force_targets_per_atom = torch.div(raw_force_target, num_atoms_force)
    force_mse = mse_loss(force_pred_per_atom, force_targets_per_atom)
    force_mse /= (3 * dataset_size)
    force_rmse = torch.sqrt(force_mse)
    return force_rmse


forcetraining = True
data = AtomsDataset("../../datasets/water/water.extxyz",
                    descriptor=Gaussian(),
                    cores=1,
                    forcetraining=forcetraining)
scalings = data.scalings
unique_atoms = data.unique_atoms
fp_length = data.fp_length

device = 'cpu'
# device = 'cuda:0'

net = NeuralNetRegressor(
    module=FullNN(unique_atoms, [fp_length, 5, 5],
                  device,
                  forcetraining=forcetraining),
    criterion=CustomLoss,
    criterion__force_coefficient=0.3,
Example #26
def non_periodic_test():
    """Gaussian/tflowNeural non-periodic."""
    perform, reason = check_perform()
    if not perform:
        print('Skipping this test because {}'.format(reason))
        return

    from amp.model.tflow import NeuralNetwork
    # Making the list of non-periodic images
    images = [Atoms(symbols='PdOPd2',
                    pbc=np.array([False, False, False], dtype=bool),
                    cell=np.array(
                        [[1.,  0.,  0.],
                         [0.,  1.,  0.],
                            [0.,  0.,  1.]]),
                    positions=np.array(
                        [[0.,  0.,  0.],
                         [0.,  2.,  0.],
                            [0.,  0.,  3.],
                            [1.,  0.,  0.]])),
              Atoms(symbols='PdOPd2',
                    pbc=np.array([False, False, False], dtype=bool),
                    cell=np.array(
                        [[1.,  0.,  0.],
                         [0.,  1.,  0.],
                            [0.,  0.,  1.]]),
                    positions=np.array(
                        [[0.,  1.,  0.],
                         [1.,  2.,  1.],
                            [-1.,  1.,  2.],
                            [1.,  3.,  2.]])),
              Atoms(symbols='PdO',
                    pbc=np.array([False, False, False], dtype=bool),
                    cell=np.array(
                        [[1.,  0.,  0.],
                         [0.,  1.,  0.],
                         [0.,  0.,  1.]]),
                    positions=np.array(
                        [[2.,  1., -1.],
                         [1.,  2.,  1.]])),
              Atoms(symbols='Pd2O',
                    pbc=np.array([False, False, False], dtype=bool),
                    cell=np.array(
                        [[1.,  0.,  0.],
                         [0.,  1.,  0.],
                         [0.,  0.,  1.]]),
                    positions=np.array(
                        [[-2., -1., -1.],
                         [1.,  2.,  1.],
                         [3.,  4.,  4.]])),
              Atoms(symbols='Cu',
                    pbc=np.array([False, False, False], dtype=bool),
                    cell=np.array(
                        [[1.,  0.,  0.],
                         [0.,  1.,  0.],
                         [0.,  0.,  1.]]),
                    positions=np.array(
                        [[0.,  0.,  0.]]))]

    # Correct energies and forces
    correct_energies = [14.231186811226152, 14.327219917287948,
                        5.5742510565528285, 9.41456771216968,
                        -0.5019297954597407]
    correct_forces = \
        [[[-0.05095024246182649, -0.10709193432146558, -0.09734321482638622],
          [-0.044550772904033635, 0.2469763195486647, -0.07617425912869778],
            [-0.02352490951707703, -0.050782839419131864, 0.24409220250631508],
            [0.11902592488293715, -0.08910154580806727, -0.07057472855123109]],
            [[-0.024868720575099375, -0.07417891957113862,
              -0.12121240797223251],
             [0.060156158438252574, 0.017517013378773042,
              -0.020047135079325505],
             [-0.10901144291312388, -0.06671262448352767, 0.06581556263014315],
             [0.07372400504997068, 0.12337453067589325, 0.07544398042141486]],
            [[0.10151747265164626, -0.10151747265164626, -0.20303494530329252],
             [-0.10151747265164626, 0.10151747265164626, 0.20303494530329252]],
            [[-0.00031177673224312745, -0.00031177673224312745,
              -0.0002078511548287517],
             [0.004823209772264884, 0.004823209772264884,
              0.006975000714861393],
             [-0.004511433040021756, -0.004511433040021756,
              -0.006767149560032641]],
            [[0.0, 0.0, 0.0]]]

    # Parameters
    Gs = {'O': [{'type': 'G2', 'element': 'Pd', 'eta': 0.8},
                {'type': 'G4', 'elements': ['Pd', 'Pd'],
                 'eta': 0.2, 'gamma': 0.3, 'zeta': 1},
                {'type': 'G4', 'elements': ['O', 'Pd'],
                 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5}],
          'Pd': [{'type': 'G2', 'element': 'Pd', 'eta': 0.2},
                 {'type': 'G4', 'elements': ['Pd', 'Pd'],
                  'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5},
                 {'type': 'G4', 'elements': ['O', 'Pd'],
                  'eta': 0.4, 'gamma': 0.3, 'zeta': 4}],
          'Cu': [{'type': 'G2', 'element': 'Cu', 'eta': 0.8},
                 {'type': 'G4', 'elements': ['Cu', 'O'],
                  'eta': 0.2, 'gamma': 0.3, 'zeta': 1},
                 {'type': 'G4', 'elements': ['Cu', 'Cu'],
                  'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5}]}

    hiddenlayers = {'O': (2, 1), 'Pd': (2, 1), 'Cu': (2, 1)}

    weights = OrderedDict([('O', OrderedDict([(1, np.matrix([[-2.0, 6.0],
                                                             [3.0, -3.0],
                                                             [1.5, -0.9],
                                                             [-2.5, -1.5]])),
                                              (2, np.matrix([[5.5],
                                                             [3.6],
                                                             [1.4]]))])),
                           ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0],
                                                              [2.0, 4.2],
                                                              [1.0, -0.7],
                                                              [-3.0, 2.0]])),
                                               (2, np.matrix([[4.0],
                                                              [0.5],
                                                              [3.0]]))])),
                           ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0],
                                                              [-1.0, -2.0],
                                                              [2.5, -1.9],
                                                              [-3.5, 0.5]])),
                                               (2, np.matrix([[0.5],
                                                              [1.6],
                                                              [-1.4]]))]))])

    scalings = OrderedDict([('O', OrderedDict([('intercept', -2.3),
                                               ('slope', 4.5)])),
                            ('Pd', OrderedDict([('intercept', 1.6),
                                                ('slope', 2.5)])),
                            ('Cu', OrderedDict([('intercept', -0.3),
                                                ('slope', -0.5)]))])

    fingerprints_range = {"Cu": np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
                          "O": np.array([[0.2139617720858539,
                                          2.258090276328769],
                                         [0.0, 1.085656080548734],
                                         [0.0, 0.0]]),
                          "Pd": np.array([[0.0, 1.4751761770313006],
                                          [0.0, 0.28464992134267897],
                                          [0.0, 0.20167521020630502]])}

    # Testing pure-python and fortran versions of Gaussian-neural force call
    for fortran in [False, True]:
        for cores in range(1, 6):
            label = 'call-nonperiodic/%s-%i' % (fortran, cores)
            calc = Amp(descriptor=Gaussian(cutoff=6.5,
                                           Gs=Gs,
                                           fortran=fortran),
                       model=NeuralNetwork(hiddenlayers=hiddenlayers,
                                           weights=weights,
                                           scalings=scalings,
                                           activation='sigmoid',
                                           fprange=fingerprints_range),
                       label=label,
                       dblabel=label,
                       cores=cores)

            predicted_energies = [calc.get_potential_energy(image) for image in
                                  images]

            for image_no in range(len(predicted_energies)):
                print(predicted_energies[image_no])
                print(correct_energies[image_no])
                diff = abs(predicted_energies[image_no] -
                           correct_energies[image_no])
                assert (diff < 10.**(-3.)), \
                    'The predicted energy of image %i is wrong!' % (
                        image_no + 1)

            predicted_forces = [calc.get_forces(image) for image in images]

            for image_no in range(len(predicted_forces)):
                print('predicted forces:')
                print(predicted_forces[image_no])
                print('correct forces:')
                print(np.array(correct_forces[image_no]))
                for index in range(np.shape(predicted_forces[image_no])[0]):
                    for direction in range(
                            np.shape(predicted_forces[image_no])[1]):
                        diff = abs(predicted_forces[image_no][index][
                            direction] -
                            correct_forces[image_no][index][direction])
                        assert (diff < 10.**(-3.)), \
                            'The predicted force on atom %i of image %i ' \
                            'along direction %i is wrong!' % (
                                index, image_no + 1, direction)
Beispiel #27
# Imports needed by this snippet; the molecules nn1 and nn2, the rotation
# angle lists angs1 and angs2, and the initial `images` list are assumed to
# be defined earlier in the original script.
import numpy as np
from ase.visualize import view
from matplotlib import pyplot

from amp.descriptor.gaussian import Gaussian
from amp.utilities import hash_images

for ang1, ang2 in zip(angs1, angs2):
    nn1_ = nn1.copy()
    nn1_.rotate([0, 0, 1], ang1)

    nn2_ = nn2.copy()
    nn2_.rotate([0, 0, 1], ang2)
    nn2_.positions = nn2_.positions + np.array([3.0, 0.0, 0.0])

    for atom in nn2_:
        nn1_.append(atom)
    images.append(nn1_)

view(images)

# Fingerprint using Amp.
descriptor = Gaussian()
images = hash_images(images, ordered=True)
descriptor.calculate_fingerprints(images)


def barplot(hash, name, title):
    """Makes a barplot of the fingerprint about the O atom."""
    fp = descriptor.fingerprints[hash][0]
    fig, ax = pyplot.subplots()
    ax.bar(range(len(fp[1])), fp[1])
    ax.set_title(title)
    ax.set_ylim(0., 2.)
    ax.set_xlabel('fingerprint')
    ax.set_ylabel('value')
    fig.savefig(name)
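
# Hypothetical usage sketch (not part of the original script): make one bar
# plot per hashed image, in insertion order; file names and titles here are
# illustrative only.
for index, key in enumerate(images.keys()):
    barplot(key, 'bar-%02i.png' % index, 'Image %i' % index)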
Beispiel #28
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous Mathematica
    calculations.
    """

    #: Making the list of non-periodic images
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]

    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}

    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5

    elements = ["O", "Pd", "Cu"]

    G = make_symmetry_functions(elements=elements,
                                type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    amp_images = amp_hash(images)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(amp_images, calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor, amp_images)
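    # Per-element [min, max] of each symmetry function over these images;
    # passed to the Amp model below as `fprange` to scale its inputs.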
    np.random.seed(1)
    O_weights_1 = np.random.rand(10, 2)
    O_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(2)
    Pd_weights_1 = np.random.rand(10, 2)
    Pd_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(3)
    Cu_weights_1 = np.random.rand(10, 2)
    Cu_weights_2 = np.random.rand(1, 3).reshape(-1, 1)

    weights = OrderedDict([
        ("O", OrderedDict([(1, O_weights_1), (2, O_weights_2)])),
        ("Pd", OrderedDict([(1, Pd_weights_1), (2, Pd_weights_2)])),
        ("Cu", OrderedDict([(1, Cu_weights_1), (2, Cu_weights_2)])),
    ])

    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])

    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="tanh",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )

    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)

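    # AMP stores each layer as a single (n_inputs + 1, n_outputs) matrix with
    # the bias in the last row; split off the bias and transpose so the values
    # can be copied into torch Linear layers, which expect (out, in) weights.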
    torch_O_weights_1 = torch.FloatTensor(O_weights_1[:-1, :]).t()
    torch_O_bias_1 = torch.FloatTensor(O_weights_1[-1, :])
    torch_O_weights_2 = torch.FloatTensor(O_weights_2[:-1, :]).t()
    torch_O_bias_2 = torch.FloatTensor(O_weights_2[-1, :])
    torch_Pd_weights_1 = torch.FloatTensor(Pd_weights_1[:-1, :]).t()
    torch_Pd_bias_1 = torch.FloatTensor(Pd_weights_1[-1, :])
    torch_Pd_weights_2 = torch.FloatTensor(Pd_weights_2[:-1, :]).t()
    torch_Pd_bias_2 = torch.FloatTensor(Pd_weights_2[-1, :])
    torch_Cu_weights_1 = torch.FloatTensor(Cu_weights_1[:-1, :]).t()
    torch_Cu_bias_1 = torch.FloatTensor(Cu_weights_1[-1, :])
    torch_Cu_weights_2 = torch.FloatTensor(Cu_weights_2[:-1, :]).t()
    torch_Cu_bias_2 = torch.FloatTensor(Cu_weights_2[-1, :])

    device = "cpu"
    dataset = AtomsDataset(
        images,
        descriptor=Gaussian,
        cores=1,
        label="consistency",
        Gs=Gs,
        forcetraining=True,
    )

    fp_length = dataset.fp_length
    batch_size = len(dataset)
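    # One batch covering the whole dataset, so the comparison below is a
    # single forward pass over all images.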
    dataloader = DataLoader(dataset,
                            batch_size,
                            collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
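    # Copy the same weights and biases used by the Amp calculator above into
    # the corresponding torch layers, so both models evaluate an identical
    # network.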
    model.state_dict()["elementwise_models.O.model_net.0.weight"].copy_(
        torch_O_weights_1)
    model.state_dict()["elementwise_models.O.model_net.0.bias"].copy_(
        torch_O_bias_1)
    model.state_dict()["elementwise_models.O.model_net.2.weight"].copy_(
        torch_O_weights_2)
    model.state_dict()["elementwise_models.O.model_net.2.bias"].copy_(
        torch_O_bias_2)
    model.state_dict()["elementwise_models.Pd.model_net.0.weight"].copy_(
        torch_Pd_weights_1)
    model.state_dict()["elementwise_models.Pd.model_net.0.bias"].copy_(
        torch_Pd_bias_1)
    model.state_dict()["elementwise_models.Pd.model_net.2.weight"].copy_(
        torch_Pd_weights_2)
    model.state_dict()["elementwise_models.Pd.model_net.2.bias"].copy_(
        torch_Pd_bias_2)
    model.state_dict()["elementwise_models.Cu.model_net.0.weight"].copy_(
        torch_Cu_weights_1)
    model.state_dict()["elementwise_models.Cu.model_net.0.bias"].copy_(
        torch_Cu_bias_1)
    model.state_dict()["elementwise_models.Cu.model_net.2.weight"].copy_(
        torch_Cu_weights_2)
    model.state_dict()["elementwise_models.Cu.model_net.2.bias"].copy_(
        torch_Cu_bias_2)
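    # Append a Tanh to each element-wise MLP so the torch model applies the
    # same output activation as the AMP network defined above.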
    import torch.nn as nn
    for name, layer in model.named_modules():
        if isinstance(layer, MLP):
            layer.model_net = nn.Sequential(layer.model_net, Tanh())

    for batch in dataloader:
        x = to_tensor(batch[0], device)
        y = to_tensor(batch[1], device)
        energy_pred, force_pred = model(x)
    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.0001, (
                "The predicted force of atom %i, direction %i is wrong! "
                "Values: %s vs %s" % (idx + 1, idx_d, value, predict))
    print("Force predictions are correct!")
Beispiel #29
def periodic_test():
    """Gaussian/tflowNeural periodic."""
    perform, reason = check_perform()
    if not perform:
        print('Skipping this test because {}'.format(reason))
        return

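    # Imported here (rather than at module level) so the test can be skipped
    # cleanly when the TensorFlow backend is unavailable.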
    from amp.model.tflow import NeuralNetwork
    # Making the list of periodic images
    images = [Atoms(symbols='PdOPd',
                    pbc=np.array([True, False, False], dtype=bool),
                    cell=np.array(
                        [[2.,  0.,  0.],
                         [0.,  2.,  0.],
                         [0.,  0.,  2.]]),
                    positions=np.array(
                        [[0.5,  1., 0.5],
                         [1.,  0.5,  1.],
                         [1.5,  1.5,  1.5]])),
              Atoms(symbols='PdO',
                    pbc=np.array([True, True, False], dtype=bool),
                    cell=np.array(
                        [[2.,  0.,  0.],
                         [0.,  2.,  0.],
                         [0.,  0.,  2.]]),
                    positions=np.array(
                        [[0.5,  1., 0.5],
                         [1.,  0.5,  1.]])),
              Atoms(symbols='Cu',
                    pbc=np.array([True, True, False], dtype=bool),
                    cell=np.array(
                        [[1.8,  0.,  0.],
                         [0.,  1.8,  0.],
                         [0.,  0.,  1.8]]),
                    positions=np.array(
                        [[0.,  0., 0.]]))]

    # Correct energies and forces
    correct_energies = [3.8560954326995978, 1.6120748520627273,
                        0.19433107801410093]
    correct_forces = \
        [[[0.14747720528015523, -3.3010645563584973, 3.3008168318984463],
          [0.03333579762326405, 9.050780376599887, -0.42608278400777605],
          [-0.1808130029034193, -5.7497158202413905, -2.8747340478906698]],
         [[6.5035267996045045 * (10.**(-6.)),
           -6.503526799604495 * (10.**(-6.)),
           0.00010834689201069249],
          [-6.5035267996045045 * (10.**(-6.)),
           6.503526799604495 * (10.**(-6.)),
           -0.00010834689201069249]],
         [[0.0, 0.0, 0.0]]]

    # Parameters
    Gs = {'O': [{'type': 'G2', 'element': 'Pd', 'eta': 0.8},
                {'type': 'G4', 'elements': ['O', 'Pd'],
                 'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5}],
          'Pd': [{'type': 'G2', 'element': 'Pd', 'eta': 0.2},
                 {'type': 'G4', 'elements': ['Pd', 'Pd'],
                  'eta': 0.9, 'gamma': 0.75, 'zeta': 1.5}],
          'Cu': [{'type': 'G2', 'element': 'Cu', 'eta': 0.8},
                 {'type': 'G4', 'elements': ['Cu', 'Cu'],
                  'eta': 0.3, 'gamma': 0.6, 'zeta': 0.5}]}

    hiddenlayers = {'O': (2, 1), 'Pd': (2, 1), 'Cu': (2, 1)}

    weights = OrderedDict([('O', OrderedDict([(1, np.matrix([[-2.0, 6.0],
                                                             [3.0, -3.0],
                                                             [1.5, -0.9]])),
                                              (2, np.matrix([[5.5],
                                                             [3.6],
                                                             [1.4]]))])),
                           ('Pd', OrderedDict([(1, np.matrix([[-1.0, 3.0],
                                                              [2.0, 4.2],
                                                              [1.0, -0.7]])),
                                               (2, np.matrix([[4.0],
                                                              [0.5],
                                                              [3.0]]))])),
                           ('Cu', OrderedDict([(1, np.matrix([[0.0, 1.0],
                                                              [-1.0, -2.0],
                                                              [2.5, -1.9]])),
                                               (2, np.matrix([[0.5],
                                                              [1.6],
                                                              [-1.4]]))]))])

    scalings = OrderedDict([('O', OrderedDict([('intercept', -2.3),
                                               ('slope', 4.5)])),
                            ('Pd', OrderedDict([('intercept', 1.6),
                                                ('slope', 2.5)])),
                            ('Cu', OrderedDict([('intercept', -0.3),
                                                ('slope', -0.5)]))])

    fingerprints_range = {"Cu": np.array([[2.8636310860653253,
                                           2.8636310860653253],
                                          [1.5435994865298275,
                                           1.5435994865298275]]),
                          "O": np.array([[2.9409056366723028,
                                          2.972494902604392],
                                         [1.9522542722823606,
                                          4.0720361595017245]]),
                          "Pd": np.array([[2.4629488092411096,
                                           2.6160138774087125],
                                          [0.27127576524253594,
                                           0.5898312261433813]])}

    # Testing pure-python and fortran versions of Gaussian-neural force call
    for fortran in [False, True]:
        for cores in range(1, 4):
            label = 'call-periodic/%s-%i' % (fortran, cores)
            calc = Amp(descriptor=Gaussian(cutoff=4.,
                                           Gs=Gs,
                                           fortran=fortran),
                       model=NeuralNetwork(hiddenlayers=hiddenlayers,
                                           weights=weights,
                                           scalings=scalings,
                                           activation='tanh',
                                           fprange=fingerprints_range,
                                           unit_type="double"),
                       label=label,
                       dblabel=label,
                       cores=cores)

            predicted_energies = [calc.get_potential_energy(image) for image in
                                  images]

            for image_no in range(len(predicted_energies)):
                print(predicted_energies[image_no])
                print(correct_energies[image_no])
                diff = abs(predicted_energies[image_no] -
                           correct_energies[image_no])
                assert (diff < 10.**(-14.)), \
                    'The predicted energy of image %i is wrong!' % (
                        image_no + 1)

            predicted_forces = [calc.get_forces(image) for image in images]

            for image_no in range(len(predicted_forces)):
                print('predicted forces:')
                print(predicted_forces[image_no])
                print('correct forces:')
                print(np.array(correct_forces[image_no]))
                for index in range(np.shape(predicted_forces[image_no])[0]):
                    for direction in range(
                            np.shape(predicted_forces[image_no])[1]):
                        diff = abs(predicted_forces[image_no][index][
                            direction] -
                            correct_forces[image_no][index][direction])
                        assert (diff < 10.**(-11.)), \
                            'The predicted force on atom %i of image %i ' \
                            'along direction %i is wrong!' % (
                                index, image_no + 1, direction)
Beispiel #30
from mlutils.neb import accelerate_neb
from amp import Amp
from amp.descriptor.gaussian import Gaussian
from amp.model.neuralnetwork import NeuralNetwork
from amp.model import LossFunction

from ase.calculators.emt import EMT

initial = 'initial.traj'
final = 'final.traj'

Gs = None
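# With Gs = None, the Gaussian descriptor falls back to its default set of
# symmetry-function parameters for the elements it encounters.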
n = 5
cutoff = 6.5
amp_calc = Amp(descriptor=Gaussian(cutoff=cutoff, fortran=True, Gs=Gs),
               model=NeuralNetwork(hiddenlayers=(n, n),
                                   fortran=True,
                                   checkpoints=None))

convergence = {'energy_rmse': 0.0001, 'force_rmse': 0.01}
amp_calc.model.lossfunction = LossFunction(convergence=convergence)

dft_calc = EMT()

neb = accelerate_neb(initial=initial,
                     final=final,
                     tolerance=0.05,
                     maxiter=200,
                     fmax=0.05,
                     ifmax=1.,