Code example #1
# Likely imports (not shown in the original snippet):
from ase.neb import NEB               # ase.mep.NEB in newer ASE releases
from ase.optimize.precon import Exp


def test_single_precon_initialisation(setup_images):
    images, _, _ = setup_images
    precon = Exp()
    mep = NEB(images, method='spline', precon=precon)
    mep.get_forces()
    assert len(mep.precon) == len(mep.images)
    assert mep.precon[0].mu == mep.precon[1].mu
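All four snippets rely on a setup_images pytest fixture that the listing does not show. Below is a minimal sketch of what such a fixture could look like, assuming an EMT-described Cu vacancy hop and a linearly interpolated initial path; the system, the image count, and the two extra return values are assumptions, not the project's actual fixture.

import pytest
from ase.build import bulk
from ase.calculators.emt import EMT
from ase.neb import NEB


@pytest.fixture
def setup_images():
    n_intermediate = 3
    initial = bulk('Cu', cubic=True) * (2, 2, 2)
    vacancy_site = initial.positions[0].copy()
    del initial[0]                        # introduce a vacancy at the origin
    final = initial.copy()
    final.positions[0] = vacancy_site     # hop a neighbour into the vacancy

    images = [initial.copy() for _ in range(n_intermediate + 1)] + [final]
    for image in images:
        image.calc = EMT()
    NEB(images).interpolate()             # straight-line initial path

    # The real fixture returns a 3-tuple; the last two entries are unknown
    # here, so placeholders are returned.
    return images, None, None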
Code example #2
File: test_precon_neb.py  Project: YanJordan/ase
# Likely imports (not shown in the original snippet):
import pytest
from ase.neb import NEB, NEBOptimizer   # ase.mep in newer ASE releases


# The original test supplies `method` via pytest parametrization; the exact
# method names below are an assumption.
@pytest.mark.parametrize('method', ['ODE', 'static', 'krylov'])
def test_neb_optimizers(setup_images, method):
    images, _, _ = setup_images
    mep = NEB(images, method='spline', precon='Exp')
    mep.get_forces()  # needed so residuals are available
    R0 = mep.get_residual()
    opt = NEBOptimizer(mep, method=method)
    opt.run(steps=2)  # take two steps
    R1 = mep.get_residual()
    # check residual has got smaller
    assert R1 < R0
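Outside of a smoke test like the one above, NEBOptimizer would normally be run to a force tolerance rather than for a fixed number of steps. A brief sketch, assuming the optimizer follows ASE's usual Optimizer.run(fmax=..., steps=...) interface; the tolerance and step cap are arbitrary.

mep = NEB(images, method='spline', precon='Exp')
opt = NEBOptimizer(mep, method='ODE')
converged = opt.run(fmax=0.05, steps=200)   # residual force target in eV/A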
Code example #3
File: test_precon_neb.py  Project: YanJordan/ase
# Likely imports (not shown in the original snippet):
import numpy as np
from ase.neb import NEB               # ase.mep.NEB in newer ASE releases
from ase.optimize.precon import Exp


def test_precon_assembly(setup_images):
    images, _, _ = setup_images
    neb = NEB(images, method='spline', precon='Exp')
    neb.get_forces()  # trigger precon assembly

    # check precon for each image is symmetric positive definite
    for image, precon in zip(neb.images, neb.precon):
        assert isinstance(precon, Exp)
        P = precon.asarray()
        N = 3 * len(image)
        assert P.shape == (N, N)
        assert np.abs(P - P.T).max() < 1e-6
        assert np.all(np.linalg.eigvalsh(P) > 0)  # all eigenvalues positive
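The Exp preconditioner being checked here is the same object ASE's preconditioned geometry optimizers accept directly. A short illustrative sketch, assuming a single Atoms object named atoms with a calculator attached; the explicit A=3.0 energy-scale parameter is shown only for clarity.

from ase.optimize.precon import Exp, PreconLBFGS

opt = PreconLBFGS(atoms, precon=Exp(A=3.0))  # exponential preconditioner
opt.run(fmax=0.01)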
Code example #4
File: mlneb.py  Project: hauser-group/mlpot
# Likely imports (not shown in the original snippet); the exact module paths
# used in mlpot are an assumption:
from copy import copy

import numpy as np

from ase.calculators.singlepoint import SinglePointCalculator
from ase.geometry import distance
from ase.neb import NEB
from ase.optimize import FIRE


def oie_ml_neb(neb,
               ml_calc,
               optimizer=FIRE,
               steps=100,
               ml_steps=150,
               t_mep=0.3,
               t_ci=0.01,
               t_ci_on=1.0,
               r_max=None,
               t_mep_ml=None,
               callback_after_ml_neb=None):
    images = neb.images
    # Save the initial path, as the machine learning NEB run is always
    # restarted from the initial path.
    initial_path = [image.get_positions().copy() for image in images]

    if r_max is None:
        # Koistinen et al. suggest half of the length of the initial path for
        # r_max:
        r_max = 0.5 * sum([
            distance(images[i - 1], images[i]) for i in range(1, len(images))
        ])
        print('r_max = %.2f' % r_max)

    # Default value of the threshold for the MEP on the machine learning
    # surface following Koistinen et al.
    t_mep_ml = t_mep_ml or 0.1 * t_ci

    def eval_image(ind):
        training_image = images[ind].copy()
        training_image.set_calculator(
            SinglePointCalculator(
                atoms=training_image,
                forces=images[ind].get_forces(apply_constraint=False),
                energy=images[ind].get_potential_energy()))
        ml_calc.add_data(training_image)

    # Add first and last image as well as any image that does not require
    # recalculation to the training data.
    for i, image in enumerate(images):
        if (not image.calc.calculation_required(image, ['energy', 'forces'])
                or i == 0 or i == len(images) - 1):
            eval_image(i)

    # make a copy of all images and attach a copy of the machine learning
    # calculator.
    ml_images = [image.copy() for image in images]
    for ml_image in ml_images:
        ml_image.set_calculator(copy(ml_calc))
    ml_neb = NEB(
        ml_images,
        k=neb.k,
        climb=neb.climb,
        method=neb.method,
        remove_rotation_and_translation=neb.remove_rotation_and_translation)

    # Step 1: fit the machine learning model to the training images
    print('Step 1')
    ml_calc.fit()
    params = ml_calc.get_params()
    for ml_image in ml_images:
        ml_image.calc.set_params(**params)

    def eval_highest_variance():
        # Step A: determine unevaluated image with highest uncertainty
        print('Step A')
        vars = np.zeros(len(images))
        for i, (image, ml_image) in enumerate(zip(images, ml_images)):
            if image.calc.calculation_required(image, ['energy', 'forces']):
                # Calculate variance of the energy prediction:
                vars[i] = ml_image.calc.predict_var(ml_image)[0]
        if np.any(vars < 0.):
            print('Negative variance found. Using absolute values to ' +
                  'determine next image to evaluate.')
            vars = np.abs(vars)
        var_max_i = np.argmax(vars)
        # Step B: evaluate image with highest uncertainty and add to training
        # data.
        print('Step B')
        eval_image(var_max_i)

    def step_H():
        # reset machine learning path to initial path
        for ml_image, init_pos in zip(ml_images, initial_path):
            # Reset positions to the initial path
            ml_image.set_positions(init_pos.copy())
        ml_neb.climb = False
        converged, ind = _relaxation_phase(ml_neb, ml_calc, ml_steps, t_mep_ml,
                                           t_ci_on, r_max)

        if callback_after_ml_neb is not None:
            callback_after_ml_neb(images, ml_images, ml_calc)

        # Update positions
        for image, ml_image in zip(images, ml_images):
            image.set_positions(ml_image.get_positions())
        return converged, ind

    eval_highest_variance()

    for step_i in range(steps):
        # Step C:
        print('Step C')

        # Step D: check for convergence:
        print('Step D')
        if not np.any([
                im.calc.calculation_required(im, ['energy', 'forces'])
                for im in images
        ]):
            forces = neb.get_forces().reshape((len(ml_neb.images) - 2, -1, 3))
            # Use imax-1 since forces only contains intermediate images
            # Compare squared force norms against squared thresholds
            # (consistent with step G below).
            if ((forces**2).sum(axis=2).max() < t_mep**2 and
                (forces[neb.imax - 1, :, :]**2).sum(axis=1).max() < t_ci**2):
                print('Converged. Final number of training points:',
                      len(ml_calc.atoms_train))
                return True

        # Step E: refit the machine learning model:
        print('Step E')
        ml_calc.fit()
        params = ml_calc.get_params()
        for ml_image in ml_images:
            ml_image.calc.set_params(**params)

        # Step F:
        print('Step F')
        # True for images whose real-calculator results are not yet available;
        # these are represented by their machine learning counterparts below.
        evaluated_images = [
            im.calc.calculation_required(im, ['energy', 'forces'])
            for im in images
        ]
        tmp_neb = NEB(
            [ml_im if eval else im
             for eval, im, ml_im in zip(evaluated_images, images, ml_images)],
            k=neb.k,
            climb=neb.climb,
            method=neb.method,
            remove_rotation_and_translation=neb.remove_rotation_and_translation)
        approx_forces = tmp_neb.get_forces().reshape(
            (len(ml_neb.images) - 2, -1, 3))
        print('Highest energy image is number %d' % tmp_neb.imax)
        print('Maximum force on an atom (in eV/A) for each image, * indicates '
              'approximation by machine learning model')
        print(' '.join([
            '%.4f*' % f if eval else '%.4f' % f for eval, f in zip(
                evaluated_images[1:-1],
                np.sqrt((approx_forces**2).sum(axis=2).max(axis=1)))
        ]))

        # Step G:
        print('Step G')
        if (approx_forces**2).sum(axis=2).max() < t_mep**2:
            if images[tmp_neb.imax].calc.calculation_required(
                    images[tmp_neb.imax], ['energy', 'forces']):  # Substep I
                print('Step GI')
                eval_image(tmp_neb.imax)
                continue  # Go to C
            elif ((approx_forces[tmp_neb.imax - 1, :, :]**2).sum(axis=1).max()
                  < t_ci**2):  # Substep II
                print('Step GII')
                eval_highest_variance()
                continue  # Go to C
            else:  # Substep III
                print('Step GIII')
                converged, ind = step_H()
                # In case of early stopping evaluate image that caused early
                # stopping
                if not converged and ind is not None:
                    eval_image(ind)
                else:  # Evaluate climbing image
                    eval_image(ml_neb.imax)
        # Step H: Relaxation phase
        else:
            print('Step H')
            converged, ind = step_H()
            # In case of early stopping evaluate image that caused early
            # stopping
            if not converged and ind is not None:
                eval_image(ind)
            else:  # Evaluate image with highest uncertainty
                eval_highest_variance()
    # No convergence reached
    return False
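The _relaxation_phase helper called from step_H is not part of this listing. Below is a minimal sketch of what it might do, inferred only from its call signature, its (converged, index) return value, and the OIE-GP-NEB scheme of Koistinen et al.; the optimizer choice, the early-stopping test against r_max, and the climbing-image switch at t_ci_on are assumptions, not the mlpot implementation.

import numpy as np

from ase.geometry import distance
from ase.optimize import FIRE


def _relaxation_phase(ml_neb, ml_calc, ml_steps, t_mep_ml, t_ci_on, r_max):
    # Sketch only: relax the path on the machine learning surface.
    opt = FIRE(ml_neb, logfile=None)
    for _ in opt.irun(fmax=t_mep_ml, steps=ml_steps):
        fmax_now = np.sqrt((ml_neb.get_forces()**2).sum(axis=1).max())
        # Switch on the climbing image once the path is roughly relaxed.
        if fmax_now < t_ci_on:
            ml_neb.climb = True
        # Early stopping: abort if any intermediate image strays farther
        # than r_max from every training structure, and report its index.
        for i, ml_image in enumerate(ml_neb.images[1:-1], start=1):
            d_min = min(distance(ml_image, a) for a in ml_calc.atoms_train)
            if d_min > r_max:
                return False, i
    converged = np.sqrt((ml_neb.get_forces()**2).sum(axis=1).max()) < t_mep_ml
    return converged, None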