Example #1
def test_force_en(kernels, diff_cutoff):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    delta = 1e-5
    d1 = 1
    d2 = 2
    cell = 1e7 * np.eye(3)
    np.random.seed(0)

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    args = from_mask_to_args(hyps, cutoffs, hm)

    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    _, _, en_kernel, force_en_kernel, _, _, _ = \
        str_to_kernel_set(kernels, "mc", hm)

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args)

    kern_finite_diff = 0
    if ('manybody' in kernels):
        kernel, _, enm_kernel, efk, _, _, _ = \
            str_to_kernel_set(['manybody'], "mc", hm)

        calc = 0
        for i in range(len(env1[0])):
            calc += enm_kernel(env1[1][i], env2[0][0], *args)
            calc -= enm_kernel(env1[2][i], env2[0][0], *args)

        kern_finite_diff += (calc) / (2 * delta)

    if ('twobody' in kernels or 'threebody' in kernels):
        args23 = from_mask_to_args(hyps, cutoffs, hm)

    if ('twobody' in kernels):
        kernel, _, en2_kernel, efk, _, _, _ = \
            str_to_kernel_set(['2b'], 'mc', hm)
        calc1 = en2_kernel(env1[1][0], env2[0][0], *args23)
        calc2 = en2_kernel(env1[2][0], env2[0][0], *args23)
        diff2b = 4 * (calc1 - calc2) / 2.0 / delta / 2.0

        kern_finite_diff += diff2b

    if ('threebody' in kernels):
        kernel, _, en3_kernel, efk, _, _, _ = \
            str_to_kernel_set(['3b'], 'mc', hm)
        calc1 = en3_kernel(env1[1][0], env2[0][0], *args23)
        calc2 = en3_kernel(env1[2][0], env2[0][0], *args23)
        diff3b = 9 * (calc1 - calc2) / 3.0 / delta / 2.0

        kern_finite_diff += diff3b

    tol = 1e-3
    print("\nforce_en", kernels, kern_finite_diff, kern_analytical)
    assert (isclose(-kern_finite_diff, kern_analytical, rtol=tol))
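
The test above (and several later examples) validates an analytical force/energy kernel against a central finite difference of the energy kernel; the analytical value is compared with -kern_finite_diff because a force carries the opposite sign of the energy gradient. Below is a minimal, self-contained sketch of the same idea using a toy one-dimensional Gaussian energy kernel; the toy_* names are illustrative stand-ins and not part of FLARE.

import numpy as np

# Toy energy kernel between two scalar "environments" x1 and x2.
def toy_en_kernel(x1, x2, sig=1.0, ls=1.0):
    return sig ** 2 * np.exp(-0.5 * (x1 - x2) ** 2 / ls ** 2)

# Analytical derivative of the energy kernel with respect to x1,
# playing the role of the force/energy kernel (up to the sign convention).
def toy_force_en_kernel(x1, x2, sig=1.0, ls=1.0):
    return -(x1 - x2) / ls ** 2 * toy_en_kernel(x1, x2, sig, ls)

delta = 1e-5
x1, x2 = 0.3, 1.1

# Central finite difference of the energy kernel with respect to x1.
kern_finite_diff = (toy_en_kernel(x1 + delta, x2) -
                    toy_en_kernel(x1 - delta, x2)) / (2 * delta)
kern_analytical = toy_force_en_kernel(x1, x2)

assert np.isclose(kern_finite_diff, kern_analytical, rtol=1e-3)
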
Example #2
def test_force_en_multi_vs_simple():
    """Check that the analytical kernel matches the one implemented
    in mc_simple.py"""

    cutoffs = np.ones(3, dtype=np.float64)
    delta = 1e-8
    env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(
        cutoffs, delta)

    # set hyperparameters
    d1 = 1
    d2 = 2
    tol = 1e-4

    hyps, hm, cut = generate_hm(1, 1, cutoffs, False)

    # mc_simple
    kernel0, kg0, en_kernel0, force_en_kernel0 = str_to_kernel_set(
        "2+3+mb+mc", False)
    hyps = np.ones(7, dtype=np.float64)
    args0 = (hyps, cutoffs)

    # mc_sephyps
    kernel, kg, en_kernel, force_en_kernel = str_to_kernel_set(
        "2+3+mb+mc", True)
    args1 = from_mask_to_args(hyps, hm, cutoffs)

    funcs = [[kernel0, kg0, en_kernel0, force_en_kernel0],
             [kernel, kg, en_kernel, force_en_kernel]]

    i = 0
    reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
    assert (np.isclose(reference, result, atol=tol))

    i = 1
    reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
    assert (np.isclose(reference[0], result[0], atol=tol))
    assert (np.isclose(reference[1], result[1], atol=tol).all())

    i = 2
    reference = funcs[0][i](env1_1, env2_1, *args0)
    result = funcs[1][i](env1_1, env2_1, *args1)
    assert (np.isclose(reference, result, atol=tol))

    i = 3
    reference = funcs[0][i](env1_1, env2_1, d1, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, *args1)
    assert (np.isclose(reference, result, atol=tol))
Example #3
def get_kernel_term(kernel_name, hyps_mask, hyps):
    hyps, cutoffs, hyps_mask = Parameters.get_component_mask(hyps_mask,
                                                             kernel_name,
                                                             hyps=hyps)
    kernel, _, ek, efk, _, _, _ = str_to_kernel_set([kernel_name], "mc",
                                                    hyps_mask)
    return (ek, cutoffs, hyps, hyps_mask)
Example #4
def test_efs_kern_vec(params, ihyps):
    name, cutoffs, hyps_mask_list, _ = params

    np.random.seed(10)
    test_point = get_tstp()

    size1 = len(flare.gp_algebra._global_training_data[name])
    size2 = len(flare.gp_algebra._global_training_structures[name])

    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    energy_vector, force_array, stress_array = efs_kern_vec(
        name, kernel[5], kernel[4], test_point, hyps, cutoffs, hyps_mask)

    energy_vector_par, force_array_par, stress_array_par = efs_kern_vec(
        name,
        kernel[5],
        kernel[4],
        test_point,
        hyps,
        cutoffs,
        hyps_mask,
        n_cpus=2,
        n_sample=100,
    )

    assert np.equal(energy_vector, energy_vector_par).all()
    assert np.equal(force_array, force_array_par).all()
    assert np.equal(stress_array, stress_array_par).all()
Example #5
def test_ky_mat(params, ihyps, ky_mat_ref):

    name, cutoffs, hyps_mask_list, energy_noise = params
    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    # serial implementation
    time0 = time.time()
    ky_mat = get_Ky_mat(hyps, name, kernel[0], kernel[2], kernel[3],
                        energy_noise, cutoffs, hyps_mask)
    print(f"compute ky_mat with multihyps, test {ihyps}, n_cpus=1",
          time.time() - time0)
    assert np.isclose(ky_mat, ky_mat_ref, rtol=1e-3).all(), \
        f"multi hyps implementation is wrong with case {ihyps}"

    # parallel implementation
    time0 = time.time()
    ky_mat = get_Ky_mat(
        hyps,
        name,
        kernel[0],
        kernel[2],
        kernel[3],
        energy_noise,
        cutoffs,
        hyps_mask,
        n_cpus=2,
        n_sample=20,
    )
    print(f"compute ky_mat with multihyps, test {ihyps}, n_cpus=2",
          time.time() - time0)
    assert np.isclose(ky_mat, ky_mat_ref, rtol=1e-3).all(), \
        f"multi hyps parallel implementation is wrong with case {ihyps}"
Example #6
def test_kernel_vector(params, ihyps):

    name, cutoffs, hyps_mask_list, _ = params

    np.random.seed(10)
    test_point = get_tstp()

    size1 = len(flare.gp_algebra._global_training_data[name])
    size2 = len(flare.gp_algebra._global_training_structures[name])

    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    # test the parallel implementation for multihyps
    vec = get_kernel_vector(name, kernel[0], kernel[3], test_point, 1, hyps,
                            cutoffs, hyps_mask)

    vec_par = get_kernel_vector(
        name,
        kernel[0],
        kernel[3],
        test_point,
        1,
        hyps,
        cutoffs,
        hyps_mask,
        n_cpus=2,
        n_sample=100,
    )

    assert np.isclose(vec, vec_par,
                      rtol=1e-4).all(), "parallel implementation is wrong"
    assert vec.shape[0] == size1 * 3 + size2
Example #7
def test_en_kern_vec(params, ihyps):

    name, cutoffs, hyps_mask_list, _ = params
    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    np.random.seed(10)
    test_point = get_tstp()

    size1 = len(flare.gp_algebra._global_training_data[name])
    size2 = len(flare.gp_algebra._global_training_structures[name])

    # test the parallel implementation for multihyps
    vec = en_kern_vec(name, kernel[3], kernel[2], test_point, hyps, cutoffs,
                      hyps_mask)

    vec_par = en_kern_vec(
        name,
        kernel[3],
        kernel[2],
        test_point,
        hyps,
        cutoffs,
        hyps_mask,
        n_cpus=2,
        n_sample=100,
    )

    assert all(np.equal(vec, vec_par)), "parallel implementation is wrong"
    assert vec.shape[0] == size1 * 3 + size2
Example #8
def test_hyps_grad(kernels, kernel_type):
    """Check that the analytical gradient of the kernel with respect to the
    hyperparameters matches a finite-difference estimate."""

    d1 = randint(1, 3)
    d2 = randint(1, 3)
    tol = 1e-4
    cell = 1e7 * np.eye(3)
    delta = 1e-8
    cutoffs = np.ones(3) * 1.2

    np.random.seed(10)
    hyps = generate_hm(kernels)
    env1 = generate_mb_envs(cutoffs, cell, 0, d1, kern_type=kernel_type)[0][0]
    env2 = generate_mb_envs(cutoffs, cell, 0, d2, kern_type=kernel_type)[0][0]

    kernel, kernel_grad, _, _, _, _, _ = str_to_kernel_set(
        kernels, kernel_type)

    grad_test = kernel_grad(env1, env2, d1, d2, hyps, cutoffs)

    original = kernel(env1, env2, d1, d2, hyps, cutoffs)
    assert isclose(grad_test[0], original, rtol=tol)

    for i in range(len(hyps) - 1):
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        hgrad = (kernel(env1, env2, d1, d2, newhyps, cutoffs) -
                 original) / delta
        print("numerical gradients", hgrad)
        print("analytical gradients", grad_test[1][i])
        assert isclose(grad_test[1][i], hgrad, rtol=tol)
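
The loop above checks each component of the analytical hyperparameter gradient against a forward finite difference. A minimal, self-contained sketch of the same check with a toy squared-exponential kernel whose gradient with respect to (sigma, length scale) is known in closed form; the toy_* names are illustrative and not FLARE APIs.

import numpy as np

def toy_kernel(r, hyps):
    sig, ls = hyps
    return sig ** 2 * np.exp(-0.5 * r ** 2 / ls ** 2)

def toy_kernel_grad(r, hyps):
    """Return the kernel value and its gradient w.r.t. [sig, ls]."""
    sig, ls = hyps
    k = toy_kernel(r, hyps)
    return k, np.array([2 * k / sig, k * r ** 2 / ls ** 3])

r = 0.7
hyps = np.array([1.3, 0.9])
delta = 1e-8
tol = 1e-4

k, grad = toy_kernel_grad(r, hyps)
original = toy_kernel(r, hyps)
assert np.isclose(k, original, rtol=tol)

# Forward-difference each hyperparameter, mirroring the loop above.
for i in range(len(hyps)):
    newhyps = np.copy(hyps)
    newhyps[i] += delta
    hgrad = (toy_kernel(r, newhyps) - original) / delta
    assert np.isclose(grad[i], hgrad, rtol=tol)
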
Example #9
def ky_mat_ref(params):
    name, cutoffs, hyps_mask_list, energy_noise = params

    # get the reference without multi hyps
    hyps_mask = hyps_mask_list[-1]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    time0 = time.time()
    ky_mat0 = get_Ky_mat(hyps, name, kernel[0], kernel[2], kernel[3],
                         energy_noise, cutoffs)
    print("compute ky_mat serial", time.time() - time0)

    # parallel version
    time0 = time.time()
    ky_mat = get_Ky_mat(
        hyps,
        name,
        kernel[0],
        kernel[2],
        kernel[3],
        energy_noise,
        cutoffs,
        n_cpus=2,
        n_sample=5,
    )
    print("compute ky_mat parallel", time.time() - time0)

    assert np.isclose(ky_mat, ky_mat0,
                      rtol=1e-3).all(), "parallel implementation is wrong"

    yield ky_mat0
    del ky_mat0
Example #10
    def update_kernel(
        self,
        kernels: List[str],
        component: str = "mc",
        hyps=None,
        cutoffs: dict = None,
        hyps_mask: dict = None,
    ):
        kernel, grad, ek, efk, _, _, _ = str_to_kernel_set(
            kernels, component, hyps_mask)
        self.kernel = kernel
        self.kernel_grad = grad
        self.energy_force_kernel = efk
        self.energy_kernel = ek
        self.kernels = kernel_str_to_array(kernel.__name__)

        if hyps_mask is not None:
            self.hyps_mask = hyps_mask
        # Cutoffs argument will override hyps mask's cutoffs key, if present
        if isinstance(hyps_mask, dict) and cutoffs is None:
            cutoffs = hyps_mask.get("cutoffs", None)

        if cutoffs is not None:
            if self.cutoffs != cutoffs:
                self.adjust_cutoffs(cutoffs,
                                    train=False,
                                    new_hyps_mask=hyps_mask)
            self.cutoffs = cutoffs

        if isinstance(hyps_mask, dict) and hyps is None:
            hyps = hyps_mask.get("hyps", None)

        if hyps is not None:
            self.hyps = hyps
Example #11
def test_constraint(kernels, diff_cutoff):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    if ('manybody' in kernels):
        return

    d1 = 1
    d2 = 2
    cell = 1e7 * np.eye(3)
    delta = 1e-8

    cutoffs, hyps, hm = generate_diff_hm(kernels,
                                         diff_cutoff=diff_cutoff,
                                         constraint=True)

    _, __, en_kernel, force_en_kernel, _, _, _ = \
        str_to_kernel_set(kernels, "mc", hm)

    args0 = from_mask_to_args(hyps, cutoffs, hm)

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    kern_finite_diff = 0

    if ('twobody' in kernels):
        _, _, en2_kernel, fek2, _, _, _ = \
            str_to_kernel_set(['twobody'], "mc", hm)
        calc1 = en2_kernel(env1[1][0], env2[0][0], *args0)
        calc2 = en2_kernel(env1[0][0], env2[0][0], *args0)
        kern_finite_diff += 4 * (calc1 - calc2) / 2.0 / delta

    if ('threebody' in kernels):
        _, _, en3_kernel, fek3, _, _, _ = \
            str_to_kernel_set(['threebody'], "mc", hm)
        calc1 = en3_kernel(env1[1][0], env2[0][0], *args0)
        calc2 = en3_kernel(env1[0][0], env2[0][0], *args0)
        kern_finite_diff += 9 * (calc1 - calc2) / 3.0 / delta

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args0)

    tol = 1e-4
    print(kern_finite_diff, kern_analytical)
    assert (isclose(-kern_finite_diff, kern_analytical, rtol=tol))
Example #12
def test_ky_mat_update(params, ihyps):

    name, cutoffs, hyps_mask_list, energy_noise = params
    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask["hyps"]
    kernel = str_to_kernel_set(hyps_mask["kernels"], "mc", hyps_mask)

    # prepare old data set as the starting point
    n = 5
    s = 1
    training_data = flare.gp_algebra._global_training_data[name]
    training_structures = flare.gp_algebra._global_training_structures[name]
    flare.gp_algebra._global_training_data["old"] = training_data[:n]
    flare.gp_algebra._global_training_structures[
        "old"] = training_structures[:s]
    func = [get_Ky_mat, get_ky_mat_update]

    # get the reference
    ky_mat0 = func[0](hyps, name, kernel[0], kernel[2], kernel[3],
                      energy_noise, cutoffs, hyps_mask)
    ky_mat_old = func[0](hyps, "old", kernel[0], kernel[2], kernel[3],
                         energy_noise, cutoffs, hyps_mask)

    # update
    ky_mat = func[1](
        ky_mat_old,
        n,
        hyps,
        name,
        kernel[0],
        kernel[2],
        kernel[3],
        energy_noise,
        cutoffs,
        hyps_mask,
    )
    assert np.isclose(ky_mat, ky_mat0,
                      rtol=1e-10).all(), "update function is wrong"

    # parallel version
    ky_mat = func[1](
        ky_mat_old,
        n,
        hyps,
        name,
        kernel[0],
        kernel[2],
        kernel[3],
        energy_noise,
        cutoffs,
        hyps_mask,
        n_cpus=2,
        n_sample=20,
    )
    assert np.isclose(ky_mat, ky_mat0,
                      rtol=1e-10).all(), "update function is wrong"
Example #13
def test_ky_and_hyp(params, ihyps, ky_mat_ref):

    name, cutoffs, hyps_mask_list, _ = params
    hyps_mask = hyps_mask_list[ihyps]
    hyps = hyps_mask['hyps']
    kernel = str_to_kernel_set(hyps_mask['kernels'], 'mc', hyps_mask)

    func = get_ky_and_hyp

    # serial version
    hypmat_ser, ky_mat_ser = func(hyps, name, kernel[1], cutoffs, hyps_mask)
    # parallel version
    hypmat_par, ky_mat_par = func(hyps,
                                  name,
                                  kernel[1],
                                  cutoffs,
                                  hyps_mask,
                                  n_cpus=2)

    ref = ky_mat_ref[:ky_mat_ser.shape[0], :ky_mat_ser.shape[1]]

    assert np.isclose(ky_mat_ser, ref, rtol=1e-5).all(), \
            "serial implementation is not consistent with get_Ky_mat"
    assert np.isclose(ky_mat_par, ref, rtol=1e-5).all(), \
            "parallel implementation is not consistent with get_Ky_mat"
    assert np.isclose(hypmat_ser, hypmat_par, rtol=1e-5).all(), \
            "serial implementation is not consistent with parallel implementation"

    # analytical form
    hyp_mat, ky_mat = func(hyps, name, kernel[1], cutoffs, hyps_mask)
    _, like_grad = get_like_grad_from_mats(ky_mat, hyp_mat, name)

    delta = 0.001
    for i in range(len(hyps)):

        newhyps = np.copy(hyps)

        newhyps[i] += delta
        hyp_mat_p, ky_mat_p = func(newhyps, name, kernel[1], cutoffs,
                                   hyps_mask)
        like_p, _ = \
            get_like_grad_from_mats(ky_mat_p, hyp_mat_p, name)

        newhyps[i] -= 2 * delta
        hyp_mat_m, ky_mat_m = func(newhyps, name, kernel[1], cutoffs,
                                   hyps_mask)
        like_m, _ = \
            get_like_grad_from_mats(ky_mat_m, hyp_mat_m, name)

        # numerical form
        numeric = (like_p - like_m) / 2. / delta
        assert np.isclose(like_grad[i], numeric, rtol=1e-3), \
            f"wrong calculation of the likelihood gradient for hyperparameter {i}"
Example #14
def test_hyps_grad(kernels, diff_cutoff, constraint):
    """Check that the analytical gradient of the kernel with respect to the
    hyperparameters matches a finite-difference estimate, for masks with
    different cutoffs and constrained hyperparameters."""

    delta = 1e-8
    d1 = 1
    d2 = 2
    tol = 1e-4

    np.random.seed(10)
    cutoffs, hyps, hm = generate_diff_hm(kernels,
                                         diff_cutoff,
                                         constraint=constraint)
    args = from_mask_to_args(hyps, cutoffs, hm)
    kernel, kernel_grad, _, _, _, _, _ = str_to_kernel_set(kernels, "mc", hm)

    np.random.seed(0)
    env1 = generate_mb_envs(cutoffs, np.eye(3) * 100, delta, d1)
    env2 = generate_mb_envs(cutoffs, np.eye(3) * 100, delta, d2)
    env1 = env1[0][0]
    env2 = env2[0][0]

    k, grad = kernel_grad(env1, env2, d1, d2, *args)

    original = kernel(env1, env2, d1, d2, *args)

    nhyps = len(hyps)
    if hm['train_noise']:
        nhyps -= 1
    original_hyps = Parameters.get_hyps(hm, hyps=hyps)

    for i in range(nhyps):
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        if ('map' in hm.keys()):
            newid = hm['map'][i]
            hm['original_hyps'] = np.copy(original_hyps)
            hm['original_hyps'][newid] += delta
        newargs = from_mask_to_args(newhyps, cutoffs, hm)

        hgrad = (kernel(env1, env2, d1, d2, *newargs) - original) / delta
        if 'map' in hm:
            print(i, "hgrad", hgrad, grad[hm['map'][i]])
            assert (isclose(grad[hm['map'][i]], hgrad, rtol=tol))
        else:
            print(i, "hgrad", hgrad, grad[i])
            assert (isclose(grad[i], hgrad, rtol=tol))
Example #15
    def from_dict(dictionary: dict):
        """
        Create MGP object from dictionary representation.
        """
        new_mgp = MappedGaussianProcess(
            grid_params=dictionary['grid_params'],
            struc_params=dictionary['struc_params'],
            GP=None,
            mean_only=dictionary['mean_only'],
            container_only=True,
            lmp_file_name=dictionary['lmp_file_name'],
            n_cpus=dictionary['n_cpus'],
            n_sample=dictionary['n_sample'],
            autorun=False)

        # Restore kernel_info
        for i in dictionary['bodies']:
            kern_info = f'kernel{i}b_info'
            hyps_mask = dictionary[kern_info][-1]
            if (hyps_mask is None):
                multihyps = False
            else:
                multihyps = True

            kernel_info = dictionary[kern_info]
            kernel_name = kernel_info[0]
            kernel, _, _, efk = str_to_kernel_set(kernel_name, multihyps)
            kernel_info[0] = kernel
            kernel_info[1] = efk
            setattr(new_mgp, kern_info, kernel_info)

        # Fill up the model with the saved coeffs
        for m, map_2 in enumerate(new_mgp.maps_2):
            map_2.mean.__coeffs__ = np.array(dictionary['maps_2'][m])
        for m, map_3 in enumerate(new_mgp.maps_3):
            map_3.mean.__coeffs__ = np.array(dictionary['maps_3'][m])

        # Set GP
        if dictionary.get('GP'):
            new_mgp.GP = GaussianProcess.from_dict(dictionary.get("GP"))

        return new_mgp
Example #16
def test_force(kernel_name, kernel_type):
    """Check that the analytical force kernel matches finite difference of
    energy kernel."""

    d1 = 1
    d2 = 2
    tol = 1e-3
    cell = 1e7 * np.eye(3)
    delta = 1e-4
    cutoffs = np.ones(3) * 1.2

    np.random.seed(10)

    hyps = generate_hm(kernel_name)
    kernel, kg, en_kernel, fek = str_to_kernel_set(kernel_name + kernel_type,
                                                   False)
    args = (hyps, cutoffs)

    env1 = generate_mb_envs(cutoffs, cell, delta, d1)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2)

    # check force kernel
    if ('mb' == kernel_name):
        cal = 0
        for i in range(3):
            for j in range(len(env1[0])):
                cal += en_kernel(env1[1][i], env2[1][j], *args)
                cal += en_kernel(env1[2][i], env2[2][j], *args)
                cal -= en_kernel(env1[1][i], env2[2][j], *args)
                cal -= en_kernel(env1[2][i], env2[1][j], *args)
        kern_finite_diff = cal / (4 * delta**2)
    elif ('mb' not in kernel_name):
        calc1 = en_kernel(env1[1][0], env2[1][0], *args)
        calc2 = en_kernel(env1[2][0], env2[2][0], *args)
        calc3 = en_kernel(env1[1][0], env2[2][0], *args)
        calc4 = en_kernel(env1[2][0], env2[1][0], *args)
        kern_finite_diff = (calc1 + calc2 - calc3 - calc4) / (4 * delta**2)
    else:
        return

    kern_analytical = kernel(env1[0][0], env2[0][0], d1, d2, *args)
    assert (isclose(kern_finite_diff, kern_analytical, rtol=tol))
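
The force test above approximates the mixed second derivative of the energy kernel with the four-point stencil (k(+,+) + k(-,-) - k(+,-) - k(-,+)) / (4 * delta**2) and compares it with the analytical force kernel. A minimal, self-contained sketch of that stencil with a toy Gaussian energy kernel whose mixed derivative is known analytically; all names are illustrative and not FLARE APIs.

import numpy as np

def toy_en_kernel(x1, x2, ls=1.0):
    return np.exp(-0.5 * (x1 - x2) ** 2 / ls ** 2)

def toy_force_kernel(x1, x2, ls=1.0):
    """Analytical mixed derivative d^2 k / (dx1 dx2) of the toy energy kernel."""
    r = x1 - x2
    return (1.0 / ls ** 2 - r ** 2 / ls ** 4) * toy_en_kernel(x1, x2, ls)

delta = 1e-4
x1, x2 = 0.2, 0.9

# Four-point stencil for the mixed second derivative, as in the tests above.
calc1 = toy_en_kernel(x1 + delta, x2 + delta)
calc2 = toy_en_kernel(x1 - delta, x2 - delta)
calc3 = toy_en_kernel(x1 + delta, x2 - delta)
calc4 = toy_en_kernel(x1 - delta, x2 + delta)
kern_finite_diff = (calc1 + calc2 - calc3 - calc4) / (4 * delta ** 2)

kern_analytical = toy_force_kernel(x1, x2)
assert np.isclose(kern_finite_diff, kern_analytical, rtol=1e-3)
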
Example #17
def get_reference(grid_env, species, parameter, kernel_name, same_hyps):

    env1, env2, hm1, hm2 = parameter[kernel_name]
    env = env1 if same_hyps else env2
    hm = hm1 if same_hyps else hm2

    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        hm['kernels'], "mc", None if same_hyps else hm)
    args = from_mask_to_args(hm['hyps'], hm['cutoffs'],
                             None if same_hyps else hm)

    energy_force = np.zeros(3, dtype=np.float64)
    # force_force = np.zeros(3, dtype=np.float)
    # force_energy = np.zeros(3, dtype=np.float)
    # energy_energy = np.zeros(3, dtype=np.float)
    for i in range(3):
        energy_force[i] = force_en_kernel(env, grid_env, i + 1, *args)
        # force_energy[i] = force_en_kernel(env, grid_env, i, *args)
        # force_force[i] = kernel(grid_env, env, 0, i, *args)
#     result = funcs[1][i](env1, env2, d1, *args1)
    return energy_force  # , force_energy, force_force, energy_energy
Example #18
def test_force_bound_cutoff_compare(kernels, diff_cutoff):
    """Check that the analytical kernel matches the one implemented
    in mc_simple.py"""

    d1 = 1
    d2 = 2
    tol = 1e-4
    cell = 1e7 * np.eye(3)
    delta = 1e-8

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        kernels, "mc", hm
    )
    args = from_mask_to_args(hyps, cutoffs, hm)

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)
    env1 = env1[0][0]
    env2 = env2[0][0]

    reference = kernel(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
    result = kernel(env1, env2, d1, d2, *args)
    assert isclose(reference, result, rtol=tol)

    reference = kg(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
    result = kg(env1, env2, d1, d2, *args)
    assert isclose(reference[0], result[0], rtol=tol)
    assert isclose(reference[1], result[1], rtol=tol).all()

    reference = en_kernel(env1, env2, *args, quadratic_cutoff_bound)
    result = en_kernel(env1, env2, *args)
    assert isclose(reference, result, rtol=tol)

    reference = force_en_kernel(env1, env2, d1, *args, quadratic_cutoff_bound)
    result = force_en_kernel(env1, env2, d1, *args)
    assert isclose(reference, result, rtol=tol)
Example #19
def test_force_en(kernel_name, kernel_type):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    cutoffs = np.ones(3) * 1.2
    delta = 1e-5
    tol = 1e-4
    cell = 1e7 * np.eye(3)

    # set hyperparameters
    d1 = 1

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1)
    env2 = generate_mb_envs(cutoffs, cell, delta, d1)

    hyps = generate_hm(kernel_name)

    _, __, en_kernel, force_en_kernel = str_to_kernel_set(kernel_name +
                                                          kernel_type)
    print(force_en_kernel.__name__)

    nterm = 0
    for term in ['2', '3', 'mb']:
        if (term in kernel_name):
            nterm += 1

    kern_finite_diff = 0
    if ('mb' in kernel_name):
        _, __, enm_kernel, ___ = str_to_kernel_set('mb' + kernel_type)
        mhyps = hyps[(nterm - 1) * 2:]
        calc = 0
        for i in range(len(env1[0])):
            calc += enm_kernel(env1[2][i], env2[0][0], mhyps, cutoffs)
            calc -= enm_kernel(env1[1][i], env2[0][0], mhyps, cutoffs)
        mb_diff = calc / (2 * delta)
        kern_finite_diff += mb_diff

    if ('2' in kernel_name):
        nbond = 1
        _, __, en2_kernel, ___ = str_to_kernel_set('2' + kernel_type)
        calc1 = en2_kernel(env1[2][0], env2[0][0], hyps[0:nbond * 2], cutoffs)
        calc2 = en2_kernel(env1[1][0], env2[0][0], hyps[0:nbond * 2], cutoffs)
        diff2b = (calc1 - calc2) / 2.0 / 2.0 / delta

        kern_finite_diff += diff2b
    else:
        nbond = 0

    if ('3' in kernel_name):
        _, __, en3_kernel, ___ = str_to_kernel_set('3' + kernel_type)
        calc1 = en3_kernel(env1[2][0], env2[0][0], hyps[nbond * 2:], cutoffs)
        calc2 = en3_kernel(env1[1][0], env2[0][0], hyps[nbond * 2:], cutoffs)
        diff3b = (calc1 - calc2) / 2.0 / 3.0 / delta

        kern_finite_diff += diff3b

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, hyps,
                                      cutoffs)

    print("\nforce_en", kernel_name, kern_finite_diff, kern_analytical)

    assert (isclose(kern_finite_diff, kern_analytical, rtol=tol))
Example #20
def test_force_en(kernels, kernel_type):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    cutoffs = np.ones(3) * 1.2
    delta = 1e-5
    tol = 1e-4
    cell = 1e7 * np.eye(3)

    # set hyperparameters
    d1 = 1

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, kern_type=kernel_type)
    env2 = generate_mb_envs(cutoffs, cell, delta, d1, kern_type=kernel_type)

    hyps = generate_hm(kernels)

    _, _, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        kernels, kernel_type)
    print(force_en_kernel.__name__)

    nterm = 0
    for term in ["2", "3", "many"]:
        if term in kernels:
            nterm += 1

    kern_finite_diff = 0
    if "many" in kernels:
        _, _, enm_kernel, _, _, _, _ = str_to_kernel_set(["many"], kernel_type)
        mhyps = hyps[(nterm - 1) * 2:]
        calc = 0
        nat = len(env1[0])
        for i in range(nat):
            calc += enm_kernel(env1[2][i], env2[0][0], mhyps, cutoffs)
            calc -= enm_kernel(env1[1][i], env2[0][0], mhyps, cutoffs)
        mb_diff = calc / (2 * delta)
        kern_finite_diff += mb_diff

    if "2" in kernels:
        ntwobody = 1
        _, _, en2_kernel, _, _, _, _ = str_to_kernel_set("2", kernel_type)
        calc1 = en2_kernel(env1[2][0], env2[0][0], hyps[0:ntwobody * 2],
                           cutoffs)
        calc2 = en2_kernel(env1[1][0], env2[0][0], hyps[0:ntwobody * 2],
                           cutoffs)
        diff2b = 4 * (calc1 - calc2) / 2.0 / 2.0 / delta

        kern_finite_diff += diff2b
    else:
        ntwobody = 0

    if "3" in kernels:
        _, _, en3_kernel, _, _, _, _ = str_to_kernel_set("3", kernel_type)
        calc1 = en3_kernel(env1[2][0], env2[0][0], hyps[ntwobody * 2:],
                           cutoffs)
        calc2 = en3_kernel(env1[1][0], env2[0][0], hyps[ntwobody * 2:],
                           cutoffs)
        diff3b = 9 * (calc1 - calc2) / 2.0 / 3.0 / delta

        kern_finite_diff += diff3b

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, hyps,
                                      cutoffs)

    print("\nforce_en", kernels, kern_finite_diff, kern_analytical)

    assert isclose(kern_finite_diff, kern_analytical, rtol=tol)
Example #21
def test_force(kernels, kernel_type):
    """Check that the analytical force kernel matches finite difference of
    energy kernel."""

    d1 = 1
    d2 = 2
    tol = 1e-3
    cell = 1e7 * np.eye(3)
    delta = 1e-4
    cutoffs = np.ones(3) * 1.2

    np.random.seed(10)

    hyps = generate_hm(kernels)
    kernel, kg, en_kernel, fek, _, _, _ = str_to_kernel_set(
        kernels, kernel_type)
    args = (hyps, cutoffs)

    nterm = 0
    for term in ["2", "3", "many"]:
        if term in kernels:
            nterm += 1

    env1 = generate_mb_envs(cutoffs, cell, delta, d1, kern_type=kernel_type)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, kern_type=kernel_type)

    # check force kernel
    kern_finite_diff = 0
    if "many" == kernels:
        _, _, enm_kernel, _, _, _, _ = str_to_kernel_set("many", kernel_type)
        mhyps = hyps[(nterm - 1) * 2:]
        print(hyps)
        print(mhyps)
        cal = 0
        for i in range(3):
            for j in range(len(env1[0])):
                cal += enm_kernel(env1[1][i], env2[1][j], mhyps, cutoffs)
                cal += enm_kernel(env1[2][i], env2[2][j], mhyps, cutoffs)
                cal -= enm_kernel(env1[1][i], env2[2][j], mhyps, cutoffs)
                cal -= enm_kernel(env1[2][i], env2[1][j], mhyps, cutoffs)
        kern_finite_diff += cal / (4 * delta**2)
    else:
        # TODO: Establish why 2+3+MB fails (numerical error?)
        return

    if "2" in kernels:
        ntwobody = 1
        _, _, en2_kernel, _, _, _, _ = str_to_kernel_set(["2"], kernel_type)
        print(hyps[0:ntwobody * 2])

        calc1 = en2_kernel(env1[1][0], env2[1][0], hyps[0:ntwobody * 2],
                           cutoffs)
        calc2 = en2_kernel(env1[2][0], env2[2][0], hyps[0:ntwobody * 2],
                           cutoffs)
        calc3 = en2_kernel(env1[1][0], env2[2][0], hyps[0:ntwobody * 2],
                           cutoffs)
        calc4 = en2_kernel(env1[2][0], env2[1][0], hyps[0:ntwobody * 2],
                           cutoffs)
        kern_finite_diff += 4 * (calc1 + calc2 - calc3 - calc4) / (4 *
                                                                   delta**2)
    else:
        ntwobody = 0

    if "3" in kernels:
        _, _, en3_kernel, _, _, _, _ = str_to_kernel_set(["3"], kernel_type)
        print(hyps[ntwobody * 2:])
        calc1 = en3_kernel(env1[1][0], env2[1][0], hyps[ntwobody * 2:],
                           cutoffs)
        calc2 = en3_kernel(env1[2][0], env2[2][0], hyps[ntwobody * 2:],
                           cutoffs)
        calc3 = en3_kernel(env1[1][0], env2[2][0], hyps[ntwobody * 2:],
                           cutoffs)
        calc4 = en3_kernel(env1[2][0], env2[1][0], hyps[ntwobody * 2:],
                           cutoffs)
        kern_finite_diff += 9 * (calc1 + calc2 - calc3 - calc4) / (4 *
                                                                   delta**2)

    kern_analytical = kernel(env1[0][0], env2[0][0], d1, d2, *args)

    assert isclose(kern_finite_diff, kern_analytical, rtol=tol)
Example #22
def test_check_sig_scale(kernels, diff_cutoff):
    """Check whether the grouping is properly assign
    with four environments

    * env1 and env2 are computed from two structures with four
    atoms each. There are two species 1, 2
    * env1_t and env2_t are derived from the same structure, but
      species 2 atoms are removed.
    * only the sigma of 1-1 are non-zero
    * so using env1 and env1_t should produce the same value
    * if the separate group of hyperparameter is properly
      applied, the result should be 2**2 times of
      the reference
    """

    d1 = 1
    d2 = 2
    tol = 1e-4
    scale = 2

    cutoffs, hyps0, hm = generate_diff_hm(kernels, diff_cutoff)

    delta = 1e-8
    env1, env1_t = generate_mb_twin_envs(cutoffs, np.eye(3) * 100, delta, d1, hm)
    env2, env2_t = generate_mb_twin_envs(cutoffs, np.eye(3) * 100, delta, d2, hm)
    env1 = env1[0][0]
    env2 = env2[0][0]
    env1_t = env1_t[0][0]
    env2_t = env2_t[0][0]

    # make the second sigma zero
    hyps1 = np.copy(hyps0)
    hyps0[0::4] = 0  # 1e-8
    hyps1[0::4] = 0  # 1e-8
    hyps1[1::4] *= scale

    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        kernels, "mc", hm
    )

    args0 = from_mask_to_args(hyps0, cutoffs, hm)
    args1 = from_mask_to_args(hyps1, cutoffs, hm)

    reference = en_kernel(env1, env2, *args0)
    result = en_kernel(env1_t, env2_t, *args1)
    print(en_kernel.__name__, result, reference)
    if reference != 0:
        assert isclose(result / reference, scale ** 2, rtol=tol)

    reference = force_en_kernel(env1, env2, d1, *args0)
    result = force_en_kernel(env1_t, env2_t, d1, *args1)
    print(force_en_kernel.__name__, result, reference)
    if reference != 0:
        assert isclose(result / reference, scale ** 2, rtol=tol)

    reference = kernel(env1, env2, d1, d2, *args0)
    result = kernel(env1_t, env2_t, d1, d2, *args1)
    print(kernel.__name__, result, reference)
    if reference != 0:
        assert isclose(result / reference, scale ** 2, rtol=tol)

    reference = kg(env1, env2, d1, d2, *args0)
    result = kg(env1_t, env2_t, d1, d2, *args1)
    print(kg.__name__, result, reference)
    if reference[0] != 0:
        assert isclose(result[0] / reference[0], scale ** 2, rtol=tol)
    for idx in range(reference[1].shape[0]):
        # check sig0
        if reference[1][idx] != 0 and (idx % 4) == 0:
            assert isclose(result[1][idx] / reference[1][idx], scale, rtol=tol)
        # check the rest, but skip sig 1
        elif reference[1][idx] != 0 and (idx % 4) != 1:
            assert isclose(result[1][idx] / reference[1][idx], scale ** 2, rtol=tol)
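
The checks above rely on the kernel being quadratic in the signal variance: scaling the sigma of a hyperparameter group by scale multiplies the kernel value by scale**2, while the gradient with respect to that sigma scales only linearly, which is why some gradient entries are compared against scale rather than scale**2. A minimal sketch of this scaling property with a toy kernel; the toy_* names are illustrative and not FLARE's grouped hyperparameters.

import numpy as np

def toy_kernel(r, sig, ls=1.0):
    return sig ** 2 * np.exp(-0.5 * r ** 2 / ls ** 2)

def toy_kernel_sig_grad(r, sig, ls=1.0):
    # d k / d sig = 2 * sig * exp(...) = 2 * k / sig
    return 2.0 * toy_kernel(r, sig, ls) / sig

r = 0.5
sig = 1.2
scale = 2.0
tol = 1e-8

reference = toy_kernel(r, sig)
result = toy_kernel(r, sig * scale)
# The kernel value scales quadratically with sigma.
assert np.isclose(result / reference, scale ** 2, rtol=tol)

# The sigma gradient scales only linearly with sigma.
grad_ref = toy_kernel_sig_grad(r, sig)
grad_res = toy_kernel_sig_grad(r, sig * scale)
assert np.isclose(grad_res / grad_ref, scale, rtol=tol)
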
Example #23
    def __init__(self, kernel: Callable = None,
                 kernel_grad: Callable = None,
                 hyps: 'ndarray' = None,
                 cutoffs: 'ndarray' = None,
                 hyp_labels: List = None,
                 opt_algorithm: str = 'L-BFGS-B',
                 maxiter: int = 10, parallel: bool = False,
                 per_atom_par: bool = True,
                 n_cpus: int = 1, n_sample: int = 100,
                 output: Output = None,
                 multihyps: bool = False, hyps_mask: dict = None,
                 kernel_name="2+3_mc", name="default_gp", **kwargs):
        """Initialize GP parameters and training data."""

        # load arguments into attributes

        self.hyp_labels = hyp_labels
        self.cutoffs = np.array(cutoffs, dtype=np.float64)
        self.opt_algorithm = opt_algorithm

        if hyps is None:
            # If no hyperparameters are passed in, assume 2 hyps for each
            # cutoff, plus one noise hyperparameter, and use a guess value
            self.hyps = np.array([0.1] * (1 + 2 * len(cutoffs)))
        else:
            self.hyps = np.array(hyps, dtype=np.float64)

        self.output = output
        self.per_atom_par = per_atom_par
        self.maxiter = maxiter
        self.n_cpus = n_cpus
        self.n_sample = n_sample
        self.parallel = parallel

        if 'nsample' in kwargs:
            DeprecationWarning("nsample is being replaced with n_sample")
            self.n_sample = kwargs.get('nsample')
        if 'par' in kwargs:
            DeprecationWarning("par is being replaced with parallel")
            self.parallel = kwargs.get('par')
        if 'no_cpus' in kwargs:
            DeprecationWarning("no_cpus is being replaced with n_cpu")
            self.n_cpus = kwargs.get('no_cpus')

        # TO DO, clean up all the other kernel arguments
        if kernel is None:
            kernel, grad, ek, efk = str_to_kernel_set(kernel_name, multihyps)
            self.kernel = kernel
            self.kernel_grad = grad
            self.energy_force_kernel = efk
            self.energy_kernel = ek
            self.kernel_name = kernel.__name__
        else:
            DeprecationWarning("kernel, kernel_grad, energy_force_kernel "
                               "and energy_kernel will be replaced by kernel_name")
            self.kernel_name = kernel.__name__
            self.kernel = kernel
            self.kernel_grad = kernel_grad
            self.energy_force_kernel = kwargs.get('energy_force_kernel')
            self.energy_kernel = kwargs.get('energy_kernel')

        self.name = name

        # parallelization
        if self.parallel:
            if n_cpus is None:
                self.n_cpus = mp.cpu_count()
            else:
                self.n_cpus = n_cpus
        else:
            self.n_cpus = 1

        self.training_data = []  # Atomic environments
        self.training_labels = []  # Forces acting on central atoms of at. envs.
        self.training_labels_np = np.empty(0, )

        # Parameters set during training
        self.ky_mat = None
        self.l_mat = None
        self.alpha = None
        self.ky_mat_inv = None
        self.likelihood = None
        self.likelihood_gradient = None
        self.bounds = None

        self.hyps_mask = hyps_mask
        self.multihyps = multihyps
        self.check_instantiation()
Example #24
    def __init__(
        self,
        kernels: List[str] = None,
        component: str = "mc",
        hyps: "ndarray" = None,
        cutoffs: dict = None,
        hyps_mask: dict = None,
        hyp_labels: List = None,
        opt_algorithm: str = "L-BFGS-B",
        maxiter: int = 10,
        parallel: bool = False,
        per_atom_par: bool = True,
        n_cpus: int = 1,
        n_sample: int = 100,
        output: Output = None,
        name="default_gp",
        energy_noise: float = 0.01,
        **kwargs,
    ):
        """Initialize GP parameters and training data."""

        # load arguments into attributes
        self.name = name

        self.output = output
        self.opt_algorithm = opt_algorithm

        self.per_atom_par = per_atom_par
        self.maxiter = maxiter

        # set up parallelization
        self.n_cpus = n_cpus
        self.n_sample = n_sample
        self.parallel = parallel

        self.component = component
        self.kernels = (["twobody", "threebody"] if kernels is None else
                        kernel_str_to_array("".join(kernels)))
        self.cutoffs = {} if cutoffs is None else cutoffs
        self.hyp_labels = hyp_labels
        self.hyps_mask = {} if hyps_mask is None else hyps_mask
        self.hyps = hyps

        GaussianProcess.backward_arguments(kwargs, self.__dict__)
        GaussianProcess.backward_attributes(self.__dict__)

        # ------------  "computed" attributes ------------

        if self.output is None:
            self.logger_name = self.name + "GaussianProcess"
            set_logger(self.logger_name,
                       stream=True,
                       fileout_name=None,
                       verbose="info")
        else:
            self.logger_name = self.output.basename + "log"

        if self.hyps is None:
            # If no hyperparameters are passed in, assume 2 hyps for each
            # kernel, plus one noise hyperparameter, and use a guess value
            self.hyps = np.array([0.1] * (1 + 2 * len(self.kernels)))
        else:
            self.hyps = np.array(self.hyps, dtype=np.float64)

        kernel, grad, ek, efk, efs_e, efs_f, efs_self = str_to_kernel_set(
            self.kernels, self.component, self.hyps_mask)
        self.kernel = kernel
        self.kernel_grad = grad
        self.energy_force_kernel = efk
        self.energy_kernel = ek
        self.efs_energy_kernel = efs_e
        self.efs_force_kernel = efs_f
        self.efs_self_kernel = efs_self
        self.kernels = kernel_str_to_array(kernel.__name__)

        # parallelization
        if self.parallel:
            if self.n_cpus is None:
                self.n_cpus = mp.cpu_count()
            else:
                self.n_cpus = n_cpus
        else:
            self.n_cpus = 1

        self.training_data = []  # Atomic environments
        self.training_labels = []  # Forces acting on central atoms
        self.training_labels_np = np.empty(0, )
        self.n_envs_prev = len(self.training_data)

        # Attributes to accommodate energy labels:
        self.training_structures = []  # Environments of each structure
        self.energy_labels = []  # Energies of training structures
        self.energy_labels_np = np.empty(0, )
        self.energy_noise = energy_noise
        self.all_labels = np.empty(0, )

        # Parameters set during training
        self.ky_mat = None
        self.force_block = None
        self.energy_block = None
        self.force_energy_block = None
        self.l_mat = None
        self.l_mat_inv = None
        self.alpha = None
        self.ky_mat_inv = None
        self.likelihood = None
        self.likelihood_gradient = None
        self.bounds = None

        # File used for reading / writing model if model is large
        self.ky_mat_file = None
        # Flag if too-big warning has been printed for this model
        self.large_warning = False

        if self.logger_name is None:
            if self.output is None:
                self.logger_name = self.name + "GaussianProcess"
                set_logger(self.logger_name,
                           stream=True,
                           fileout_name=None,
                           verbose="info")
            else:
                self.logger_name = self.output.basename + "log"
        logger = logging.getLogger(self.logger_name)

        if self.cutoffs == {}:
            # If no cutoffs are passed in, assume 7 A for the 2-body kernel
            # and 3.5 A for the 3-body kernel.
            cutoffs = {}
            if "twobody" in self.kernels:
                cutoffs["twobody"] = 7
            if "threebody" in self.kernels:
                cutoffs["threebody"] = 3.5
            if "manybody" in self.kernels:
                raise ValueError("No cutoff was set for the manybody kernel."
                                 "A default value will not be set by default.")

            self.cutoffs = cutoffs
            logger.warning("Warning: No cutoffs were set for your GP."
                           "Default values have been assigned but you "
                           "should think carefully about which are "
                           "appropriate for your use case.")

        self.check_instantiation()
Example #25
def test_force(kernels, diff_cutoff):
    """Check that the analytical force kernel matches finite difference of
    energy kernel."""

    d1 = 1
    d2 = 2
    tol = 1e-3
    cell = 1e7 * np.eye(3)
    delta = 1e-4
    cutoffs = np.ones(3) * 1.2

    np.random.seed(10)

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    kernel, kg, en_kernel, fek, _, _, _ = str_to_kernel_set(kernels, "mc", hm)

    nterm = 0
    for term in ["twobody", "threebody", "manybody"]:
        if term in kernels:
            nterm += 1

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    # check force kernel
    kern_finite_diff = 0
    if "manybody" in kernels and len(kernels) == 1:
        _, _, enm_kernel, _, _, _, _ = str_to_kernel_set(["manybody"], "mc", hm)
        mhyps, mcutoffs, mhyps_mask = Parameters.get_component_mask(
            hm, "manybody", hyps=hyps
        )
        margs = from_mask_to_args(mhyps, mcutoffs, mhyps_mask)
        cal = 0
        for i in range(3):
            for j in range(len(env1[0])):
                cal += enm_kernel(env1[1][i], env2[1][j], *margs)
                cal += enm_kernel(env1[2][i], env2[2][j], *margs)
                cal -= enm_kernel(env1[1][i], env2[2][j], *margs)
                cal -= enm_kernel(env1[2][i], env2[1][j], *margs)
        kern_finite_diff += cal / (4 * delta ** 2)
    elif "manybody" in kernels:
        # TODO: Establish why 2+3+MB fails (numerical error?)
        return

    if "twobody" in kernels:
        ntwobody = 1
        _, _, en2_kernel, _, _, _, _ = str_to_kernel_set(["twobody"], "mc", hm)
        bhyps, bcutoffs, bhyps_mask = Parameters.get_component_mask(
            hm, "twobody", hyps=hyps
        )
        args2 = from_mask_to_args(bhyps, bcutoffs, bhyps_mask)

        calc1 = en2_kernel(env1[1][0], env2[1][0], *args2)
        calc2 = en2_kernel(env1[2][0], env2[2][0], *args2)
        calc3 = en2_kernel(env1[1][0], env2[2][0], *args2)
        calc4 = en2_kernel(env1[2][0], env2[1][0], *args2)
        kern_finite_diff += 4 * (calc1 + calc2 - calc3 - calc4) / (4 * delta ** 2)
    else:
        ntwobody = 0

    if "threebody" in kernels:
        _, _, en3_kernel, _, _, _, _ = str_to_kernel_set(["threebody"], "mc", hm)

        thyps, tcutoffs, thyps_mask = Parameters.get_component_mask(
            hm, "threebody", hyps=hyps
        )
        args3 = from_mask_to_args(thyps, tcutoffs, thyps_mask)

        calc1 = en3_kernel(env1[1][0], env2[1][0], *args3)
        calc2 = en3_kernel(env1[2][0], env2[2][0], *args3)
        calc3 = en3_kernel(env1[1][0], env2[2][0], *args3)
        calc4 = en3_kernel(env1[2][0], env2[1][0], *args3)
        kern_finite_diff += 9 * (calc1 + calc2 - calc3 - calc4) / (4 * delta ** 2)

    args = from_mask_to_args(hyps, cutoffs, hm)
    kern_analytical = kernel(env1[0][0], env2[0][0], d1, d2, *args)

    assert isclose(kern_finite_diff, kern_analytical, rtol=tol)
Example #26
def test_force_en_multi_vs_simple(kernels, multi_cutoff):
    """Check that the analytical kernel matches the one implemented
    in mc_simple.py"""

    d1 = 1
    d2 = 2
    tol = 1e-4
    cell = 1e7 * np.eye(3)

    # set hyperparameters
    cutoffs, hyps1, hyps2, hm1, hm2 = generate_same_hm(kernels, multi_cutoff)

    delta = 1e-8
    env1 = generate_mb_envs(cutoffs, cell, delta, d1)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2)
    env1 = env1[0][0]
    env2 = env2[0][0]

    # mc_simple
    kernel0, kg0, en_kernel0, force_en_kernel0, _, _, _ = str_to_kernel_set(
        kernels, "mc", None
    )
    args0 = from_mask_to_args(hyps1, cutoffs)

    # mc_sephyps
    # args1 and args2 use 1 and 2 groups of hyperparameters respectively;
    # if multi_cutoff is set, 1 or 2 groups of cutoffs as well,
    # but with the same values as in args0
    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        kernels, "mc", hm2
    )
    args1 = from_mask_to_args(hyps1, cutoffs, hm1)
    args2 = from_mask_to_args(hyps2, cutoffs, hm2)

    funcs = [
        [kernel0, kg0, en_kernel0, force_en_kernel0],
        [kernel, kg, en_kernel, force_en_kernel],
    ]

    # compare whether mc_sephyps and mc_simple
    # yield the same values
    i = 2
    reference = funcs[0][i](env1, env2, *args0)
    result = funcs[1][i](env1, env2, *args1)
    print(kernels, i, reference, result)
    assert isclose(reference, result, rtol=tol)
    result = funcs[1][i](env1, env2, *args2)
    print(kernels, i, reference, result)
    assert isclose(reference, result, rtol=tol)

    i = 3
    reference = funcs[0][i](env1, env2, d1, *args0)
    result = funcs[1][i](env1, env2, d1, *args1)
    print(kernels, i, reference, result)
    assert isclose(reference, result, rtol=tol)
    result = funcs[1][i](env1, env2, d1, *args2)
    print(kernels, i, reference, result)
    assert isclose(reference, result, rtol=tol)

    i = 0
    reference = funcs[0][i](env1, env2, d1, d2, *args0)
    result = funcs[1][i](env1, env2, d1, d2, *args1)
    assert isclose(reference, result, rtol=tol)
    print(kernels, i, reference, result)
    result = funcs[1][i](env1, env2, d1, d2, *args2)
    assert isclose(reference, result, rtol=tol)
    print(kernels, i, reference, result)

    i = 1
    reference = funcs[0][i](env1, env2, d1, d2, *args0)
    result = funcs[1][i](env1, env2, d1, d2, *args1)
    print(kernels, i, reference, result)
    assert isclose(reference[0], result[0], rtol=tol)
    assert isclose(reference[1], result[1], rtol=tol).all()

    result = funcs[1][i](env1, env2, d1, d2, *args2)
    print(kernels, i, reference, result)
    assert isclose(reference[0], result[0], rtol=tol)
    joint_grad = np.zeros(len(result[1]) // 2)
    for i in range(joint_grad.shape[0]):
        joint_grad[i] = result[1][i * 2] + result[1][i * 2 + 1]
    assert isclose(reference[1], joint_grad, rtol=tol).all()