Example #1
def test_force_en(kernels, diff_cutoff):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    delta = 1e-5
    d1 = 1
    d2 = 2
    cell = 1e7 * np.eye(3)
    np.random.seed(0)

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    args = from_mask_to_args(hyps, cutoffs, hm)

    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    _, _, en_kernel, force_en_kernel, _, _, _ = \
        str_to_kernel_set(kernels, "mc", hm)

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args)

    kern_finite_diff = 0
    if ('manybody' in kernels):
        kernel, _, enm_kernel, efk, _, _, _ = \
            str_to_kernel_set(['manybody'], "mc", hm)

        calc = 0
        for i in range(len(env1[0])):
            calc += enm_kernel(env1[1][i], env2[0][0], *args)
            calc -= enm_kernel(env1[2][i], env2[0][0], *args)

        kern_finite_diff += (calc) / (2 * delta)

    if ('twobody' in kernels or 'threebody' in kernels):
        args23 = from_mask_to_args(hyps, cutoffs, hm)

    if ('twobody' in kernels):
        kernel, _, en2_kernel, efk, _, _, _ = \
            str_to_kernel_set(['2b'], 'mc', hm)
        calc1 = en2_kernel(env1[1][0], env2[0][0], *args23)
        calc2 = en2_kernel(env1[2][0], env2[0][0], *args23)
        diff2b = 4 * (calc1 - calc2) / 2.0 / delta / 2.0

        kern_finite_diff += diff2b

    if ('threebody' in kernels):
        kernel, _, en3_kernel, efk, _, _, _ = \
            str_to_kernel_set(['3b'], 'mc', hm)
        calc1 = en3_kernel(env1[1][0], env2[0][0], *args23)
        calc2 = en3_kernel(env1[2][0], env2[0][0], *args23)
        diff3b = 9 * (calc1 - calc2) / 3.0 / delta / 2.0

        kern_finite_diff += diff3b

    tol = 1e-3
    print("\nforce_en", kernels, kern_finite_diff, kern_analytical)
    assert (isclose(-kern_finite_diff, kern_analytical, rtol=tol))
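Note: the test above approximates the force/energy kernel with a first-order central difference of the energy kernel, (E(+delta) - E(-delta)) / (2*delta). A minimal, self-contained sketch of that check on a toy function (plain NumPy; the names here are hypothetical, not FLARE kernels):

import numpy as np

def toy_energy(x):
    # stand-in for an energy kernel evaluated at a perturbed environment
    return np.sin(x) + 0.1 * x ** 2

def toy_derivative(x):
    # analytical derivative of the toy energy
    return np.cos(x) + 0.2 * x

delta = 1e-5
x0 = 0.7

# same central-difference stencil as kern_finite_diff above
fd = (toy_energy(x0 + delta) - toy_energy(x0 - delta)) / (2 * delta)

assert np.isclose(fd, toy_derivative(x0), rtol=1e-6)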
Example #2
def test_hyps_grad(kernel_name, nbond, ntriplet, constraint):

    np.random.seed(0)

    delta = 1e-8
    cutoffs = np.array([1, 1])
    env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(
        cutoffs, delta)

    hyps, hm, cut = generate_hm(nbond, ntriplet, cutoffs, constraint)
    args = from_mask_to_args(hyps, hm, cutoffs)
    d1 = 1
    d2 = 2

    kernel = stk[kernel_name]
    kernel_grad = stk[kernel_name + "_grad"]

    # compute analytical values
    k, grad = kernel_grad(env1_1, env2_1, d1, d2, *args)

    print(kernel_name)
    print("grad", grad)
    print("hyps", hyps)

    tol = 1e-4
    original = kernel(env1_1, env2_1, d1, d2, *args)

    nhyps = len(hyps) - 1
    if ('map' in hm.keys()):
        if (hm['map'][-1] != (len(hm['original']) - 1)):
            nhyps = len(hyps)
        print(hm['map'])
        original_hyps = np.copy(hm['original'])

    for i in range(nhyps):
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        if ('map' in hm.keys()):
            newid = hm['map'][i]
            hm['original'] = np.copy(original_hyps)
            hm['original'][newid] += delta
        newargs = from_mask_to_args(newhyps, hm, cutoffs)

        hgrad = (kernel(env1_1, env2_1, d1, d2, *newargs) - original) / delta
        if ('map' in hm.keys()):
            print(i, "hgrad", hgrad, grad[hm['map'][i]])
            assert (np.isclose(grad[hm['map'][i]], hgrad, atol=tol))
        else:
            print(i, "hgrad", hgrad, grad[i])
            assert (np.isclose(grad[i], hgrad, atol=tol))
Example #3
def efs_energy_vector_unit(name, s, e, x, efs_energy_kernel, hyps, cutoffs, hyps_mask):

    training_structures = _global_training_structures[name]

    size = e - s
    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    k_ee = np.zeros((1, size))
    k_fe = np.zeros((3, size))
    k_se = np.zeros((6, size))

    for m_index in range(size):
        training_structure = training_structures[m_index + s]

        ee_curr = 0
        fe_curr = np.zeros(3)
        se_curr = np.zeros(6)

        for environment in training_structure:
            ee, fe, se = efs_energy_kernel(x, environment, *args)
            ee_curr += ee
            fe_curr += fe
            se_curr += se

        k_ee[:, m_index] = ee_curr
        k_fe[:, m_index] = fe_curr
        k_se[:, m_index] = se_curr

    return k_ee, k_fe, k_se
Example #4
def get_kernel_vector_unit(name, s, e, x, d_1, kernel, hyps, cutoffs,
                           hyps_mask):
    """
    Compute kernel vector, comparing input environment to all environments
    in the GP's training set.
    :param training_data: Set of atomic environments to compare against
    :param kernel:
    :param x: data point to compare against kernel matrix
    :type x: AtomicEnvironment
    :param d_1: Cartesian component of force vector to get (1=x,2=y,3=z)
    :type d_1: int
    :param hyps: list of hyper-parameters
    :param cutoffs: The cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters

    :return: kernel vector
    :rtype: np.ndarray
    """

    size = (e - s)
    ds = [1, 2, 3]

    args = from_mask_to_args(hyps, hyps_mask, cutoffs)

    k_v = np.zeros(size * 3, )

    for m_index in range(size):
        x_2 = _global_training_data[name][m_index + s]
        for d_2 in ds:
            k_v[m_index * 3 + d_2 - 1] = kernel(x, x_2, d_1, d_2, *args)

    return k_v
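Note: the loop above flattens each (environment, Cartesian component) pair into one vector slot via m_index * 3 + d_2 - 1, so entries are grouped per environment as [env0_x, env0_y, env0_z, env1_x, ...]. A small sketch of that layout with placeholder values (hypothetical sizes, no kernel calls):

import numpy as np

n_envs = 4                          # hypothetical number of training environments
k_v = np.zeros(n_envs * 3)

for m_index in range(n_envs):
    for d_2 in [1, 2, 3]:           # x, y, z force components
        # placeholder value standing in for kernel(x, x_2, d_1, d_2, *args)
        k_v[m_index * 3 + d_2 - 1] = 10 * m_index + d_2

assert k_v[0] == 1 and k_v[3] == 11     # env0_x and env1_x slots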
Example #5
def test_force(kernel_name, nbond, ntriplet, constraint):
    """Check that the analytical force kernel matches finite difference of
    energy kernel."""

    # create env 1
    delta = 1e-5
    cutoffs = np.array([1, 1])
    env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(
        cutoffs, delta)

    # set hyperparameters
    hyps, hm, cut = generate_hm(nbond, ntriplet, cutoffs, constraint)
    args0 = from_mask_to_args(hyps, hm, cutoffs)
    d1 = 1
    d2 = 2

    kernel = stk[kernel_name]
    if bool('two' in kernel_name) != bool('three' in kernel_name):
        en_kernel = stk[kernel_name + "_en"]
    else:
        en_kernel = stk['two_plus_three_mc_en']

    # check force kernel
    calc1 = en_kernel(env1_2, env2_2, *args0)
    calc2 = en_kernel(env1_3, env2_3, *args0)
    calc3 = en_kernel(env1_2, env2_3, *args0)
    calc4 = en_kernel(env1_3, env2_2, *args0)

    kern_finite_diff = (calc1 + calc2 - calc3 - calc4) / (4 * delta**2)
    kern_analytical = kernel(env1_1, env2_1, d1, d2, *args0)
    tol = 1e-4
    assert (np.isclose(kern_finite_diff, kern_analytical, atol=tol))
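Note: the four-point stencil above, (calc1 + calc2 - calc3 - calc4) / (4 * delta**2), is the standard central-difference approximation of a mixed second derivative. A self-contained sketch of the same check on a toy two-variable function (plain NumPy, not a FLARE kernel):

import numpy as np

def toy_energy(x, y):
    return np.sin(x) * np.cos(y)            # toy energy surface

def toy_mixed_derivative(x, y):
    return -np.cos(x) * np.sin(y)           # analytical d^2/dxdy

delta = 1e-4
x0, y0 = 0.3, 0.8

fd = (
    toy_energy(x0 + delta, y0 + delta) + toy_energy(x0 - delta, y0 - delta)
    - toy_energy(x0 + delta, y0 - delta) - toy_energy(x0 - delta, y0 + delta)
) / (4 * delta ** 2)

assert np.isclose(fd, toy_mixed_derivative(x0, y0), rtol=1e-4)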
Example #6
def energy_force_vector_unit(name,
                             s,
                             e,
                             x,
                             kernel,
                             hyps,
                             cutoffs=None,
                             hyps_mask=None,
                             d_1=None):
    """
    Gets part of the energy/force vector.
    """

    training_data = _global_training_data[name]

    ds = [1, 2, 3]
    size = (e - s) * 3
    k_v = np.zeros(size, )

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size):
        x_2 = training_data[int(math.floor(m_index / 3)) + s]
        d_2 = ds[m_index % 3]
        k_v[m_index] = kernel(x_2, x, d_2, *args)

    return k_v
Example #7
def get_force_energy_block_pack(hyps: np.ndarray, name: str, s1: int, e1: int,
                                s2: int, e2: int, same: bool, kernel, cutoffs,
                                hyps_mask):
    # initialize matrices
    training_data = _global_training_data[name]
    training_structures = _global_training_structures[name]
    size1 = (e1 - s1) * 3
    size2 = e2 - s2
    force_energy_block = np.zeros([size1, size2])

    ds = [1, 2, 3]

    # calculate elements
    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size1):
        environment_1 = training_data[int(math.floor(m_index / 3)) + s1]
        d_1 = ds[m_index % 3]

        for n_index in range(size2):
            structure = training_structures[n_index + s2]

            # Loop over environments in the training structure.
            kern_curr = 0
            for environment_2 in structure:
                kern_curr += kernel(environment_1, environment_2, d_1, *args)

            # store kernel value
            force_energy_block[m_index, n_index] = kern_curr

    return force_energy_block
Example #8
def energy_energy_vector_unit(
    name, s, e, x, kernel, hyps, cutoffs=None, hyps_mask=None, d_1=None
):
    """
    Gets part of the energy/energy vector.
    """

    training_structures = _global_training_structures[name]

    size = e - s
    energy_energy_unit = np.zeros(
        size,
    )

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size):
        structure = training_structures[m_index + s]
        kern_curr = 0
        for environment in structure:
            kern_curr += kernel(x, environment, *args)

        energy_energy_unit[m_index] = kern_curr

    return energy_energy_unit
Example #9
def test_hyps_grad(kernels, diff_cutoff, constraint):

    delta = 1e-8
    d1 = 1
    d2 = 2
    tol = 1e-4

    np.random.seed(10)
    cutoffs, hyps, hm = generate_diff_hm(kernels,
                                         diff_cutoff,
                                         constraint=constraint)
    args = from_mask_to_args(hyps, cutoffs, hm)
    kernel, kernel_grad, _, _, _, _, _ = str_to_kernel_set(kernels, "mc", hm)

    np.random.seed(0)
    env1 = generate_mb_envs(cutoffs, np.eye(3) * 100, delta, d1)
    env2 = generate_mb_envs(cutoffs, np.eye(3) * 100, delta, d2)
    env1 = env1[0][0]
    env2 = env2[0][0]

    k, grad = kernel_grad(env1, env2, d1, d2, *args)

    original = kernel(env1, env2, d1, d2, *args)

    nhyps = len(hyps)
    if hm['train_noise']:
        nhyps -= 1
    original_hyps = Parameters.get_hyps(hm, hyps=hyps)

    for i in range(nhyps):
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        if ('map' in hm.keys()):
            newid = hm['map'][i]
            hm['original_hyps'] = np.copy(original_hyps)
            hm['original_hyps'][newid] += delta
        newargs = from_mask_to_args(newhyps, cutoffs, hm)

        hgrad = (kernel(env1, env2, d1, d2, *newargs) - original) / delta
        if 'map' in hm:
            print(i, "hgrad", hgrad, grad[hm['map'][i]])
            assert (isclose(grad[hm['map'][i]], hgrad, rtol=tol))
        else:
            print(i, "hgrad", hgrad, grad[i])
            assert (isclose(grad[i], hgrad, rtol=tol))
Example #10
def get_force_block_pack(
    hyps: np.ndarray,
    name: str,
    s1: int,
    e1: int,
    s2: int,
    e2: int,
    same: bool,
    kernel,
    cutoffs,
    hyps_mask,
):
    """Compute covariance matrix element between set1 and set2
    :param hyps: list of hyper-parameters
    :param name: name of the gp instance.
    :param same: whether the row and column are the same
    :param kernel: function object of the kernel
    :param cutoffs: The cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters

    :return: covariance matrix
    """

    # initialize matrices
    training_data = _global_training_data[name]
    size1 = (e1 - s1) * 3
    size2 = (e2 - s2) * 3
    force_block = np.zeros([size1, size2])

    ds = [1, 2, 3]

    # calculate elements
    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size1):
        x_1 = training_data[int(math.floor(m_index / 3)) + s1]
        d_1 = ds[m_index % 3]
        if same:
            lowbound = m_index
        else:
            lowbound = 0
        for n_index in range(lowbound, size2):
            x_2 = training_data[int(math.floor(n_index / 3)) + s2]
            d_2 = ds[n_index % 3]
            kern_curr = kernel(x_1, x_2, d_1, d_2, *args)
            # store kernel value
            force_block[m_index, n_index] = kern_curr
            if same:
                force_block[n_index, m_index] = kern_curr

    return force_block
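Note: when same is True the inner loop starts at lowbound = m_index, so only the upper triangle is evaluated and each value is mirrored, exploiting the symmetry of the force/force block. A minimal sketch of that fill pattern with a placeholder symmetric function:

import numpy as np

size = 6
block = np.zeros((size, size))

def placeholder_cov(i, j):
    # stands in for a symmetric kernel evaluation kernel(x_i, x_j, ...)
    return 1.0 / (1 + abs(i - j))

for m in range(size):
    for n in range(m, size):        # lowbound = m, as in the "same" branch
        val = placeholder_cov(m, n)
        block[m, n] = val
        block[n, m] = val           # mirror into the lower triangle

assert np.allclose(block, block.T)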
Example #11
File: gp.py  Project: owaisahmad18/flare
    def predict(self, x_t: AtomicEnvironment, d: int) -> [float, float]:
        """
        Predict a force component of the central atom of a local environment.

        Args:
            x_t (AtomicEnvironment): Input local environment.
            d (int): Force component to be predicted (1 is x, 2 is y, and
                3 is z).

        Return:
            (float, float): Mean and epistemic variance of the prediction.
        """

        if d not in [1, 2, 3]:
            raise ValueError("d should be 1, 2, or 3")

        # Kernel vector allows for evaluation of atomic environments.
        if self.parallel and not self.per_atom_par:
            n_cpus = self.n_cpus
        else:
            n_cpus = 1

        self.sync_data()

        k_v = get_kernel_vector(
            self.name,
            self.kernel,
            self.energy_force_kernel,
            x_t,
            d,
            self.hyps,
            cutoffs=self.cutoffs,
            hyps_mask=self.hyps_mask,
            n_cpus=n_cpus,
            n_sample=self.n_sample,
        )

        # Guarantee that alpha is up to date with training set
        self.check_L_alpha()

        # get predictive mean
        pred_mean = np.matmul(k_v, self.alpha)

        # get predictive variance without cholesky (possibly faster)
        # pass args to kernel based on if mult. hyperparameters in use
        args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)

        self_kern = self.kernel(x_t, x_t, d, d, *args)
        pred_var = self_kern - np.matmul(np.matmul(k_v, self.ky_mat_inv), k_v)

        return pred_mean, pred_var
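Note: the mean and variance above follow the usual GP prediction identities, mean = k_v . alpha with alpha = K_y^{-1} y, and var = k(x, x) - k_v^T K_y^{-1} k_v. A self-contained sketch of those two lines with a toy RBF kernel (assumed hyperparameters, no FLARE objects):

import numpy as np

def rbf(a, b, sig=1.0, ls=0.5):
    # toy squared-exponential kernel
    return sig ** 2 * np.exp(-0.5 * (a - b) ** 2 / ls ** 2)

rng = np.random.default_rng(0)
X = rng.uniform(-1, 1, 8)                  # toy 1-D training inputs
y = np.sin(3 * X)                          # toy training labels
noise = 1e-2

K = rbf(X[:, None], X[None, :]) + noise ** 2 * np.eye(len(X))
K_inv = np.linalg.inv(K)
alpha = K_inv @ y                          # plays the role of self.alpha

x_t = 0.1
k_v = rbf(X, x_t)                          # plays the role of the kernel vector

pred_mean = k_v @ alpha
pred_var = rbf(x_t, x_t) - k_v @ K_inv @ k_v
print(pred_mean, pred_var)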
Example #12
    def predict(self, atom_env):

        assert Parameters.compare_dict(
            self.hyps_mask, atom_env.cutoffs_mask
        ), "GP.hyps_mask is not the same as atom_env.cutoffs_mask"

        f_spcs = np.zeros(3)
        vir_spcs = np.zeros(6)
        v_spcs = 0
        e_spcs = 0
        kern = 0

        if len(atom_env.bond_array_2) == 0:
            return f_spcs, vir_spcs, kern, v_spcs, e_spcs

        en_kernel, cutoffs, hyps, hyps_mask = self.kernel_info

        args = from_mask_to_args(hyps, cutoffs, hyps_mask)

        if self.var_map == "pca":
            kern = en_kernel(atom_env, atom_env, *args)

        spcs, comp_r, comp_xyz = self.get_arrays(atom_env)

        # predict for each species
        rebuild_spc = []
        new_bounds = []
        for i, spc in enumerate(spcs):
            lengths = np.array(comp_r[i])
            xyzs = np.array(comp_xyz[i])
            map_ind = self.find_map_index(spc)
            try:
                f, vir, v, e = self.maps[map_ind].predict(lengths, xyzs)
            except ValueError as err_msg:
                rebuild_spc.append(err_msg.args[0])
                new_bounds.append(err_msg.args[1])

            if len(rebuild_spc) > 0:
                raise ValueError(
                    rebuild_spc,
                    new_bounds,
                    f"The {self.kernel_name} map needs re-constructing.",
                )

            f_spcs += f
            vir_spcs += vir
            v_spcs += v
            e_spcs += e

        return f_spcs, vir_spcs, kern, v_spcs, e_spcs
Example #13
def test_force_en_multi_vs_simple():
    """Check that the analytical kernel matches the one implemented
    in mc_simple.py"""

    cutoffs = np.ones(3, dtype=np.float64)
    delta = 1e-8
    env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(
        cutoffs, delta)

    # set hyperparameters
    d1 = 1
    d2 = 2
    tol = 1e-4

    hyps, hm, cut = generate_hm(1, 1, cutoffs, False)

    # mc_simple
    kernel0, kg0, en_kernel0, force_en_kernel0 = str_to_kernel_set(
        "2+3+mb+mc", False)
    hyps = np.ones(7, dtype=np.float64)
    args0 = (hyps, cutoffs)

    # mc_sephyps
    kernel, kg, en_kernel, force_en_kernel = str_to_kernel_set(
        "2+3+mb+mc", True)
    args1 = from_mask_to_args(hyps, hm, cutoffs)

    funcs = [[kernel0, kg0, en_kernel0, force_en_kernel0],
             [kernel, kg, en_kernel, force_en_kernel]]

    i = 0
    reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
    assert (np.isclose(reference, result, atol=tol))

    i = 1
    reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
    assert (np.isclose(reference[0], result[0], atol=tol))
    assert (np.isclose(reference[1], result[1], atol=tol).all())

    i = 2
    reference = funcs[0][i](env1_1, env2_1, *args0)
    result = funcs[1][i](env1_1, env2_1, *args1)
    assert (np.isclose(reference, result, atol=tol))

    i = 3
    reference = funcs[0][i](env1_1, env2_1, d1, *args0)
    result = funcs[1][i](env1_1, env2_1, d1, *args1)
    assert (np.isclose(reference, result, atol=tol))
Example #14
def get_ky_mat_update_serial(
        ky_mat_old, hyps: np.ndarray, name,
        kernel, cutoffs=None, hyps_mask=None):
    '''
    Used for update_L_alpha. If 10 atoms are added to the training
    set, the K matrix gains 10x3 new columns and rows.

    :param ky_mat_old: old covariance matrix
    :param hyps: list of hyper-parameters
    :param training_data: Set of atomic environments to compare against
    :param kernel:
    :param cutoffs: The cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters

    '''

    training_data = _global_training_data[name]

    n = ky_mat_old.shape[0]
    size = len(training_data)
    size3 = size * 3
    m = size - n // 3  # number of new data added
    ky_mat = np.zeros((size3, size3))
    ky_mat[:n, :n] = ky_mat_old

    ds = [1, 2, 3]

    args = from_mask_to_args(hyps, hyps_mask, cutoffs)

    # calculate elements
    for m_index in range(size3):
        x_1 = training_data[int(math.floor(m_index / 3))]
        d_1 = ds[m_index % 3]
        low = int(np.max([m_index, n]))
        for n_index in range(low, size3):
            x_2 = training_data[int(math.floor(n_index / 3))]
            d_2 = ds[n_index % 3]
            # calculate kernel
            kern_curr = kernel(x_1, x_2, d_1, d_2, *args)
            ky_mat[m_index, n_index] = kern_curr
            ky_mat[n_index, m_index] = kern_curr

    # matrix manipulation
    sigma_n, _, __ = obtain_noise_len(hyps, hyps_mask)
    ky_mat[n:, n:] += sigma_n**2 * np.eye(size3 - n)
    return ky_mat
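Note: the update reuses the old covariance matrix as the top-left block and computes kernel entries only for rows/columns involving the newly added data (low = max(m_index, n)). A sketch of that block-update pattern with a placeholder symmetric kernel:

import numpy as np

def placeholder_cov(i, j):
    # stands in for kernel(x_1, x_2, d_1, d_2, *args)
    return np.exp(-abs(i - j))

n_old, n_new = 4, 2
total = n_old + n_new

ky_old = np.array([[placeholder_cov(i, j) for j in range(n_old)] for i in range(n_old)])

ky = np.zeros((total, total))
ky[:n_old, :n_old] = ky_old                    # reuse the old block

for m in range(total):
    for n in range(max(m, n_old), total):      # only entries touching new data
        val = placeholder_cov(m, n)
        ky[m, n] = val
        ky[n, m] = val

full = np.array([[placeholder_cov(i, j) for j in range(total)] for i in range(total)])
assert np.allclose(ky, full)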
Example #15
File: gp.py  Project: mailhexu/flare
    def predict_efs(self, x_t: AtomicEnvironment):
        """Predict the local energy, forces, and partial stresses of an
        atomic environment and their predictive variances."""

        # Kernel vector allows for evaluation of atomic environments.
        if self.parallel and not self.per_atom_par:
            n_cpus = self.n_cpus
        else:
            n_cpus = 1

        _global_training_data[self.name] = self.training_data
        _global_training_labels[self.name] = self.training_labels_np

        energy_vector, force_array, stress_array = efs_kern_vec(
            self.name,
            self.efs_force_kernel,
            self.efs_energy_kernel,
            x_t,
            self.hyps,
            cutoffs=self.cutoffs,
            hyps_mask=self.hyps_mask,
            n_cpus=n_cpus,
            n_sample=self.n_sample,
        )

        # Check that alpha is up to date with training set.
        self.check_L_alpha()

        # Compute mean predictions.
        en_pred = np.matmul(energy_vector, self.alpha)
        force_pred = np.matmul(force_array, self.alpha)
        stress_pred = np.matmul(stress_array, self.alpha)

        # Compute uncertainties.
        args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)
        self_en, self_force, self_stress = self.efs_self_kernel(x_t, *args)

        en_var = self_en - np.matmul(np.matmul(energy_vector, self.ky_mat_inv),
                                     energy_vector)
        force_var = self_force - np.diag(
            np.matmul(np.matmul(force_array, self.ky_mat_inv),
                      force_array.transpose()))
        stress_var = self_stress - np.diag(
            np.matmul(np.matmul(stress_array, self.ky_mat_inv),
                      stress_array.transpose()))

        return en_pred, force_pred, stress_pred, en_var, force_var, stress_var
Example #16
def test_constraint(kernels, diff_cutoff):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    if ('manybody' in kernels):
        return

    d1 = 1
    d2 = 2
    cell = 1e7 * np.eye(3)
    delta = 1e-8

    cutoffs, hyps, hm = generate_diff_hm(kernels,
                                         diff_cutoff=diff_cutoff,
                                         constraint=True)

    _, __, en_kernel, force_en_kernel, _, _, _ = \
        str_to_kernel_set(kernels, "mc", hm)

    args0 = from_mask_to_args(hyps, cutoffs, hm)

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    kern_finite_diff = 0

    if ('twobody' in kernels):
        _, _, en2_kernel, fek2, _, _, _ = \
            str_to_kernel_set(['twobody'], "mc", hm)
        calc1 = en2_kernel(env1[1][0], env2[0][0], *args0)
        calc2 = en2_kernel(env1[0][0], env2[0][0], *args0)
        kern_finite_diff += 4 * (calc1 - calc2) / 2.0 / delta

    if ('threebody' in kernels):
        _, _, en3_kernel, fek3, _, _, _ = \
            str_to_kernel_set(['threebody'], "mc", hm)
        calc1 = en3_kernel(env1[1][0], env2[0][0], *args0)
        calc2 = en3_kernel(env1[0][0], env2[0][0], *args0)
        kern_finite_diff += 9 * (calc1 - calc2) / 3.0 / delta

    kern_analytical = force_en_kernel(env1[0][0], env2[0][0], d1, *args0)

    tol = 1e-4
    print(kern_finite_diff, kern_analytical)
    assert (isclose(-kern_finite_diff, kern_analytical, rtol=tol))
Example #17
    def predict_multicomponent(self, body, atom_env, kernel_info, spcs_list,
                               mappings, mean_only):
        '''
        Add up results from `predict_component` to get the total contribution
        of all species
        '''

        kernel, en_force_kernel, cutoffs, hyps, hyps_mask = kernel_info

        args = from_mask_to_args(hyps, hyps_mask, cutoffs)

        kern = np.zeros(3)
        for d in range(3):
            kern[d] = \
                kernel(atom_env, atom_env, d+1, d+1, *args)

        if (body == 2):
            spcs, comp_r, comp_xyz = get_bonds(atom_env.ctype, atom_env.etypes,
                                               atom_env.bond_array_2)
            set_spcs = []
            for spc in spcs:
                set_spcs += [set(spc)]
            spcs = set_spcs
        elif (body == 3):
            spcs, comp_r, comp_xyz = \
                get_triplets_en(atom_env.ctype, atom_env.etypes,
                        atom_env.bond_array_3, atom_env.cross_bond_inds,
                        atom_env.cross_bond_dists, atom_env.triplet_counts)

        # predict for each species
        f_spcs = 0
        vir_spcs = 0
        v_spcs = 0
        e_spcs = 0
        for i, spc in enumerate(spcs):
            lengths = np.array(comp_r[i])
            xyzs = np.array(comp_xyz[i])
            map_ind = spcs_list.index(spc)
            f, vir, v, e = self.predict_component(lengths, xyzs,
                                                  mappings[map_ind], mean_only)
            f_spcs += f
            vir_spcs += vir
            v_spcs += v
            e_spcs += e

        return f_spcs, vir_spcs, kern, v_spcs, e_spcs
Example #18
def get_energy_block_pack(
    hyps: np.ndarray,
    name: str,
    s1: int,
    e1: int,
    s2: int,
    e2: int,
    same: bool,
    kernel,
    cutoffs,
    hyps_mask,
):

    # initialize matrices
    training_structures = _global_training_structures[name]
    size1 = e1 - s1
    size2 = e2 - s2
    energy_block = np.zeros([size1, size2])

    # calculate elements
    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size1):
        struc_1 = training_structures[m_index + s1]
        if same:
            lowbound = m_index
        else:
            lowbound = 0

        for n_index in range(lowbound, size2):
            struc_2 = training_structures[n_index + s2]

            # Loop over environments in both structures to compute the
            # energy/energy kernel.
            kern_curr = 0
            for environment_1 in struc_1:
                for environment_2 in struc_2:
                    kern_curr += kernel(environment_1, environment_2, *args)

            # Store kernel value.
            energy_block[m_index, n_index] = kern_curr
            if same:
                energy_block[n_index, m_index] = kern_curr

    return energy_block
Example #19
def get_hyps_for_kern(hyps, cutoffs, hyps_mask, c2, etypes2):
    """
    Args:
        data: a single env or a list of envs
    """

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    if len(args) == 2:
        hyps, cutoffs = args
        r_cut = cutoffs[1]

    else:
        (
            cutoff_2b,
            cutoff_3b,
            cutoff_mb,
            nspec,
            spec_mask,
            nbond,
            bond_mask,
            ntriplet,
            triplet_mask,
            ncut3b,
            cut3b_mask,
            nmb,
            mb_mask,
            sig2,
            ls2,
            sig3,
            ls3,
            sigm,
            lsm,
        ) = args

        bc1 = spec_mask[c2]
        bc2 = spec_mask[etypes2[0]]
        bc3 = spec_mask[etypes2[1]]
        ttype = triplet_mask[nspec * nspec * bc1 + nspec * bc2 + bc3]
        ls = ls3[ttype]
        sig = sig3[ttype]
        r_cut = cutoff_3b
        hyps = [sig, ls]

    return hyps, r_cut
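Note: the triplet hyperparameters above are selected through a flattened species index, nspec * nspec * bc1 + nspec * bc2 + bc3, where spec_mask maps atomic numbers to hyperparameter groups. A small illustration of that indexing with made-up groups (the real mask layout comes from the hyps_mask dictionary):

import numpy as np

nspec = 2
spec_mask = np.zeros(118, dtype=int)   # atomic number -> species group
spec_mask[1] = 0                       # hypothetical: H -> group 0
spec_mask[8] = 1                       # hypothetical: O -> group 1

def triplet_index(c, e1, e2):
    b1, b2, b3 = spec_mask[c], spec_mask[e1], spec_mask[e2]
    return nspec * nspec * b1 + nspec * b2 + b3

# nspec**3 possible (center, neighbor, neighbor) group combinations
assert triplet_index(1, 1, 1) == 0
assert triplet_index(8, 8, 8) == nspec ** 3 - 1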
Example #20
File: gp.py  Project: owaisahmad18/flare
    def predict_local_energy_and_var(self, x_t: AtomicEnvironment):
        """Predict the local energy of a local environment and its
        uncertainty.

        Args:
            x_t (AtomicEnvironment): Input local environment.

        Return:
            (float, float): Mean and predictive variance predicted by the GP.
        """

        if self.parallel and not self.per_atom_par:
            n_cpus = self.n_cpus
        else:
            n_cpus = 1

        self.sync_data()

        # get kernel vector
        k_v = en_kern_vec(
            self.name,
            self.energy_force_kernel,
            self.energy_kernel,
            x_t,
            self.hyps,
            cutoffs=self.cutoffs,
            hyps_mask=self.hyps_mask,
            n_cpus=n_cpus,
            n_sample=self.n_sample,
        )

        # get predictive mean
        pred_mean = np.matmul(k_v, self.alpha)

        # get predictive variance
        v_vec = solve_triangular(self.l_mat, k_v, lower=True)
        args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)

        self_kern = self.energy_kernel(x_t, x_t, *args)

        pred_var = self_kern - np.matmul(v_vec, v_vec)

        return pred_mean, pred_var
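Note: here the variance uses the Cholesky factor l_mat instead of the explicit inverse: solving L v = k_v gives v . v = k_v^T K^{-1} k_v when K = L L^T, so this form agrees with the ky_mat_inv form used in predict(). A quick sketch of that equivalence on a random positive-definite matrix:

import numpy as np
from scipy.linalg import solve_triangular

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
K = A @ A.T + 5 * np.eye(5)            # symmetric positive-definite stand-in for ky_mat
k_v = rng.standard_normal(5)
k_xx = 2.0                             # stand-in for the self kernel

L = np.linalg.cholesky(K)              # K = L @ L.T, L lower triangular
v = solve_triangular(L, k_v, lower=True)

var_chol = k_xx - v @ v
var_inv = k_xx - k_v @ np.linalg.inv(K) @ k_v
assert np.isclose(var_chol, var_inv)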
Example #21
def force_energy_vector_unit(name, s, e, x, kernel, hyps, cutoffs, hyps_mask,
                             d_1):
    """
    Gets part of the force/energy vector.
    """

    size = e - s
    args = from_mask_to_args(hyps, cutoffs, hyps_mask)
    force_energy_unit = np.zeros(size, )

    for m_index in range(size):
        training_structure = _global_training_structures[name][m_index + s]
        kern_curr = 0
        for environment in training_structure:
            kern_curr += kernel(x, environment, d_1, *args)

        force_energy_unit[m_index] = kern_curr

    return force_energy_unit
Example #22
def force_force_vector_unit(name, s, e, x, kernel, hyps, cutoffs, hyps_mask,
                            d_1):
    """
    Gets part of the force/force vector.
    """

    size = e - s
    ds = [1, 2, 3]

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    k_v = np.zeros(size * 3)

    for m_index in range(size):
        x_2 = _global_training_data[name][m_index + s]
        for d_2 in ds:
            k_v[m_index * 3 + d_2 - 1] = kernel(x, x_2, d_1, d_2, *args)

    return k_v
Example #23
def get_reference(grid_env, species, parameter, kernel_name, same_hyps):

    env1, env2, hm1, hm2 = parameter[kernel_name]
    env = env1 if same_hyps else env2
    hm = hm1 if same_hyps else hm2

    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        hm['kernels'], "mc", None if same_hyps else hm)
    args = from_mask_to_args(hm['hyps'], hm['cutoffs'],
                             None if same_hyps else hm)

    energy_force = np.zeros(3, dtype=np.float64)  # np.float is removed in newer NumPy
    # force_force = np.zeros(3, dtype=np.float)
    # force_energy = np.zeros(3, dtype=np.float)
    # energy_energy = np.zeros(3, dtype=np.float)
    for i in range(3):
        energy_force[i] = force_en_kernel(env, grid_env, i + 1, *args)
        # force_energy[i] = force_en_kernel(env, grid_env, i, *args)
        # force_force[i] = kernel(grid_env, env, 0, i, *args)
#     result = funcs[1][i](env1, env2, d1, *args1)
    return energy_force  # , force_energy, force_force, energy_energy
Example #24
def efs_force_vector_unit(name, s, e, x, efs_force_kernel, hyps, cutoffs,
                          hyps_mask):
    size = e - s

    k_ef = np.zeros((1, size * 3))
    k_ff = np.zeros((3, size * 3))
    k_sf = np.zeros((6, size * 3))

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    for m_index in range(size):
        x_2 = _global_training_data[name][m_index + s]
        ef, ff, sf = efs_force_kernel(x, x_2, *args)

        ind1 = m_index * 3
        ind2 = (m_index + 1) * 3

        k_ef[:, ind1:ind2] = ef
        k_ff[:, ind1:ind2] = ff
        k_sf[:, ind1:ind2] = sf

    return k_ef, k_ff, k_sf
Example #25
def test_force_bound_cutoff_compare(kernels, diff_cutoff):
    """Check that the analytical kernel matches the one implemented
    in mc_simple.py"""

    d1 = 1
    d2 = 2
    tol = 1e-4
    cell = 1e7 * np.eye(3)
    delta = 1e-8

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    kernel, kg, en_kernel, force_en_kernel, _, _, _ = str_to_kernel_set(
        kernels, "mc", hm
    )
    args = from_mask_to_args(hyps, cutoffs, hm)

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)
    env1 = env1[0][0]
    env2 = env2[0][0]

    reference = kernel(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
    result = kernel(env1, env2, d1, d2, *args)
    assert isclose(reference, result, rtol=tol)

    reference = kg(env1, env2, d1, d2, *args, quadratic_cutoff_bound)
    result = kg(env1, env2, d1, d2, *args)
    assert isclose(reference[0], result[0], rtol=tol)
    assert isclose(reference[1], result[1], rtol=tol).all()

    reference = en_kernel(env1, env2, *args, quadratic_cutoff_bound)
    result = en_kernel(env1, env2, *args)
    assert isclose(reference, result, rtol=tol)

    reference = force_en_kernel(env1, env2, d1, *args, quadratic_cutoff_bound)
    result = force_en_kernel(env1, env2, d1, *args)
    assert isclose(reference, result, rtol=tol)
Example #26
def en_kern_vec_unit(name,
                     s,
                     e,
                     x,
                     kernel,
                     hyps,
                     cutoffs=None,
                     hyps_mask=None):
    """
    Compute energy kernel vector, comparing input environment to all environments
    in the GP's training set.
    :param training_data: Set of atomic environments to compare against
    :param kernel:
    :param x: data point to compare against kernel matrix
    :type x: AtomicEnvironment
    :param hyps: list of hyper-parameters
    :param cutoffs: The cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters

    :return: kernel vector
    :rtype: np.ndarray
    """

    training_data = _global_training_data[name]

    ds = [1, 2, 3]
    size = (e - s) * 3
    k_v = np.zeros(size, )

    args = from_mask_to_args(hyps, hyps_mask, cutoffs)

    for m_index in range(size):
        x_2 = training_data[int(math.floor(m_index / 3)) + s]
        d_2 = ds[m_index % 3]
        k_v[m_index] = kernel(x_2, x, d_2, *args)

    return k_v
Example #27
def test_force_en(kernel_name, nbond, ntriplet, constraint):
    """Check that the analytical force/en kernel matches finite difference of
    energy kernel."""

    cutoffs = np.array([1, 1])
    delta = 1e-8
    env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(
        cutoffs, delta)

    # set hyperparameters
    d1 = 1

    hyps, hm, cut = generate_hm(nbond, ntriplet, cutoffs, constraint)
    args0 = from_mask_to_args(hyps, hm, cutoffs)

    force_en_kernel = stk[kernel_name + "_force_en"]
    en_kernel = stk[kernel_name + "_en"]
    if bool('two' in kernel_name) != bool('three' in kernel_name):

        # check force kernel
        calc1 = en_kernel(env1_2, env2_1, *args0)
        calc2 = en_kernel(env1_1, env2_1, *args0)

        kern_finite_diff = (calc1 - calc2) / delta
        if ('two' in kernel_name):
            kern_finite_diff /= 2
        else:
            kern_finite_diff /= 3
    else:
        en2_kernel = stk['two_body_mc_en']
        en3_kernel = stk['three_body_mc_en']
        # check force kernel
        hm2 = deepcopy(hm)
        hm3 = deepcopy(hm)
        if ('map' in hm):
            hm2['original'] = np.hstack(
                [hm2['original'][0:nbond * 2], hm2['original'][-1]])
            hm2['map'] = np.array([1, 3, 4])
            hm3['original'] = hm3['original'][nbond * 2:]
            hm3['map'] = np.array([1, 3, 4])
            nbond = 1

        hm2['ntriplet'] = 0
        hm3['nbond'] = 0

        args2 = from_mask_to_args(hyps[0:nbond * 2], hm2, cutoffs)

        calc1 = en2_kernel(env1_2, env2_1, *args2)
        calc2 = en2_kernel(env1_1, env2_1, *args2)
        kern_finite_diff = (calc1 - calc2) / 2.0 / delta

        args3 = from_mask_to_args(hyps[nbond * 2:-1], hm3, cutoffs)

        calc1 = en3_kernel(env1_2, env2_1, *args3)
        calc2 = en3_kernel(env1_1, env2_1, *args3)
        kern_finite_diff += (calc1 - calc2) / 3.0 / delta

    kern_analytical = force_en_kernel(env1_1, env2_1, d1, *args0)

    tol = 1e-4
    assert (np.isclose(-kern_finite_diff, kern_analytical, atol=tol))
Example #28
def test_force(kernels, diff_cutoff):
    """Check that the analytical force kernel matches finite difference of
    energy kernel."""

    d1 = 1
    d2 = 2
    tol = 1e-3
    cell = 1e7 * np.eye(3)
    delta = 1e-4
    cutoffs = np.ones(3) * 1.2

    np.random.seed(10)

    cutoffs, hyps, hm = generate_diff_hm(kernels, diff_cutoff)
    kernel, kg, en_kernel, fek, _, _, _ = str_to_kernel_set(kernels, "mc", hm)

    nterm = 0
    for term in ["twobody", "threebody", "manybody"]:
        if term in kernels:
            nterm += 1

    np.random.seed(10)
    env1 = generate_mb_envs(cutoffs, cell, delta, d1, hm)
    env2 = generate_mb_envs(cutoffs, cell, delta, d2, hm)

    # check force kernel
    kern_finite_diff = 0
    if "manybody" in kernels and len(kernels) == 1:
        _, _, enm_kernel, _, _, _, _ = str_to_kernel_set(["manybody"], "mc", hm)
        mhyps, mcutoffs, mhyps_mask = Parameters.get_component_mask(
            hm, "manybody", hyps=hyps
        )
        margs = from_mask_to_args(mhyps, mcutoffs, mhyps_mask)
        cal = 0
        for i in range(3):
            for j in range(len(env1[0])):
                cal += enm_kernel(env1[1][i], env2[1][j], *margs)
                cal += enm_kernel(env1[2][i], env2[2][j], *margs)
                cal -= enm_kernel(env1[1][i], env2[2][j], *margs)
                cal -= enm_kernel(env1[2][i], env2[1][j], *margs)
        kern_finite_diff += cal / (4 * delta ** 2)
    elif "manybody" in kernels:
        # TODO: Establish why 2+3+MB fails (numerical error?)
        return

    if "twobody" in kernels:
        ntwobody = 1
        _, _, en2_kernel, _, _, _, _ = str_to_kernel_set(["twobody"], "mc", hm)
        bhyps, bcutoffs, bhyps_mask = Parameters.get_component_mask(
            hm, "twobody", hyps=hyps
        )
        args2 = from_mask_to_args(bhyps, bcutoffs, bhyps_mask)

        calc1 = en2_kernel(env1[1][0], env2[1][0], *args2)
        calc2 = en2_kernel(env1[2][0], env2[2][0], *args2)
        calc3 = en2_kernel(env1[1][0], env2[2][0], *args2)
        calc4 = en2_kernel(env1[2][0], env2[1][0], *args2)
        kern_finite_diff += 4 * (calc1 + calc2 - calc3 - calc4) / (4 * delta ** 2)
    else:
        ntwobody = 0

    if "threebody" in kernels:
        _, _, en3_kernel, _, _, _, _ = str_to_kernel_set(["threebody"], "mc", hm)

        thyps, tcutoffs, thyps_mask = Parameters.get_component_mask(
            hm, "threebody", hyps=hyps
        )
        args3 = from_mask_to_args(thyps, tcutoffs, thyps_mask)

        calc1 = en3_kernel(env1[1][0], env2[1][0], *args3)
        calc2 = en3_kernel(env1[2][0], env2[2][0], *args3)
        calc3 = en3_kernel(env1[1][0], env2[2][0], *args3)
        calc4 = en3_kernel(env1[2][0], env2[1][0], *args3)
        kern_finite_diff += 9 * (calc1 + calc2 - calc3 - calc4) / (4 * delta ** 2)

    args = from_mask_to_args(hyps, cutoffs, hm)
    kern_analytical = kernel(env1[0][0], env2[0][0], d1, d2, *args)

    assert isclose(kern_finite_diff, kern_analytical, rtol=tol)
Example #29
File: rbcm.py  Project: mailhexu/flare
    def predict(self, x_t: AtomicEnvironment, d: int) -> [float, float]:
        """
        Predict a force component of the central atom of a local environment.

        Performs prediction over each expert and combines results.

        Weights beta_i for the experts are computed on a per-expert basis following
        Cao and Fleet 2014 (https://arxiv.org/abs/1410.7827), which Liu et al.
        (https://arxiv.org/pdf/1806.00720.pdf) describe as "the difference in
        differential entropy between the prior and the posterior".

        Args:
            x_t (AtomicEnvironment): Input local environment.
            d (int): Force component to predict (1=x, 2=y, 3=z).
        Return:
            (float, float): Mean and epistemic variance of the prediction.
        """

        assert d in [1, 2, 3], "d must be 1, 2, or 3."

        # Kernel vector allows for evaluation of atomic environments.
        if self.parallel and not self.per_atom_par:
            n_cpus = self.n_cpus
        else:
            n_cpus = 1

        self.sync_data()

        k_v = []
        for i in range(self.n_experts):
            k_v += [
                get_kernel_vector(
                    f"{self.name}_{i}",
                    self.kernel,
                    self.energy_force_kernel,
                    x_t,
                    d,
                    self.hyps,
                    cutoffs=self.cutoffs,
                    hyps_mask=self.hyps_mask,
                    n_cpus=n_cpus,
                    n_sample=self.n_sample,
                )
            ]

        # Guarantee that alpha is up to date with training set
        self.check_L_alpha()

        # get predictive mean
        variance_rbcm = 0
        mean = 0.0
        var = 0.0
        beta = 0.0  # Represents expert weight

        args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)

        for i in range(self.n_experts):

            mean_i = np.matmul(self.alpha[i], k_v[i])

            # get predictive variance without cholesky (possibly faster)
            # pass args to kernel based on if mult. hyperparameters in use

            self_kern = self.kernel(x_t, x_t, d, d, *args)
            var_i = self_kern - np.matmul(
                np.matmul(k_v[i].T, self.ky_mat_inv[i]), k_v[i])

            beta_i = 0.5 * (self.log_prior_var - np.log(var_i)
                            )  # This expert's weight
            mean += mean_i * beta_i / var_i
            var += beta_i / var_i
            beta += beta_i

        var += (1 - beta) / self.prior_variance
        pred_var = 1.0 / var
        pred_mean = pred_var * mean

        return pred_mean, pred_var
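Note: each expert contributes with weight beta_i = 0.5 * (log(prior_var) - log(var_i)), predictions are combined by precision weighting, and a final (1 - sum(beta)) / prior_variance term corrects toward the prior. A self-contained sketch of just that combination step, given already-computed per-expert means and variances (made-up numbers):

import numpy as np

prior_variance = 1.0
log_prior_var = np.log(prior_variance)

# hypothetical per-expert predictions (mean_i, var_i)
expert_means = np.array([0.52, 0.48, 0.55])
expert_vars = np.array([0.10, 0.20, 0.15])

mean, var, beta = 0.0, 0.0, 0.0
for mean_i, var_i in zip(expert_means, expert_vars):
    beta_i = 0.5 * (log_prior_var - np.log(var_i))   # this expert's weight
    mean += beta_i * mean_i / var_i
    var += beta_i / var_i
    beta += beta_i

var += (1 - beta) / prior_variance                   # correction toward the prior
pred_var = 1.0 / var
pred_mean = pred_var * mean
print(pred_mean, pred_var)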
Example #30
def get_ky_and_hyp_pack(
    name,
    s1,
    e1,
    s2,
    e2,
    same: bool,
    hyps: np.ndarray,
    kernel_grad,
    cutoffs=None,
    hyps_mask=None,
):
    """
    Computes a block of the ky matrix and its derivative with respect to the
    hyperparameters. If the cpu setup is None, it uses as many cpus as possible.

    :param hyps: list of hyper-parameters
    :param name: name of the gp instance.
    :param kernel_grad: function object of the kernel gradient
    :param cutoffs: The cutoff values used for the atomic environments
    :type cutoffs: list of 2 float numbers
    :param hyps_mask: dictionary used for multi-group hyperparameters

    :return: hyp_mat, ky_mat
    """

    # assume sigma_n is the final hyperparameter
    _, non_noise_hyps, _ = obtain_noise_len(hyps, hyps_mask)

    # initialize matrices
    size1 = (e1 - s1) * 3
    size2 = (e2 - s2) * 3
    k_mat = np.zeros([size1, size2])
    hyp_mat = np.zeros([non_noise_hyps, size1, size2])

    args = from_mask_to_args(hyps, cutoffs, hyps_mask)

    ds = [1, 2, 3]

    training_data = _global_training_data[name]
    # calculate elements
    for m_index in range(size1):
        x_1 = training_data[int(math.floor(m_index / 3)) + s1]
        d_1 = ds[m_index % 3]

        if same:
            lowbound = m_index
        else:
            lowbound = 0
        for n_index in range(lowbound, size2):
            x_2 = training_data[int(math.floor(n_index / 3)) + s2]
            d_2 = ds[n_index % 3]

            # calculate kernel and gradient
            cov = kernel_grad(x_1, x_2, d_1, d_2, *args)

            # store kernel value
            k_mat[m_index, n_index] = cov[0]
            grad = from_grad_to_mask(cov[1], hyps_mask)

            hyp_mat[:, m_index, n_index] = grad
            if same:
                k_mat[n_index, m_index] = cov[0]
                hyp_mat[:, n_index, m_index] = grad

    return hyp_mat, k_mat