Example #1
    def test_compute(self):

        # model
        modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_006"
        model = KIMModel(modelname)

        # training set
        path = Path(__file__).parents[1].joinpath("configs_extxyz/Si_4")
        data = Dataset(path)
        configs = data.get_configs()

        # compute arguments
        compute_arguments = []
        for conf in configs:
            ca = model.create_a_kim_compute_argument()
            compute_arguments.append(
                KIMComputeArguments(
                    ca,
                    conf,
                    supported_species=model.get_supported_species(),
                    influence_distance=model.get_influence_distance(),
                )
            )
        for i, ca in enumerate(compute_arguments):
            ca.compute(model.kim_model)
            energy = ca.get_energy()
            forces = ca.get_forces()[:3]

            assert energy == pytest.approx(ref_energies[i], 1e-6)
            assert np.allclose(forces, ref_forces[i])
Example #2
def test_lj():
    model = LennardJones()

    # set params directly
    model.set_fitting_params(sigma=[[1.1, "fix"]], epsilon=[[2.1, None, 3.0]])

    model.update_model_params()
    # model.echo_model_params()
    # model.echo_fitting_params()

    # set params by reading from a file (same effect as setting them directly)
    # fname = 'tmp_lj.params'
    # write_tmp_params(fname)
    # model.read_fitting_params(fname)
    # delete_tmp_params(fname)

    calc = Calculator(model)

    dset = Dataset(order_by_species=False)
    fname = "./configs_extxyz/MoS2/MoS2_energy_forces_stress.xyz"
    dset.read(fname)
    configs = dset.get_configs()

    # exercise energy only, then energy + forces, then energy + forces + stress
    energy_forces_stress(calc, configs, True, False, False)
    energy_forces_stress(calc, configs, True, True, False)
    energy_forces_stress(calc, configs, True, True, True)

    # params relation callback (a sketch of ``params_relation`` follows this test)
    model.set_params_relation_callback(params_relation)
    x0 = calc.get_opt_params()
    calc.update_opt_params(x0)
    sigma = model.get_model_params("sigma")
    epsilon = model.get_model_params("epsilon")
    assert np.allclose(sigma * 2, epsilon)
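

# The test above assumes a ``params_relation`` helper defined elsewhere in the
# module. A minimal sketch consistent with the final assert (the callback
# signature and the way parameters are indexed are assumptions -- check the
# KLIFF API before relying on this):
def params_relation(model_params):
    """Constrain epsilon to be twice sigma."""
    sigma = model_params["sigma"]
    epsilon = model_params["epsilon"]
    epsilon[0] = 2 * sigma[0]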
Example #3
    def test_compute(self):
        test_file_path = Path(__file__).parents[1].joinpath("configs_extxyz")
        tset = Dataset(test_file_path.joinpath("Si_4"))
        configs = tset.get_configs()

        modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_005"
        model = KIMModel(modelname)

        # calculator
        calc = Calculator(model)
        compute_arguments = calc.create(configs)

        for i, ca in enumerate(compute_arguments):
            calc.compute(ca)
            energy = calc.get_energy(ca)
            forces = calc.get_forces(ca)[:3]

            assert energy == pytest.approx(ref_energies[i], 1e-6)
            assert np.allclose(forces, ref_forces[i])
Example #4
def init():
    model = KIMModel(model_name="SW_StillingerWeber_1985_Si__MO_405512056662_006")

    # Cannot set them all with a single call, because the assertions depend on the
    # order in which the parameters are set
    model.set_opt_params(A=[[5.0]])
    model.set_opt_params(B=[["default"]])
    model.set_opt_params(sigma=[[2.0951, "fix"]])
    model.set_opt_params(gamma=[[1.5]])

    path = Path(__file__).parent.joinpath("configs_extxyz/Si_4")
    tset = Dataset(path)
    configs = tset.get_configs()

    calc = Calculator(model)
    calc.create(configs, use_energy=True, use_forces=True)

    loss = Loss(calc, residual_fn=residual_fn, nprocs=1)

    return loss
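

# ``init()`` assumes a ``residual_fn`` defined elsewhere. A minimal sketch using
# the residual signature documented by KLIFF (the simple weighting in the body
# is an illustrative assumption, not KLIFF's built-in residual):
def residual_fn(identifier, natoms, weight, prediction, reference, data):
    """Return the weighted difference between predictions and reference values."""
    return weight * (prediction - reference)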
Example #5
def test_main():

    # training set
    tset = Dataset()
    tset.read("./configs_extxyz/Si_4")
    configs = tset.get_configs()

    # model
    modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_005"
    model = KIM(modelname)

    # calculator
    calc = Calculator(model)
    compute_arguments = calc.create(configs)

    for i, ca in enumerate(compute_arguments):
        calc.compute(ca)
        energy = calc.get_energy(ca)
        forces = calc.get_forces(ca)[:3]

        assert energy == pytest.approx(ref_energies[i], 1e-6)
        assert np.allclose(forces, ref_forces[i])

    # Cannot set them all with a single call, because the assertions below depend
    # on the order in which the parameters are set
    model.set_fitting_params(sigma=[["default"]])
    model.set_fitting_params(A=[["default", "fix"]])
    model.set_fitting_params(B=[["default"]])

    # update params (A is fixed, so the optimizable parameters are sigma and B)
    x0 = calc.get_opt_params()
    x1 = [i + 0.1 for i in x0]
    calc.update_opt_params(x1)
    params = model.inquire_params()
    assert np.allclose(params["sigma"].get_value(), [x1[0]])
    assert np.allclose(params["B"].get_value(), [x1[1]])
    # restore params
    calc.update_opt_params(x0)
Example #6
"""
Compute the root-mean-square error (RMSE) of a model prediction and reference values in
the dataset.
"""

from kliff.analyzers import EnergyForcesRMSE
from kliff.calculators import Calculator
from kliff.dataset import Dataset
from kliff.models import KIMModel
from kliff.utils import download_dataset

model = KIMModel(model_name="SW_StillingerWeber_1985_Si__MO_405512056662_005")

# load the trained model back
# model.load("kliff_model.yaml")

dataset_path = download_dataset(dataset_name="Si_training_set_4_configs")
tset = Dataset(dataset_path)
configs = tset.get_configs()

calc = Calculator(model)
calc.create(configs)

analyzer = EnergyForcesRMSE(calc)
analyzer.run(verbose=2, sort="energy")
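

# Conceptually, the energy RMSE reported above is the root-mean-square difference
# between predicted and reference energies. A minimal sketch of the formula only
# (not the analyzer's actual implementation, whose normalization conventions may
# differ):
import numpy as np


def energy_rmse(e_pred, e_ref):
    """RMSE = sqrt(mean((E_pred - E_ref)^2))."""
    e_pred = np.asarray(e_pred)
    e_ref = np.asarray(e_ref)
    return np.sqrt(np.mean((e_pred - e_ref) ** 2))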
Example #7
def train_fn(rank, world_size):

    descriptor = SymmetryFunction(cut_name="cos",
                                  cut_dists={"Si-Si": 5.0},
                                  hyperparams="set30",
                                  normalize=True)
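
    # If you prefer, a dictionary of custom hyperparameters could be passed
    # instead of the built-in "set30" (the values here are hypothetical, and the
    # exact dictionary schema is an assumption -- check the KLIFF docs):
    #
    # my_hyperparams = {
    #     "g2": [{"eta": 0.0009, "Rs": 0.0}, {"eta": 0.01, "Rs": 0.0}],
    #     "g4": [{"zeta": 1, "lambda": -1, "eta": 0.0001}],
    # }
    # descriptor = SymmetryFunction(cut_name="cos",
    #                               cut_dists={"Si-Si": 5.0},
    #                               hyperparams=my_hyperparams,
    #                               normalize=True)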

    ##########################################################################################
    # ``cut_name`` and ``cut_dists`` tell the descriptor what type of cutoff function to
    # use and what the cutoff distances are. ``hyperparams`` specifies the set of
    # hyperparameters used in the symmetry function descriptor. If you prefer, you can
    # provide a dictionary of your own hyperparameters, as sketched in the commented
    # example above. Finally, ``normalize`` indicates that the generated fingerprints
    # should be normalized by first subtracting the mean and then dividing by the
    # standard deviation. This normalization typically makes the NN model easier to
    # optimize.
    #
    # We can then build the NN model on top of the descriptor.

    N1 = 10
    N2 = 10
    model = NeuralNetwork(descriptor)
    model.add_layers(
        # first hidden layer
        nn.Linear(descriptor.get_size(), N1),
        nn.Tanh(),
        # second hidden layer
        nn.Linear(N1, N2),
        nn.Tanh(),
        # output layer
        nn.Linear(N2, 1),
    )
    model.set_save_metadata(prefix="./my_kliff_model", start=5, frequency=2)

    ##########################################################################################
    # In the above code, we build a NN model with an input layer, two hidden layers, and an
    # output layer. The ``descriptor`` carries the information of the input layer, so it
    # does not need to be specified explicitly. For each hidden layer, we first do a linear
    # transformation using ``nn.Linear(size_in, size_out)`` (essentially carrying out
    # :math:`y = xW+b`, where :math:`W` is the weight matrix of size ``size_in`` by
    # ``size_out``, and :math:`b` is a vector of size ``size_out``). Then we apply the
    # hyperbolic tangent activation function ``nn.Tanh()`` to the output of the Linear
    # layer (i.e. :math:`y`) to add nonlinearity. We use a Linear layer for the output
    # layer as well, but unlike the hidden layers, no activation function is applied here.
    # The input size ``size_in`` of the first hidden layer must be the size of the
    # descriptor, which is obtained using ``descriptor.get_size()``. For all other layers
    # (hidden or output), the input size must be equal to the output size of the previous
    # layer. The output size of the output layer must be 1 so that the NN model gives the
    # energy of each atom.
    #
    # The ``set_save_metadata`` function call specifies where to save intermediate models
    # during the optimization (discussed below), at which epoch to start saving, and how
    # often to save.
    #
    #
    # Training set and calculator
    # ---------------------------
    #
    # The training set and the calculator are the same as explained in :ref:`tut_kim_sw`.
    # The only difference is that we need to use
    # :mod:`~kliff.calculators.CalculatorTorch()`, which is targeted for the NN model.
    # Also, its ``create()`` method takes an argument ``reuse`` that indicates whether to
    # reuse the fingerprints generated by the descriptor if they are present.

    # training set
    dataset_name = "Si_training_set/varying_alat"
    tset = Dataset()
    tset.read(dataset_name)
    configs = tset.get_configs()
    print("Number of configurations:", len(configs))

    # calculator
    calc = CalculatorTorchDDPCPU(model, rank, world_size)
    calc.create(configs, reuse=True)

    ##########################################################################################
    # Loss function
    # -------------
    #
    # KLIFF uses a loss function to quantify the difference between the training data and
    # potential predictions, and uses minimization algorithms to reduce the loss as much as
    # possible. In the following code snippet, we create a loss and minimize it using the
    # ``Adam`` optimizer. The Adam optimizer supports minimization using `mini-batches` of
    # data; here we use ``100`` configurations in each minimization step (the training set
    # has a total of 400 configurations, as can be seen above) and run through the training
    # set for ``10`` epochs. The learning rate ``lr`` used here is ``0.01``; typically one
    # needs to experiment with it to find a value that drives the loss down in a reasonable
    # time.

    loss = Loss(calc, residual_data={"forces_weight": 0.3})
    result = loss.minimize(method="Adam",
                           num_epochs=10,
                           batch_size=100,
                           lr=0.01)
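

# A per-rank function like ``train_fn`` is typically launched with one process
# per rank, e.g. via torch.multiprocessing (an illustrative sketch; the
# surrounding example may wire up the process group differently):
import torch.multiprocessing as mp

if __name__ == "__main__":
    world_size = 2  # number of CPU processes to spawn
    # mp.spawn calls train_fn(rank, world_size) with rank = 0..world_size-1
    mp.spawn(train_fn, args=(world_size,), nprocs=world_size, join=True)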
Example #8
from kliff.models import KIM
from kliff.calculators import Calculator
from kliff.dataset import Dataset
from kliff.analyzers import EnergyForcesRMSE


model = KIM(model_name="SW_StillingerWeber_1985_Si__MO_405512056662_005")
model.load("kliff_model.pkl")

tset = Dataset()
dataset_name = "Si_training_set"
tset.read(dataset_name)
configs = tset.get_configs()

calc = Calculator(model)
calc.create(configs)

analyzer = EnergyForcesRMSE(calc)
analyzer.run(verbose=2, sort="energy")
Example #9
def test_dataset():
    directory = "./configs_extxyz/MoS2"
    tset = Dataset()
    tset.read(directory)
    configs = tset.get_configs()
    assert len(configs) == 3