Code example #1
File: __init__.py  Project: mkphuthi/amptorch
    def calculate(self, atoms, properties, system_changes):
        Calculator.calculate(self, atoms, properties, system_changes)
        dataset = TestDataset(
            images=atoms,
            unique_atoms=self.training_elements,
            descriptor=self.model.descriptor,
            Gs=self.Gs,
            fprange=self.fp_scaling,
            label=self.model.label,
        )
        fp_length = dataset.fp_length()
        architecture = copy.copy(self.model.structure)
        architecture.insert(0, fp_length)
        unique_atoms = dataset.unique()
        batch_size = len(dataset)
        dataloader = DataLoader(
            dataset, batch_size, collate_fn=dataset.collate_test, shuffle=False
        )
        if properties == ["energy"]:
            model = FullNN(self.training_elements, architecture, "cpu", forcetraining=False)
        elif properties == ["forces"]:
            model = FullNN(self.training_elements, architecture, "cpu", forcetraining=True)
        model.load_state_dict(torch.load(self.label))
        model.eval()

        for inputs in dataloader:
            for element in unique_atoms:
                inputs[0][element][0] = inputs[0][element][0].requires_grad_(True)
            energy, forces = model(inputs)
        energy = (energy * self.target_sd) + self.target_mean
        energy = np.concatenate(energy.detach().numpy())
        if properties == ["forces"]:
            forces = (forces * self.target_sd).detach().numpy()

        if self.lj:
            image_hash = hash_images([atoms])
            self.lj_model.neighborlist.calculate_items(image_hash)
            lj_energy, lj_forces, _ = self.lj_model.image_pred(
                atoms, self.fitted_params, self.params_dict
            )
            lj_energy = np.squeeze(lj_energy)
            energy += lj_energy
            if properties == ["forces"]:
                forces += lj_forces

        self.results["energy"] = float(energy)
        self.results["forces"] = forces
Code example #2
File: amptorch_test.py  Project: ray38/SIMPLE-NN-MCSH
training_data = AtomsDataset(
    images,
    SNN_Gaussian,
    Gs,
    forcetraining=forcetraining,
    label=label,
    cores=1,
    delta_data=None,
)
unique_atoms = training_data.elements
fp_length = training_data.fp_length

# define device
device = "cpu"

net = NeuralNetRegressor(
    module=FullNN(unique_atoms, [fp_length, 3, 10], device, forcetraining=forcetraining),
    criterion=CustomMSELoss,
    criterion__force_coefficient=0.1,
    optimizer=torch.optim.LBFGS,
    optimizer__line_search_fn="strong_wolfe",
    lr=1e-3,
    batch_size=len(images),
    max_epochs=20,
    iterator_train__collate_fn=collate_amp,
    iterator_train__shuffle=True,
    iterator_valid__collate_fn=collate_amp,
    device=device,
    # train_split=0,
    verbose=1,
    callbacks=[
        EpochScoring(
            forces_score,
            on_train=False,
            use_caching=True,
            target_extractor=target_extractor,
        ),
        EpochScoring(
            energy_score,
            on_train=False,
            use_caching=True,
            target_extractor=target_extractor,
        ),
    ],
)
Code example #3
def train_calc(inputs):
    images, filename, file_dir, Gs, lj, forcesonly, scaling = inputs

    class train_end_load_best_valid_loss(skorch.callbacks.base.Callback):
        def on_train_end(self, net, X, y):
            net.load_params(
                "./results/checkpoints/{}_params.pt".format(filename))

    cp = Checkpoint(
        monitor="forces_score_best",
        fn_prefix="./results/checkpoints/{}_".format(filename),
    )

    if not os.path.exists(file_dir):
        os.makedirs(file_dir, exist_ok=True)

    forcetraining = True
    training_data = AtomsDataset(
        images,
        SNN_Gaussian,
        Gs,
        forcetraining=forcetraining,
        label=filename,
        cores=1,
        lj_data=None,
        scaling=scaling,
    )
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    torch.set_num_threads(1)

    net = NeuralNetRegressor(
        module=FullNN(unique_atoms, [fp_length, 3, 20],
                      device,
                      forcetraining=forcetraining),
        criterion=CustomMSELoss,
        criterion__force_coefficient=0.04,
        optimizer=torch.optim.LBFGS,
        lr=1e-1,
        batch_size=len(training_data),
        max_epochs=200,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=False,
        iterator_valid__collate_fn=collate_amp,
        iterator_valid__shuffle=False,
        device=device,
        train_split=CVSplit(cv=5, random_state=1),
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=False,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=False,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )
    calc = AMP(training_data, net, label=filename)
    calc.train()
    return [training_data, net, filename]
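
train_calc() above unpacks all of its arguments from a single tuple. A hypothetical driver call, purely for illustration (every value below is a placeholder, not taken from the original project):

# placeholders: images and Gs as built in the other examples; scaling is whatever
# scaling spec AtomsDataset expects; the two False flags map to lj and forcesonly
inputs = (images, "example_run", "./results", Gs, False, False, scaling)
training_data, net, filename = train_calc(inputs)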
Code example #4
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous Mathematica
    calculations.
    """

    #: Making the list of non-periodic images
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]

    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}

    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5

    elements = ["O", "Pd", "Cu"]

    G = make_symmetry_functions(elements=elements,
                                type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    amp_images = amp_hash(images)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(amp_images, calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor, amp_images)
    np.random.seed(1)
    O_weights_1 = np.random.rand(10, 2)
    O_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(2)
    Pd_weights_1 = np.random.rand(10, 2)
    Pd_weights_2 = np.random.rand(1, 3).reshape(-1, 1)
    np.random.seed(3)
    Cu_weights_1 = np.random.rand(10, 2)
    Cu_weights_2 = np.random.rand(1, 3).reshape(-1, 1)

    weights = OrderedDict([
        ("O", OrderedDict([(1, O_weights_1), (2, O_weights_2)])),
        ("Pd", OrderedDict([(1, Pd_weights_1), (2, Pd_weights_2)])),
        ("Cu", OrderedDict([(1, Cu_weights_1), (2, Cu_weights_2)])),
    ])

    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])

    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="tanh",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )

    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)

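    # Amp stores each layer's parameters as one (n_inputs + 1) x n_outputs array whose
    # last row is the bias; split that row off and transpose the rest to match
    # torch.nn.Linear's (out_features, in_features) weight layout.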
    torch_O_weights_1 = torch.FloatTensor(O_weights_1[:-1, :]).t()
    torch_O_bias_1 = torch.FloatTensor(O_weights_1[-1, :])
    torch_O_weights_2 = torch.FloatTensor(O_weights_2[:-1, :]).t()
    torch_O_bias_2 = torch.FloatTensor(O_weights_2[-1, :])
    torch_Pd_weights_1 = torch.FloatTensor(Pd_weights_1[:-1, :]).t()
    torch_Pd_bias_1 = torch.FloatTensor(Pd_weights_1[-1, :])
    torch_Pd_weights_2 = torch.FloatTensor(Pd_weights_2[:-1, :]).t()
    torch_Pd_bias_2 = torch.FloatTensor(Pd_weights_2[-1, :])
    torch_Cu_weights_1 = torch.FloatTensor(Cu_weights_1[:-1, :]).t()
    torch_Cu_bias_1 = torch.FloatTensor(Cu_weights_1[-1, :])
    torch_Cu_weights_2 = torch.FloatTensor(Cu_weights_2[:-1, :]).t()
    torch_Cu_bias_2 = torch.FloatTensor(Cu_weights_2[-1, :])

    device = "cpu"
    dataset = AtomsDataset(
        images,
        descriptor=Gaussian,
        cores=1,
        label="consistency",
        Gs=Gs,
        forcetraining=True,
    )

    fp_length = dataset.fp_length
    batch_size = len(dataset)
    dataloader = DataLoader(dataset,
                            batch_size,
                            collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
    model.state_dict()["elementwise_models.O.model_net.0.weight"].copy_(
        torch_O_weights_1)
    model.state_dict()["elementwise_models.O.model_net.0.bias"].copy_(
        torch_O_bias_1)
    model.state_dict()["elementwise_models.O.model_net.2.weight"].copy_(
        torch_O_weights_2)
    model.state_dict()["elementwise_models.O.model_net.2.bias"].copy_(
        torch_O_bias_2)
    model.state_dict()["elementwise_models.Pd.model_net.0.weight"].copy_(
        torch_Pd_weights_1)
    model.state_dict()["elementwise_models.Pd.model_net.0.bias"].copy_(
        torch_Pd_bias_1)
    model.state_dict()["elementwise_models.Pd.model_net.2.weight"].copy_(
        torch_Pd_weights_2)
    model.state_dict()["elementwise_models.Pd.model_net.2.bias"].copy_(
        torch_Pd_bias_2)
    model.state_dict()["elementwise_models.Cu.model_net.0.weight"].copy_(
        torch_Cu_weights_1)
    model.state_dict()["elementwise_models.Cu.model_net.0.bias"].copy_(
        torch_Cu_bias_1)
    model.state_dict()["elementwise_models.Cu.model_net.2.weight"].copy_(
        torch_Cu_weights_2)
    model.state_dict()["elementwise_models.Cu.model_net.2.bias"].copy_(
        torch_Cu_bias_2)
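    # Amp's tanh activation is applied to the output node as well (activation="tanh"
    # above), so wrap each element-wise MLP with an extra Tanh so the torch model's
    # output activation matches.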
    import torch.nn as nn
    for name, layer in model.named_modules():
        if isinstance(layer, MLP):
            layer.model_net = nn.Sequential(layer.model_net, Tanh())

    for batch in dataloader:
        x = to_tensor(batch[0], device)
        y = to_tensor(batch[1], device)
        energy_pred, force_pred = model(x)
    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.0001, (
                "The predicted force of image % i, direction % i is wrong! Values: %s vs %s"
                % (idx + 1, idx_d, value, force_pred.tolist()[idx][idx_d]))
    print("Force predictions are correct!")
Code example #5
def test_skorch():
    distances = np.linspace(2, 5, 100)
    label = "skorch_example"
    images = []
    energies = []
    forces = []
    for l in distances:
        image = Atoms(
            "CuCO",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), 0),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), 0),
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)
        energies.append(image.get_potential_energy())
        forces.append(image.get_forces())

    energies = np.array(energies)
    forces = np.concatenate(np.array(forces))
    Gs = {}
    Gs["G2_etas"] = np.logspace(np.log10(0.05), np.log10(5.0), num=2)
    Gs["G2_rs_s"] = [0] * 2
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [+1.0, -1]
    Gs["cutoff"] = 6.5

    forcetraining = True
    training_data = AtomsDataset(
        images,
        SNN_Gaussian,
        Gs,
        forcetraining=forcetraining,
        label=label,
        cores=1,
        delta_data=None,
    )
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    net = NeuralNetRegressor(
        module=FullNN(unique_atoms, [fp_length, 2, 2],
                      device,
                      forcetraining=forcetraining),
        criterion=CustomMSELoss,
        criterion__force_coefficient=0.3,
        optimizer=torch.optim.LBFGS,
        optimizer__line_search_fn="strong_wolfe",
        lr=1,
        batch_size=100,
        max_epochs=150,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=True,
        iterator_valid__collate_fn=collate_amp,
        device=device,
        train_split=0,
        verbose=0,
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )
    calc = AMP(training_data, net, "test")
    calc.train(overwrite=True)
    num_of_atoms = 3
    calculated_energies = np.array(
        [calc.get_potential_energy(image) for image in images])
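    # per-atom energy RMSE: each image's error is divided by the atom count before
    # squaring, then averaged over all images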
    energy_rmse = np.sqrt(
        (((calculated_energies - energies) / num_of_atoms)**2).sum() /
        len(images))

    calculated_forces = np.concatenate(
        np.array([calc.get_forces(image) for image in images]))
    force_rmse = np.sqrt((((calculated_forces - forces))**2).sum() /
                         (3 * num_of_atoms * len(images)))
    l1_force = np.sum(np.abs(calculated_forces - forces) / num_of_atoms, 1)
    idx = 0
    force_loss_image = np.zeros((len(calculated_energies), 1))
    for i in range(len(calculated_energies)):
        force_loss_image[i] = np.sum(l1_force[idx:idx + 3])
        idx += 3
    force_loss_image /= 3

    reported_energy_score = net.history[-1]["energy_score"]
    reported_forces_score = net.history[-1]["forces_score"]
    assert force_rmse <= 0.005, "Force training convergence not met!"
    assert energy_rmse <= 0.005, "Energy training convergence not met!"
    assert round(reported_energy_score,
                 4) == round(energy_rmse,
                             4), "Shuffled reported energy scores incorrect!"
    assert round(reported_forces_score,
                 4) == round(force_rmse,
                             4), "Shuffled reported forces score incorrect!"
Code example #6
def test_skorch_delta():
    from amptorch.skorch_model import AMP

    cp = Checkpoint(monitor="valid_loss_best", fn_prefix="valid_best_")

    distances = np.linspace(2, 5, 10)
    label = "skorch_example"
    images = []
    energies = []
    forces = []
    for l in distances:
        image = Atoms(
            "CuCO",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), 0),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), 0),
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)
        energies.append(image.get_potential_energy())
        forces.append(image.get_forces())

    image = Atoms("CuC", [(-1, 1, 0), (1, 1, 0)])
    image.set_cell([10, 10, 10])
    image.wrap(pbc=True)
    image.set_calculator(EMT())
    images.append(image)
    energies.append(image.get_potential_energy())
    forces.append(image.get_forces())

    energies = np.array(energies)
    forces = np.concatenate(np.array(forces))

    Gs = {}
    Gs["G2_etas"] = np.logspace(np.log10(0.05), np.log10(5.0), num=2)
    Gs["G2_rs_s"] = [0] * 2
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [+1.0, -1]
    Gs["cutoff"] = 6.5

    params = {
        "C": {"re": 0.972, "D": 6.379, "sig": 0.477},
        "O": {"re": 1.09, "D": 8.575, "sig": 0.603},
        "Cu": {"re": 2.168, "D": 3.8386, "sig": 1.696},
    }

    morse_model = morse_potential(images, params, Gs["cutoff"], label)
    morse_energies, morse_forces, num_atoms = morse_model.morse_pred(
        images, params)
    morse_data = [morse_energies, morse_forces, num_atoms, params, morse_model]
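    # The Morse predictions are passed to AtomsDataset as delta_data below, i.e. the
    # network is trained on the residual between the EMT reference data and this
    # Morse baseline (delta learning).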

    forcetraining = True
    training_data = AtomsDataset(
        images,
        SNN_Gaussian,
        Gs,
        forcetraining=forcetraining,
        label=label,
        cores=1,
        delta_data=morse_data,
    )
    batch_size = len(training_data)
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    net = NeuralNetRegressor(
        module=FullNN(
            unique_atoms, [fp_length, 2, 2], device, forcetraining=forcetraining
        ),
        criterion=CustomMSELoss,
        criterion__force_coefficient=0.3,
        optimizer=torch.optim.LBFGS,
        optimizer__line_search_fn="strong_wolfe",
        lr=1e-2,
        batch_size=batch_size,
        max_epochs=100,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=False,
        iterator_valid__collate_fn=collate_amp,
        device=device,
        train_split=0,
        verbose=0,
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )
    calc = AMP(training_data, net, "test")
    calc.train(overwrite=True)
    num_of_atoms = 3
    calculated_energies = np.array(
        [calc.get_potential_energy(image) for idx, image in enumerate(images)]
    )
    energy_rmse = np.sqrt(
        (((calculated_energies - energies) / num_of_atoms) ** 2).sum() / len(images)
    )
    last_energy_score = net.history[-1]["energy_score"]
    assert round(energy_rmse, 4) == round(
        last_energy_score, 4
    ), "Energy errors incorrect!"
    last_forces_score = net.history[-1]["forces_score"]

    calculated_forces = np.concatenate(
        np.array([calc.get_forces(image) for image in images])
    )
    force_rmse = np.sqrt(
        (((calculated_forces - forces)) ** 2).sum() / (3 * num_of_atoms * len(images))
    )
    assert round(force_rmse, 4) == round(
        last_forces_score, 4
    ), "Force errors incorrect!"
Code example #7
def test_skorch_val():
    distances = np.linspace(2, 5, 100)
    label = "example"
    images = []
    energies = []
    forces = []
    for l in distances:
        image = Atoms(
            "CuCCu",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), 0),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), 0),
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)
        energies.append(image.get_potential_energy())
        forces.append(image.get_forces())

    energies = np.array(energies)

    Gs = {}
    Gs["G2_etas"] = np.logspace(np.log10(0.05), np.log10(5.0), num=2)
    Gs["G2_rs_s"] = [0] * 2
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [+1.0, -1]
    Gs["cutoff"] = 6.5

    forcetraining = True
    training_data = AtomsDataset(
        images,
        SNN_Gaussian,
        Gs,
        forcetraining=forcetraining,
        label=label,
        cores=1,
        delta_data=None,
    )
    batch_size = len(training_data)
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    net = NeuralNetRegressor(
        module=FullNN(unique_atoms, [fp_length, 2, 2],
                      device,
                      forcetraining=forcetraining),
        criterion=CustomMSELoss,
        criterion__force_coefficient=0.3,
        optimizer=torch.optim.LBFGS,
        optimizer__line_search_fn="strong_wolfe",
        lr=1e-2,
        batch_size=10,
        max_epochs=20,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=True,
        iterator_valid__collate_fn=collate_amp,
        device=device,
        train_split=CVSplit(0.1, random_state=1),
        verbose=0,
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=False,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=False,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )
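    # hard-coded indices of the 10 images that CVSplit(0.1, random_state=1) assigns
    # to the validation fold above (assumed; not stated in the original test)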
    val_indices = [80, 84, 33, 81, 93, 17, 36, 82, 69, 65]
    val_images = [images[idx] for idx in val_indices]
    val_energies = energies[val_indices]
    val_forces = np.concatenate(np.array([forces[idx] for idx in val_indices]))
    calc = AMP_skorch(training_data, net, "test")
    calc.train(overwrite=True)
    num_of_atoms = 3

    last_energy_score = net.history[-1]["energy_score"]
    last_forces_score = net.history[-1]["forces_score"]

    calculated_energies = np.array(
        [calc.get_potential_energy(image) for image in val_images])
    energy_rmse = np.sqrt(
        (((calculated_energies - val_energies) / num_of_atoms)**2).sum() /
        len(val_images))
    assert round(energy_rmse,
                 5) == round(last_energy_score,
                             5), "Validation energy errors incorrect!"
    calculated_forces = np.concatenate(
        np.array([calc.get_forces(image) for image in val_images]))
    force_rmse = np.sqrt((((calculated_forces - val_forces))**2).sum() /
                         (3 * num_of_atoms * len(val_images)))
    assert round(force_rmse,
                 5) == round(last_forces_score,
                             5), "Validation force errors incorrect!"
Code example #8
def test_skorch_lj():
    from amptorch.skorch_model import AMP

    cp = Checkpoint(monitor="valid_loss_best", fn_prefix="valid_best_")

    distances = np.linspace(2, 5, 10)
    label = "skorch_example"
    images = []
    energies = []
    forces = []
    for l in distances:
        image = Atoms(
            "CuCO",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), 0),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), 0),
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)
        energies.append(image.get_potential_energy())
        forces.append(image.get_forces())

    energies = np.array(energies)
    forces = np.concatenate(np.array(forces))

    Gs = {}
    Gs["G2_etas"] = np.logspace(np.log10(0.05), np.log10(5.0), num=2)
    Gs["G2_rs_s"] = [0] * 2
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [+1.0, -1]
    Gs["cutoff"] = 6.5

    p0 = [
        1.33905162,
        0.12290683,
        3.5,
        0.64021468,
        0.08010004,
        4.6,
        2.29284676,
        0.29639983,
        3.51,
        12,
    ]
    params_dict = {"C": [], "O": [], "Cu": []}
    lj_model = lj_optim(images, p0, params_dict, Gs["cutoff"], label)
    fitted_params = lj_model.fit()
    lj_energies, lj_forces, num_atoms = lj_model.lj_pred(
        images, fitted_params, params_dict)
    lj_data = [
        lj_energies, lj_forces, num_atoms, fitted_params, params_dict, lj_model
    ]

    forcetraining = True
    training_data = AtomsDataset(
        images,
        SNN_Gaussian,
        Gs,
        forcetraining=forcetraining,
        label=label,
        cores=1,
        lj_data=lj_data,
    )
    batch_size = len(training_data)
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    net = NeuralNetRegressor(
        module=FullNN(unique_atoms, [fp_length, 2, 2],
                      device,
                      forcetraining=forcetraining),
        criterion=CustomLoss,
        criterion__force_coefficient=0.3,
        optimizer=torch.optim.LBFGS,
        optimizer__line_search_fn="strong_wolfe",
        lr=1e-2,
        batch_size=batch_size,
        max_epochs=100,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=False,
        iterator_valid__collate_fn=collate_amp,
        device=device,
        train_split=0,
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )
    calc = AMP(training_data, net, "test")
    calc.train(overwrite=True)
    num_of_atoms = 3
    calculated_energies = np.array(
        [calc.get_potential_energy(image) for idx, image in enumerate(images)])
    energy_rmse = np.sqrt(
        (((calculated_energies - energies) / num_of_atoms)**2).sum() /
        len(images))
    last_energy_score = net.history[-1]["energy_score"]
    assert round(energy_rmse, 4) == round(last_energy_score,
                                          4), "Energy errors incorrect!"
    last_forces_score = net.history[-1]["forces_score"]

    calculated_forces = np.concatenate(
        np.array([calc.get_forces(image) for image in images]))
    force_rmse = np.sqrt((((calculated_forces - forces))**2).sum() /
                         (3 * num_of_atoms * len(images)))
    assert round(force_rmse, 4) == round(last_forces_score,
                                         4), "Force errors incorrect!"
Code example #9
def test_load():
    distances = np.linspace(2, 5, 100)
    label = "example"
    images = []
    for l in distances:
        image = Atoms(
            "CuCO",
            [
                (-l * np.sin(0.65), l * np.cos(0.65), 0),
                (0, 0, 0),
                (l * np.sin(0.65), l * np.cos(0.65), 0),
            ],
        )
        image.set_cell([10, 10, 10])
        image.wrap(pbc=True)
        image.set_calculator(EMT())
        images.append(image)

    # define symmetry functions to be used

    Gs = {}
    Gs["G2_etas"] = np.logspace(np.log10(0.05), np.log10(5.0), num=4)
    Gs["G2_rs_s"] = [0] * 4
    Gs["G4_etas"] = [0.005]
    Gs["G4_zetas"] = [1.0]
    Gs["G4_gammas"] = [+1.0, -1]
    Gs["cutoff"] = 6.5

    forcetraining = True
    training_data = AtomsDataset(images, SNN_Gaussian, Gs, forcetraining=forcetraining,
                                 label=label, cores=1, delta_data=None)
    unique_atoms = training_data.elements
    fp_length = training_data.fp_length
    device = "cpu"

    net = NeuralNetRegressor(
        module=FullNN(unique_atoms, [fp_length, 3, 10], device, forcetraining=forcetraining),
        criterion=CustomMSELoss,
        criterion__force_coefficient=0.3,
        optimizer=torch.optim.LBFGS,
        optimizer__line_search_fn="strong_wolfe",
        lr=1e-1,
        batch_size=len(images),
        max_epochs=5,
        iterator_train__collate_fn=collate_amp,
        iterator_train__shuffle=False,
        iterator_valid__collate_fn=collate_amp,
        device=device,
        train_split=0,
        verbose=0,
        callbacks=[
            EpochScoring(
                forces_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
            EpochScoring(
                energy_score,
                on_train=True,
                use_caching=True,
                target_extractor=target_extractor,
            ),
        ],
    )

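    # train one calculator, then load the saved parameters into an untrained copy of
    # the same net and check that both give identical predictions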
    net_2 = copy.copy(net)

    calc_1 = AMP(training_data, net, 'test')
    calc_1.train(overwrite=True)

    energy_1 = calc_1.get_potential_energy(images[0])
    forces_1 = calc_1.get_forces(images[0])

    calc_2 = AMP(training_data, net_2, 'test')
    calc_2.load(filename='./results/trained_models/test.pt')
    energy_2 = calc_2.get_potential_energy(images[0])
    forces_2 = calc_2.get_forces(images[0])

    assert energy_1 == energy_2, "Energies do not match!"
    assert (forces_1 == forces_2).all(), "Forces do not match!"