Example #1
 def __init__(self,
              images,
              unique_atoms,
              descriptor,
              Gs,
              fprange,
              label="example",
              cores=1):
     self.images = images
     if type(images) is not list:
         self.images = [images]
     self.descriptor = descriptor
     self.atom_images = self.images
     if isinstance(images, str):
         extension = os.path.splitext(images)[1]
         if extension not in (".traj", ".db"):
             self.atom_images = ase.io.read(images, ":")
     self.fprange = fprange
     self.training_unique_atoms = unique_atoms
     self.hashed_images = amp_hash(self.atom_images)
     if descriptor == SNN_Gaussian:
         self.hashed_images = hash_images(self.atom_images, Gs)
         self.fps, self.fp_primes = make_amp_descriptors_simple_nn(
             self.atom_images,
             Gs,
             self.training_unique_atoms,
             cores=cores,
             label=label,
             save=False,
         )
     self.unique_atoms = self.unique()
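A note on the file-extension check in the constructor above: writing the comparison as `extension != (".traj" or ".db")` is a common pitfall, because `("a" or "b")` evaluates to `"a"` and only the `.traj` suffix ends up being tested; the membership test `extension not in (".traj", ".db")` checks both suffixes. A minimal stand-alone illustration (the helper name is made up for this sketch):

import os

def needs_read(path):
    """Return True when `path` does not already end in .traj or .db."""
    extension = os.path.splitext(path)[1]
    # `extension != (".traj" or ".db")` would only test ".traj";
    # a membership test covers both suffixes.
    return extension not in (".traj", ".db")

assert needs_read("structures.xyz")
assert not needs_read("relaxation.traj")
assert not needs_read("database.db")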
Example #2
    def calculate(self, atoms, properties, system_changes):
        Calculator.calculate(self, atoms, properties, system_changes)
        dataset = TestDataset(
            images=atoms,
            unique_atoms=self.training_elements,
            descriptor=self.model.descriptor,
            Gs=self.Gs,
            fprange=self.fp_scaling,
            label=self.model.label,
        )
        fp_length = dataset.fp_length()
        architecture = copy.copy(self.model.structure)
        architecture.insert(0, fp_length)
        unique_atoms = dataset.unique()
        batch_size = len(dataset)
        dataloader = DataLoader(
            dataset, batch_size, collate_fn=dataset.collate_test, shuffle=False
        )
        if properties == ["energy"]:
            model = FullNN(self.training_elements, architecture, "cpu", forcetraining=False)
        elif properties == ["forces"]:
            model = FullNN(self.training_elements, architecture, "cpu", forcetraining=True)
        model.load_state_dict(torch.load(self.label))
        model.eval()

        for inputs in dataloader:
            for element in unique_atoms:
                inputs[0][element][0] = inputs[0][element][0].requires_grad_(True)
            energy, forces = model(inputs)
        energy = (energy * self.target_sd) + self.target_mean
        energy = np.concatenate(energy.detach().numpy())
        if properties == ["forces"]:
            forces = (forces * self.target_sd).detach().numpy()

        if self.lj:
            image_hash = hash_images([atoms])
            self.lj_model.neighborlist.calculate_items(image_hash)
            lj_energy, lj_forces, _ = self.lj_model.image_pred(
                atoms, self.fitted_params, self.params_dict
            )
            lj_energy = np.squeeze(lj_energy)
            energy += lj_energy
            if properties == ["forces"]:
                forces += lj_forces

        self.results["energy"] = float(energy)
        self.results["forces"] = forces
Example #3
    def calculate(self, atoms, properties, system_changes):
        Calculator.calculate(self, atoms, properties, system_changes)
        dataset = TestDataset(images=atoms,
                              unique_atoms=self.training_data.elements,
                              descriptor=self.training_data.base_descriptor,
                              Gs=self.Gs,
                              fprange=self.fprange,
                              label=self.testlabel,
                              cores=self.cores,
                              specific_atoms=self.specific_atoms,
                              save_test_fp=self.save_test_fp)
        unique_atoms = dataset.unique()
        batch_size = len(dataset)
        dataloader = DataLoader(dataset,
                                batch_size,
                                collate_fn=dataset.collate_test,
                                shuffle=False)
        model = self.model.module
        model.forcetraining = True
        model.load_state_dict(torch.load(self.label))
        model.eval()

        for inputs in dataloader:
            if self.specific_atoms is True:
                unique_atoms = inputs[2]
            for element in unique_atoms:
                inputs[0][element][0] = inputs[0][element][0].requires_grad_(
                    True)
            energy, forces = model(inputs)
        energy = np.concatenate(energy.detach().numpy())
        forces = forces.detach().numpy()
        num_atoms = forces.shape[0]

        image_hash = hash_images([atoms])
        if self.delta:
            self.delta_model.neighborlist.calculate_items(image_hash)
            delta_energy, delta_forces, _ = self.delta_model.image_pred(
                atoms, self.params)
            delta_energy = np.squeeze(delta_energy)
            energy += delta_energy + num_atoms * (self.target_ref_per_atom -
                                                  self.delta_ref_per_atom)
            forces += delta_forces

        self.results["energy"] = float(energy)
        self.results["forces"] = forces
Example #4
 def __init__(self, images, params, cutoff, filename, combo='mean'):
     if not os.path.exists("results"):
         os.mkdir("results")
     if not os.path.exists("results/logs"):
         os.mkdir("results/logs")
     self.filename = filename
     self.data = images
     self.params = params
     self.combo = combo
     self.cutoff = cutoff
     self.hashed_images = hash_images(images)
     self.hashed_keys = list(self.hashed_images.keys())
     calc = NeighborlistCalculator(cutoff=cutoff)
     self.neighborlist = Data(filename="amp-data-neighborlists",
                              calculator=calc)
     self.neighborlist.calculate_items(self.hashed_images)
     log = Logger("results/logs/{}.txt".format(filename))
     self.logresults(log, self.params)
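The nested `os.path.exists` / `os.mkdir` checks at the top of the constructor can be collapsed into a single call; a minimal equivalent sketch (same directories, same outcome):

import os

# Creates results/ and results/logs/ if missing; a no-op when they already exist.
os.makedirs("results/logs", exist_ok=True)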
Example #5
 def __init__(self,
              images,
              unique_atoms,
              descriptor,
              Gs,
              fprange,
              label="example",
              cores=1,
              specific_atoms=False,
              save_test_fp=False):
     self.images = images
     if type(images) is not list:
         self.images = [images]
     self.descriptor = descriptor
     self.atom_images = self.images
     if isinstance(images, str):
         extension = os.path.splitext(images)[1]
         if extension not in (".traj", ".db"):
             self.atom_images = ase.io.read(images, ":")
     self.fprange = fprange
     self.training_unique_atoms = unique_atoms
     self.hashed_images = amp_hash(self.atom_images)
     self.specific_atoms = specific_atoms
     self.save_fp = save_test_fp
     G2_etas = Gs["G2_etas"]
     G2_rs_s = Gs["G2_rs_s"]
     G4_etas = Gs["G4_etas"]
     G4_zetas = Gs["G4_zetas"]
     G4_gammas = Gs["G4_gammas"]
     cutoff = Gs["cutoff"]
     if str(descriptor)[8:16] == "amptorch":
         self.hashed_images = hash_images(self.atom_images, Gs)
         self.fps, self.fp_primes = make_amp_descriptors_simple_nn(
             self.atom_images,
             Gs,
             self.training_unique_atoms,
             cores=cores,
             label=label,
             save=self.save_fp,
             specific_atoms=self.specific_atoms)
     self.unique_atoms = self.unique()
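The `str(descriptor)[8:16] == "amptorch"` test above slices the class repr (which looks like `<class 'amptorch....'>` for classes defined in that package) to decide whether the descriptor comes from amptorch. A more explicit, position-independent sketch of the same check, using a toy class here rather than the real descriptor:

class _FakeDescriptor:        # stand-in for an amptorch descriptor class
    pass
_FakeDescriptor.__module__ = "amptorch.gaussian"   # assumed module name for the sketch

def is_amptorch_descriptor(descriptor_cls):
    """True when the descriptor class is defined inside the amptorch package."""
    module = getattr(descriptor_cls, "__module__", "")
    return module == "amptorch" or module.startswith("amptorch.")

assert is_amptorch_descriptor(_FakeDescriptor)
assert str(_FakeDescriptor)[8:16] == "amptorch"    # the slice the original relies on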
Example #6
 def __init__(self, images, unique_atoms, descriptor, Gs, fprange, label="example", cores=1):
     self.images = images
     if type(images) is not list:
         self.images = [images]
     self.descriptor = descriptor
     self.atom_images = self.images
     if isinstance(images, str):
         extension = os.path.splitext(images)[1]
         if extension not in (".traj", ".db"):
             self.atom_images = ase.io.read(images, ":")
     self.fprange = fprange
     self.training_unique_atoms = unique_atoms
     self.hashed_images = amp_hash(self.atom_images)
     G2_etas = Gs["G2_etas"]
     G2_rs_s = Gs["G2_rs_s"]
     G4_etas = Gs["G4_etas"]
     G4_zetas = Gs["G4_zetas"]
     G4_gammas = Gs["G4_gammas"]
     cutoff = Gs["cutoff"]
     if str(descriptor)[8:16] == "amptorch":
         self.hashed_images = hash_images(self.atom_images, Gs)
         make_amp_descriptors_simple_nn(
             self.atom_images, Gs, self.training_unique_atoms, cores=cores, label=label
         )
     G = make_symmetry_functions(elements=self.training_unique_atoms, type="G2", etas=G2_etas)
     G += make_symmetry_functions(
         elements=self.training_unique_atoms,
         type="G4",
         etas=G4_etas,
         zetas=G4_zetas,
         gammas=G4_gammas,
     )
     for g in G:
         g["Rs"] = G2_rs_s
     self.descriptor = self.descriptor(Gs=G, cutoff=cutoff)
     self.descriptor.calculate_fingerprints(
         self.hashed_images, calculate_derivatives=True
     )
     self.unique_atoms = self.unique()
Example #7
 def __init__(self,
              images,
              descriptor,
              Gs,
              forcetraining,
              label,
              cores,
              delta_data=None,
              store_primes=False,
              specific_atoms=False):
     self.images = images
     self.base_descriptor = descriptor
     self.descriptor = descriptor
     self.Gs = Gs
     self.atom_images = self.images
     self.forcetraining = forcetraining
     self.store_primes = store_primes
     self.cores = cores
     self.delta = False
     self.specific_atoms = specific_atoms
     if delta_data is not None:
         self.delta_data = delta_data
         self.delta_energies = np.array(delta_data[0])
         self.delta_forces = delta_data[1]
         self.num_atoms = np.array(delta_data[2])
         self.delta = True
     if self.store_primes:
         if not os.path.isdir("./stored-primes/"):
             os.mkdir("stored-primes")
     if isinstance(images, str):
         extension = os.path.splitext(images)[1]
         if extension not in (".traj", ".db"):
             self.atom_images = ase.io.read(images, ":")
     self.elements = self.unique()
     # TODO: print to a log and control verbosity
     print("Calculating fingerprints...")
     G2_etas = Gs["G2_etas"]
     G2_rs_s = Gs["G2_rs_s"]
     G4_etas = Gs["G4_etas"]
     G4_zetas = Gs["G4_zetas"]
     G4_gammas = Gs["G4_gammas"]
     cutoff = Gs["cutoff"]
     # create simple_nn fingerprints
     if str(descriptor)[8:16] == "amptorch":
         self.hashed_images = hash_images(self.atom_images, Gs=Gs)
         make_amp_descriptors_simple_nn(self.atom_images,
                                        Gs,
                                        self.elements,
                                        cores=cores,
                                        label=label,
                                        specific_atoms=self.specific_atoms)
         self.isamp_hash = False
     else:
         self.hashed_images = amp_hash(self.atom_images)
         self.isamp_hash = True
     G = make_symmetry_functions(elements=self.elements,
                                 type="G2",
                                 etas=G2_etas)
     G += make_symmetry_functions(
         elements=self.elements,
         type="G4",
         etas=G4_etas,
         zetas=G4_zetas,
         gammas=G4_gammas,
     )
     for g in list(G):
         g["Rs"] = G2_rs_s
     self.descriptor = self.descriptor(Gs=G, cutoff=cutoff)
     self.descriptor.calculate_fingerprints(
         self.hashed_images, calculate_derivatives=forcetraining)
     print("Fingerprints Calculated!")
     self.fprange = calculate_fingerprints_range(self.descriptor,
                                                 self.hashed_images)
     # perform preprocessing
     (self.fingerprint_dataset, self.energy_dataset, self.num_of_atoms,
      self.sparse_fprimes, self.forces_dataset, self.index_hashes,
      self.scalings, self.rearange_forces) = self.preprocess_data()
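The class above is ultimately consumed through a `DataLoader` with a custom collate function (see the `calculate` methods earlier, which pass `collate_fn=dataset.collate_test`). A minimal self-contained sketch of that Dataset/collate pattern with plain torch and toy tensors in place of fingerprints:

import torch
from torch.utils.data import Dataset, DataLoader

class ToyAtomsDataset(Dataset):
    """Stand-in for AtomsDataset: each item is (fingerprint vector, energy)."""
    def __init__(self, n_images=4, fp_length=3):
        self.x = torch.randn(n_images, fp_length)
        self.y = torch.randn(n_images)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

def collate_toy(batch):
    # Mirrors the role of collate_amp / collate_test: merge per-image samples
    # into batched tensors for the model.
    xs, ys = zip(*batch)
    return torch.stack(xs), torch.stack(ys)

dataset = ToyAtomsDataset()
loader = DataLoader(dataset, batch_size=len(dataset),
                    collate_fn=collate_toy, shuffle=False)
for fingerprints, energies in loader:
    print(fingerprints.shape, energies.shape)   # torch.Size([4, 3]) torch.Size([4])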
Example #8
def test_calcs():
    """Gaussian/Neural non-periodic standard.

    Checks that the answer matches that expected from previous Mathematica
    calculations.
    """

    # Build the list of non-periodic images.
    images = [
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0],
                                [0.0, 0.0, 3.0], [1.0, 0.0, 0.0]]),
        ),
        Atoms(
            symbols="PdOPd2",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                                [-1.0, 1.0, 2.0], [1.0, 3.0, 2.0]]),
        ),
        Atoms(
            symbols="PdO",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[2.0, 1.0, -1.0], [1.0, 2.0, 1.0]]),
        ),
        Atoms(
            symbols="Pd2O",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[-2.0, -1.0, -1.0], [1.0, 2.0, 1.0],
                                [3.0, 4.0, 4.0]]),
        ),
        Atoms(
            symbols="Cu",
            pbc=np.array([False, False, False], dtype=bool),
            calculator=EMT(),
            cell=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
            positions=np.array([[0.0, 0.0, 0.0]]),
        ),
    ]

    [a.get_potential_energy() for a in images]
    # Parameters
    hiddenlayers = {"O": (2, ), "Pd": (2, ), "Cu": (2, )}

    Gs = {}
    Gs["G2_etas"] = [0.2]
    Gs["G2_rs_s"] = [0]
    Gs["G4_etas"] = [0.4]
    Gs["G4_zetas"] = [1]
    Gs["G4_gammas"] = [1]
    Gs["cutoff"] = 6.5

    elements = ["O", "Pd", "Cu"]

    G = make_symmetry_functions(elements=elements,
                                type="G2",
                                etas=Gs["G2_etas"])
    G += make_symmetry_functions(
        elements=elements,
        type="G4",
        etas=Gs["G4_etas"],
        zetas=Gs["G4_zetas"],
        gammas=Gs["G4_gammas"],
    )
    hashed_images = hash_images(images, Gs)
    descriptor = Gaussian(Gs=G, cutoff=Gs["cutoff"])
    descriptor.calculate_fingerprints(hashed_images,
                                      calculate_derivatives=True)
    fingerprints_range = calculate_fingerprints_range(descriptor,
                                                      hashed_images)

    weights = OrderedDict([
        (
            "O",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Pd",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
        (
            "Cu",
            OrderedDict([
                (
                    1,
                    np.array([
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                        [0.5, 0.5],
                    ]),
                ),
                (2, np.array([[0.5], [0.5], [0.5]])),
            ]),
        ),
    ])

    scalings = OrderedDict([
        ("O", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Pd", OrderedDict([("intercept", 0), ("slope", 1)])),
        ("Cu", OrderedDict([("intercept", 0), ("slope", 1)])),
    ])

    calc = Amp(
        descriptor,
        model=NeuralNetwork(
            hiddenlayers=hiddenlayers,
            weights=weights,
            scalings=scalings,
            activation="linear",
            fprange=fingerprints_range,
            mode="atom-centered",
            fortran=False,
        ),
        logging=False,
    )

    amp_energies = [calc.get_potential_energy(image) for image in images]
    amp_forces = [calc.get_forces(image) for image in images]
    amp_forces = np.concatenate(amp_forces)

    device = "cpu"
    dataset = AtomsDataset(images,
                           descriptor=DummyGaussian,
                           cores=1,
                           label='test',
                           Gs=Gs,
                           forcetraining=True)
    fp_length = dataset.fp_length
    batch_size = len(dataset)
    dataloader = DataLoader(dataset,
                            batch_size,
                            collate_fn=collate_amp,
                            shuffle=False)
    model = FullNN(elements, [fp_length, 2, 2], device, forcetraining=True)
    for name, layer in model.named_modules():
        if isinstance(layer, Dense):
            layer.activation = None
            init.constant_(layer.weight, 0.5)
            init.constant_(layer.bias, 0.5)
    for batch in dataloader:
        input_data = [batch[0], len(batch[1]), batch[3]]
        for element in elements:
            input_data[0][element][0] = (
                input_data[0][element][0].to(device).requires_grad_(True))
        fp_primes = batch[4]
        energy_pred, force_pred = model(input_data, fp_primes)

    for idx, i in enumerate(amp_energies):
        assert round(i, 4) == round(
            energy_pred.tolist()[idx][0],
            4), "The predicted energy of image %i is wrong!" % (idx + 1)
    print("Energy predictions are correct!")
    for idx, sample in enumerate(amp_forces):
        for idx_d, value in enumerate(sample):
            predict = force_pred.tolist()[idx][idx_d]
            assert abs(value - predict) < 0.00001, (
                "The predicted force of image %i, direction %i is wrong! Values: %s vs %s"
                % (idx + 1, idx_d, value, predict))
    print("Force predictions are correct!")