def _initialize_queue(self):
     r"""
     A job per distance is created. Each job uses the same input
     parameters while the whole system considered is made of both
     fragments, the first one being unchanged, the second one being
     translated by the given distance in the :math:`y` direction. The
     name used to initialize the workflow is used for each job, while
     a specific run_dir per job is defined.
     """
     queue = []
     for y_0 in self.distances:
         # Define a specific run directory for each job
         run_dir = os.path.join(self.run_dir, "y_{}".format(y_0))
         # Set the positions of the whole system, where the second
         # fragment is translated along the y direction
         new_frag2 = self.fragment2.translate([0, y_0, 0])
         pos = deepcopy(self.fragment1)
         pos._atoms += new_frag2.atoms
         # Add a new job to the queue
         job = Job(
             name=self.name,
             inputparams=self.inputparams,
             posinp=pos,
             run_dir=run_dir,
         )
         job.distance = y_0  # We add the distance attribute
         queue.append(job)
     return queue
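The distance attribute attached to each job above is what makes post-processing simple once the queue has been run. A minimal sketch, assuming a workflow object that exposes this queue after its jobs have completed (neither the class name nor the driver appears in the snippet above):

# Hedged post-processing sketch: `workflow` stands for any object exposing
# the queue built by _initialize_queue above, once its jobs have been run.
curve = sorted((job.distance, job.logfile.energy) for job in workflow.queue)
for distance, energy in curve:
    print("y = {:.2f}  E = {:.6f}".format(distance, energy))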
def run(posinp, i, args, param, pseudos):
    try:
        # Create a dedicated directory for this calculation and move into it
        os.makedirs("{}_{:06}".format(args.name, i))
        os.chdir("{}_{:06}".format(args.name, i))
        job = Job(name=args.name,
                  posinp=posinp,
                  inputparams=param,
                  pseudos=pseudos)
        job.run(nmpi=args.nmpi)
        copyfile(
            "forces_{}.xyz".format(args.name),
            "../../saved_results/{:06}.xyz".format(i),
        )
        os.chdir("../")
    except OSError:
        # The directory already exists: check whether the previous
        # calculation completed, otherwise restart it
        os.chdir("{}_{:06}".format(args.name, i))
        try:
            log = Logfile.from_file("log-" + args.name + ".yaml")
            print("Calculation {:06} was complete.\n".format(i))
        except:
            job = Job(name=args.name,
                      posinp=posinp,
                      inputparams=param,
                      pseudos=pseudos)
            job.run(args.nmpi, restart_if_incomplete=True)
            copyfile(
                "forces_{}.xyz".format(args.name),
                "../../saved_results/{:06}.xyz".format(i),
            )
        os.chdir("../")
def single_phonon_calculation(
    nmpi=1,
    nomp=1,
    preparation=True,
    savefile=True,
    pseudos=False,
    verbose=False,
    optimization=True,
):
    r"""
    Run an optional geometry optimization followed by a phonon
    calculation, then print and/or save the resulting phonon energies
    and normal modes, depending on the keyword arguments.
    """
    if preparation:
        base_inp, ref_pos, jobname = utils.prepare_calculations()

    if optimization:
        base_job = Job(
            posinp=ref_pos,
            inputparams=base_inp,
            name=jobname,
            run_dir="geopt/",
            pseudos=pseudos,
        )
        geopt = Geopt(base_job, forcemax=2e-5, ncount_cluster_x=50)
        geopt.run(nmpi=nmpi, nomp=nomp, restart_if_incomplete=True)
        relaxed_pos = geopt.final_posinp
    else:
        relaxed_pos = ref_pos

    if "output" in base_inp:
        del base_inp["output"]

    ground_state = Job(
        name=jobname,
        posinp=relaxed_pos,
        inputparams=base_inp,
        run_dir="phonons/",
        ref_data_dir=(geopt.queue[0].data_dir if optimization else None),
        pseudos=pseudos,
    )
    phonons = Phonons(ground_state)
    phonons.run(nmpi=nmpi, nomp=nomp, restart_if_incomplete=True)

    if verbose:
        for i in range(len(phonons.energies)):
            print(f"Mode {i} :")
            print("Energy : ", phonons.energies[i])
            print("Mode : ", phonons.normal_modes[:, i], "\n")

    if savefile:
        save("phonons/ph_energies.npy", phonons.energies)
        save("phonons/ph_normal_modes.npy", phonons.normal_modes)
Example #4
 def test_posinp_with_inf(self):
     new_inp = InputParams({
         "posinp": {
             "units":
             "angstroem",
             "cell": [40, ".inf", 40],
             "positions": [
                 {
                     'N': [
                         2.97630782434901e-23, 6.87220595204354e-23,
                         0.0107161998748779
                     ]
                 },
                 {
                     'N': [
                         -1.10434491945017e-23, -4.87342174483075e-23,
                         1.10427379608154
                     ]
                 },
             ]
         }
     })
     with Job(inputparams=new_inp, name="test") as job:
         job.run(nmpi=6, nomp=3, dry_run=True)
         job.clean()
     assert job.logfile.boundary_conditions == 'surface'
Example #5
 def test_run(self):
     atoms = [Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     inp = InputParams({"dft": {"rmult": [5, 7], "hgrids": 0.55}})
     base = Job(posinp=pos,
                inputparams=inp,
                name="N2",
                run_dir="tests/rmult_convergence_N2")
     rmc = RmultConvergence(base, [8, 11], [-1, -1],
                            n_jobs=4,
                            precision_per_atom=0.01 * EV_TO_HA)
     assert not rmc.is_completed
     rmc.run(nmpi=6, nomp=3)
     assert rmc.is_completed
     # The correct minimum rmult is found
     assert rmc.converged.param == [6., 9.]
     # The output energies are correct
     expected_energies = [-19.871104, -19.871032, -19.870892, -19.869907]
     energies = [job.logfile.energy for job in rmc.queue]
     np.testing.assert_array_almost_equal(energies, expected_energies)
     # Print the summary of the workflow
     rmc.summary()
     # Test that running the workflow again warns a UserWarning
     with pytest.warns(UserWarning):
         rmc.run()
Example #6
 def test_dry_run_raises_RuntimeError(self):
     # Error expected because the cell contains two ".inf" values
     new_inp = InputParams({
         "posinp": {
             "units":
             "angstroem",
             "cell": [40, ".inf", ".inf"],
             "positions": [
                 {
                     'N': [
                         2.97630782434901e-23, 6.87220595204354e-23,
                         0.0107161998748779
                     ]
                 },
                 {
                     'N': [
                         -1.10434491945017e-23, -4.87342174483075e-23,
                         1.10427379608154
                     ]
                 },
             ]
         }
     })
     with pytest.raises(RuntimeError):
         with Job(inputparams=new_inp, run_dir="tests/dummy") as job:
             job.run(dry_run=True)
Example #7
 def test_run(self):
     with Job(inputparams=self.inp, run_dir="tests",
              name='warnings') as job:
         job.run()
     assert job.is_completed
     assert self.inp["dft"].get("inputpsiid") is None
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #8
 def test_run(self):
     atoms = [Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     inp = InputParams({"dft": {"rmult": [5, 7]}})
     base = Job(inputparams=inp,
                posinp=pos,
                name="N2",
                run_dir="tests/hgrids_convergence_N2")
     hgc = HgridsConvergence(base,
                             0.36,
                             0.02,
                             n_jobs=8,
                             precision_per_atom=0.01 * EV_TO_HA)
     assert not hgc.is_completed
     hgc.run(nmpi=6, nomp=3)
     assert hgc.is_completed
     # The correct maximum hgrids is found
     assert hgc.converged.param == [0.38] * 3
     # The output energies are correct
     expected_energies = [-19.888197, -19.887715, -19.887196, 0, 0, 0, 0, 0]
     energies = [
         job.logfile.energy if job.is_completed else 0 for job in hgc.queue
     ]
     np.testing.assert_array_almost_equal(energies, expected_energies)
     # Print the summary of the workflow
     hgc.summary()
     # Test that running the workflow again warns a UserWarning
     with pytest.warns(UserWarning):
         hgc.run()
Example #9
class TestPhonons:

    gs = Job(posinp=pos, name='N2', run_dir='tests/phonons_N2')

    @pytest.mark.parametrize("to_evaluate", [
        "Phonons(self.gs, translation_amplitudes=1)",
        "Phonons(self.gs, translation_amplitudes=[3]*2)",
        "Phonons(self.gs, translation_amplitudes=[3]*4)",
    ])
    def test_init_raises_ValueError(self, to_evaluate):
        with pytest.raises(ValueError):
            eval(to_evaluate)

    def test_init_raises_NotImplementedError(self):
        with pytest.raises(NotImplementedError):
            Phonons(self.gs, order=-1)

    def test_run_first_order(self):
        N2_ref = """\
2   angstroem
free
N   3.571946174   3.571946174   3.620526682
N   3.571946174   3.571946174   4.71401439"""
        ref_pos = Posinp.from_string(N2_ref)
        gs = Job(posinp=ref_pos, name='N2', run_dir='tests/phonons_N2')
        ph = Phonons(gs, order=1)
        assert not ph.is_completed
        ph.run(nmpi=2, nomp=2)
        assert ph.is_completed
        # Test the only physically relevant phonon energy
        np.testing.assert_almost_equal(
            max(ph.energies), 2386.9850607523636, decimal=6)
Example #10
 def test__check_logfile_inputparams(self):
     with pytest.raises(UserWarning):
         with Job(inputparams=InputParams(),
                  posinp=self.pos,
                  name="warnings",
                  run_dir="tests") as job:
             job.run()
Example #11
    def test_run_second_order_phonons(self):
        N2_ref = """\
2   angstroem
free
N   3.571946174   3.571946174   3.620526682
N   3.571946174   3.571946174   4.71401439"""
        ref_pos = Posinp.from_string(N2_ref)
        gs = Job(posinp=ref_pos, name='N2', run_dir='tests/phonons_N2')
        phonons = Phonons(gs, order=2)
        raman = RamanSpectrum(phonons)
        assert not phonons.is_completed
        assert not raman.is_completed
        raman.run(nmpi=2, nomp=2)
        assert phonons.is_completed
        assert raman.is_completed
        # Test the only physically relevant phonon energy
        np.testing.assert_almost_equal(
            max(raman.energies), 2386.9463343478246, decimal=6)
        # Test the only physically relevant intensity
        np.testing.assert_almost_equal(
            max(raman.intensities), 22.564457304830206)
        # Test the only physically relevant depolarization ratio
        i = np.argmax(raman.energies)
        np.testing.assert_almost_equal(
            raman.depolarization_ratios[i], 0.09412173797731693)
        # Test that running the workflow again warns a UserWarning
        with pytest.warns(UserWarning):
            phonons.run()
        with pytest.warns(UserWarning):
            raman.run()
Example #12
 def test_run_raises_ValueError_when_incomplete_logfile(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              run_dir="tests",
              name="incomplete") as job:
         shutil.copyfile(job.logfile_name + ".ref", job.logfile_name)
         with pytest.raises(ValueError,
                            match="The logfile is incomplete!"):
             job.run(restart_if_incomplete=False)
Example #13
 def dry_gwf(self):
     new_pos = Posinp([Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])],
                      units="angstroem", boundary_conditions="free")
     base_job = Job(posinp=new_pos, name="N2", run_dir="tests/geopt_N2/dry")
     gwf = Geopt(base_job, maxrise=0.5)
     yield gwf
     with gwf.queue[0] as job:
         job.clean()
     os.rmdir(job.run_dir)
Example #14
 def test_run_exceeds_timeout_raises_RuntimeError(self):
     inp = InputParams({"dft": {"rmult": [9, 12], "hgrids": 0.25}})
     with Job(inputparams=inp,
              posinp=self.pos,
              run_dir="tests",
              name="long-run") as job:
         job.clean()
         assert not job.is_completed
         with pytest.raises(RuntimeError):
             job.run(timeout=1.5 / 60, force_run=True)
Example #15
 def test_run_with_force_run_and_inp_and_pos(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              run_dir="tests",
              name="runtest") as job:
         assert not job.is_completed
         job.run(force_run=True)
         assert job.is_completed
     assert self.inp["dft"].get("inputpsiid") is None
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #16
 def test_init(self):
     atoms = [Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     base = Job(posinp=pos, name="N2", run_dir="tests/rmult_convergence_N2")
     rmc = RmultConvergence(base, [6, 8], [0.5, 0.5],
                            n_jobs=3,
                            precision_per_atom=0.01 * EV_TO_HA)
     rmults = [job.param for job in rmc.queue]
     expected = [[6.0, 8.0], [5.5, 7.5], [5.0, 7.0]]
     np.testing.assert_array_almost_equal(rmults, expected)
Example #17
 def test_clean(self):
     with Job(inputparams=self.inp, name="dry_run", run_dir="tests") as job:
         job.write_input_files()
         job.clean(logfiles_dir=True, data_dir=True)
         assert not os.path.exists(job.posinp_name)
         assert not os.path.exists(job.input_name)
         assert not os.path.exists(job.logfile_name)
         assert not os.path.exists("logfiles")
         assert not os.path.exists("data-dry_run")
         assert not job.is_completed
Example #18
 def test_run_restart_if_incomplete(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              run_dir="tests",
              name="incomplete") as job:
         shutil.copyfile(job.logfile_name + ".ref", job.logfile_name)
         assert not job.is_completed
         job.run(restart_if_incomplete=True)
         assert job.is_completed
         job.clean()
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #19
 def test_run_with_force_run(self):
     new_inp = deepcopy(self.inp)
     new_inp["output"] = {"orbitals": "binary"}
     with Job(inputparams=new_inp, run_dir="tests",
              name="write_orbs") as job:
         assert not job.is_completed
         job.run(force_run=True, nmpi=2, nomp=4)
         assert job.is_completed
         job.clean()
     assert self.inp["dft"].get("inputpsiid") is None
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #20
    def _initialize_queue(self, reference, delta, n_jobs):
        r"""
        Initialize the jobs to be run in order to perform the hgrids
        convergence.

        Parameters
        ----------
        reference
            Reference input parameter, giving high quality results.
        delta
            Variation of the input parameter between two runs.
        n_jobs : int
            Maximal number of jobs to be run.

        Returns
        -------
        list
            Queue of jobs to be run.
        """
        # Define the parameters to be used during this workflow
        param_variations = self._initialize_param_variations(
            reference, delta, n_jobs)
        # Set the queue of jobs according to the hgrids defined
        pos = self.base_job.posinp
        name = self.base_job.name
        pseudos = self.base_job.pseudos
        queue = []
        for param in param_variations:
            # The input parameters and the run directory of the base job
            # are updated given the value of the parameter
            new_inp = self._new_inputparams(param)
            new_run_dir = self._new_run_dir(param)
            job = Job(posinp=pos,
                      inputparams=new_inp,
                      name=name,
                      run_dir=new_run_dir,
                      pseudos=pseudos)
            job.param = param
            queue.append(job)
        return queue
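The _initialize_param_variations method itself is not shown here. A plausible standalone sketch for the hgrids case, consistent with the expected list in Example #24 below ([[0.36 + i * 0.02] * 3 for i in range(8)]), but not the library's actual implementation:

# Standalone sketch (not the library's implementation) of the parameter
# variations for an hgrids convergence: start from the reference value and
# coarsen the grid by delta at each step, broadcasting to the three axes.
import numpy as np

def initialize_hgrids_variations(reference, delta, n_jobs):
    return [[reference + i * delta] * 3 for i in range(n_jobs)]

np.testing.assert_array_almost_equal(
    initialize_hgrids_variations(0.36, 0.02, 3),
    [[0.36] * 3, [0.38] * 3, [0.40] * 3])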
Example #21
 def _initialize_queue(self):
     r"""
     Initialize the queue of jobs to be run in order to compute the
     phonon energies.
     """
     queue = []
     gs = self.ground_state
     # Add the ground state job to the queue if needed
     if self.order == 1:
         queue.append(gs)
     # Add the jobs where each atom is displaced along each space
     # coordinate
     for i_at in range(len(gs.posinp)):
         for key, disp in self.displacements.items():
             # Prepare the new job by translating an atom
             run_dir = os.path.join(gs.run_dir, "atom{:04d}".format(i_at),
                                    key)
             new_posinp = gs.posinp.translate_atom(i_at, disp.vector)
             # Set the correct reference data directory
             default = DEFAULT_PARAMETERS["output"]["orbitals"]
             write_orbitals = ("output" in gs.inputparams
                               and gs.inputparams["output"] != default)
             if self.order == 1 and write_orbitals:
                 ref_data_dir = gs.data_dir  # pragma: no cover
             else:
                 ref_data_dir = gs.ref_data_dir
             job = Job(
                 inputparams=gs.inputparams,
                 posinp=new_posinp,
                 name=gs.name,
                 run_dir=run_dir,
                 skip=gs.skip,
                 ref_data_dir=ref_data_dir,
                 pseudos=gs.pseudos,
             )
             # Add attributes to the job to facilitate post-processing
             job.moved_atom = i_at
             job.displacement = disp
             queue.append(job)
     return queue
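From the loops above, the queue holds one job per atom and per displacement, plus the ground-state job when order is 1. A back-of-the-envelope sketch; the per-atom displacement counts (3 one-sided displacements for order 1, 6 signed ones for order 2) are assumptions that do not appear in this snippet:

# Hedged sketch of the expected queue length. Only the structure
# "jobs = atoms * displacements (+ ground state for order 1)" is read
# from the loops above; the displacement counts per atom are assumed.
def expected_queue_length(n_atoms, order):
    displacements_per_atom = 3 if order == 1 else 6  # assumption
    ground_state_job = 1 if order == 1 else 0
    return n_atoms * displacements_per_atom + ground_state_job

print(expected_queue_length(2, 1))  # N2, first order
print(expected_queue_length(2, 2))  # N2, second order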
Example #22
 def test_run_warns_UserWarning_too_low_energy(self):
     atoms = [Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     inp = InputParams({"dft": {"rmult": [5, 7], "hgrids": 0.55}})
     base = Job(posinp=pos,
                inputparams=inp,
                name="N2",
                run_dir="tests/rmult_convergence_N2")
     rmc = RmultConvergence(base, [9, 12], [-1, -1],
                            n_jobs=2,
                            precision_per_atom=0.01 * EV_TO_HA)
     with pytest.warns(UserWarning):
         rmc.run(nmpi=6, nomp=3)
Example #23
 def _initialize_queue(self):
     r"""
     Initialize the queue of calculations to be performed in order to
     compute the polarizability tensor.
     """
     queue = []
     gs = self.ground_state
     # Add the ground state job to the queue after updating the run
     # directory if needed
     if self.order == 1:
         queue.append(gs)
     # Add a job for each electric field calculation (one along each
     # space coordinate)
     for key, efield in self.efields.items():
         inp = deepcopy(gs.inputparams)
         if "dft" in inp:
             inp["dft"]["elecfield"] = efield.vector
         else:
             inp["dft"] = {"elecfield": efield.vector}
         # Set the correct reference data directory
         default = DEFAULT_PARAMETERS["output"]["orbitals"]
         write_orbitals = ("output" in gs.inputparams
                           and gs.inputparams["output"] != default)
         if self.order == 1 and write_orbitals:
             ref_data_dir = gs.data_dir  # pragma: no cover
         else:
             ref_data_dir = gs.ref_data_dir
         run_dir = os.path.join(gs.run_dir, "EF_along_{}".format(key))
         job = Job(
             name=gs.name,
             inputparams=inp,
             posinp=gs.posinp,
             run_dir=run_dir,
             skip=gs.skip,
             ref_data_dir=ref_data_dir,
         )
         job.efield = efield
         queue.append(job)
     return queue
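Once such a queue of electric-field calculations has been run, a polarizability tensor is typically obtained by finite differences of the dipole moment with respect to the applied field. The sketch below is the generic central-difference formula, not the library's own post-processing (which is not shown above):

# Generic central-difference polarizability from dipole moments computed
# under +/- fields along each Cartesian direction (not the library's code).
import numpy as np

def polarizability_from_dipoles(dipoles_plus, dipoles_minus, amplitude):
    # dipoles_plus[j] / dipoles_minus[j]: dipole vector of shape (3,)
    # obtained with the field applied along the +/- j direction;
    # amplitude: field strength (same units as in the input file).
    alpha = np.empty((3, 3))
    for j in range(3):
        alpha[:, j] = (np.asarray(dipoles_plus[j])
                       - np.asarray(dipoles_minus[j])) / (2 * amplitude)
    return alpha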
Example #24
 def test_init(self):
     atoms = [Atom('N', [0, 0, 0]), Atom('N', [0, 0, 1.1])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     base = Job(posinp=pos,
                name="N2",
                run_dir="tests/hgrids_convergence_N2")
     hgc = HgridsConvergence(base,
                             0.36,
                             0.02,
                             n_jobs=8,
                             precision_per_atom=0.01 * EV_TO_HA)
     hgrids = [job.param for job in hgc.queue]
     expected = [[0.36 + i * 0.02] * 3 for i in range(8)]
     np.testing.assert_array_almost_equal(hgrids, expected)
Example #25
 def test_run_with_non_existing_ref_data_dir(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              ref_data_dir="unknown_data_dir",
              run_dir="tests",
              name="with_unknown_ref_data_dir") as job:
         assert self.inp["dft"].get("inputpsiid") is None
         assert job.inputparams["dft"].get("inputpsiid") is None
         assert not job.is_completed
         assert job.ref_data_dir is not None
         assert not os.path.exists(job.ref_data_dir)
         job.run(force_run=True)
         assert job.is_completed
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #26
 def test_run_with_dry_run(self):
     with Job(inputparams=self.inp, name="dry_run", run_dir="tests") as job:
         # Run the calculation
         job.clean()
         assert not job.is_completed
         job.run(dry_run=True, nmpi=2, nomp=4)
         assert job.is_completed
         # There must be input and output files afterwards
         new_inp = InputParams.from_file(job.input_name)
         assert new_inp == self.inp
         new_pos = Posinp.from_file(job.posinp_name)
         assert new_pos == self.pos
         bigdft_tool_log = Logfile.from_file(job.logfile_name)
         assert bigdft_tool_log.energy is None
Example #28
 def test_run_with_ref_data_dir(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              ref_data_dir="data-write_orbs",
              run_dir="tests",
              name="with_ref_data_dir") as job:
         assert self.inp["dft"].get("inputpsiid") is None
         assert job.inputparams["dft"].get("inputpsiid") is None
         assert not job.is_completed
         job.run(force_run=True)
         assert job.is_completed
         assert job.inputparams["dft"].get("inputpsiid") == 2
         assert self.inp["dft"].get("inputpsiid") is None
         job.run(force_run=True)
         assert job.is_completed
     assert np.isclose(job.logfile.energy, -191.74377352940274)
Example #29
 def test_run(self):
     atoms = [Atom('N', [3.571946174, 3.571946174, 3.620526682]),
              Atom('N', [3.571946174, 3.571946174, 4.71401439])]
     pos = Posinp(atoms, units="angstroem", boundary_conditions="free")
     gs = Job(posinp=pos, name='N2', run_dir='tests/phonons_N2')
     ph = Phonons(gs)
     ir = InfraredSpectrum(ph)
     assert not ir.is_completed
     ir.run()
     assert ir.is_completed
     # Test the only physically relevant infrared intensity
     i = np.argmax(ir.energies)
     np.testing.assert_almost_equal(ir.intensities[i], 1.100446469749e-06)
     # Test that running the workflow again warns a UserWarning
     with pytest.warns(UserWarning):
         ir.run()
Example #30
 def test_run_with_dry_run_with_posinp(self):
     with Job(inputparams=self.inp,
              posinp=self.pos,
              name="dry_run",
              run_dir="tests") as job:
         job.clean()
         assert not job.is_completed
         job.run(dry_run=True, nmpi=2, nomp=4)
         assert job.is_completed
         # Make sure that input, posinp and output files are created
         new_inp = InputParams.from_file(job.input_name)
         assert new_inp == self.inp
         new_pos = Posinp.from_file(job.posinp_name)
         assert new_pos == self.pos
         bigdft_tool_log = Logfile.from_file(job.logfile_name)
         assert bigdft_tool_log.energy is None