Example #1
import unittest

# FileLock and FileLockException are assumed to be imported from the module
# under test; the import is not shown in the original snippet.
class FileLockTest(unittest.TestCase):
    def setUp(self):
        self.file_name = "__lock__"
        self.lock = FileLock(self.file_name, timeout=1)
        self.lock.acquire()

    def test_raise(self):
        with self.assertRaises(FileLockException):
            new_lock = FileLock(self.file_name, timeout=1)
            new_lock.acquire()

    def tearDown(self):
        self.lock.release()
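The test above exercises the lock protocol these examples rely on: acquire() takes an exclusive lock or raises FileLockException once the timeout expires, and release() frees it. A minimal sketch of such a class, assuming the classic O_CREAT | O_EXCL lock-file technique (the actual implementation used by these examples may differ):

import os
import time

class FileLockException(Exception):
    pass

class FileLock:
    def __init__(self, file_name, timeout=10, delay=0.05):
        self.lockfile = file_name + ".lock"
        self.timeout = timeout
        self.delay = delay
        self.fd = None

    def acquire(self):
        start = time.time()
        while True:
            try:
                # O_EXCL makes creation atomic: open fails if the file exists.
                self.fd = os.open(self.lockfile,
                                  os.O_CREAT | os.O_EXCL | os.O_RDWR)
                return
            except FileExistsError:
                if time.time() - start >= self.timeout:
                    raise FileLockException(
                        "Timeout waiting for %s" % self.lockfile)
                time.sleep(self.delay)

    def release(self):
        if self.fd is not None:
            os.close(self.fd)
            os.remove(self.lockfile)
            self.fd = None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()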
Example #2
def save_lastnode_id():
    """Save the id of the last node created."""
    # init_counter, _COUNTER and _COUNTER_FILE are module-level globals
    # defined elsewhere in the original source file.
    init_counter()

    with FileLock(_COUNTER_FILE):
        with AtomicFile(_COUNTER_FILE, mode="w") as fh:
            fh.write("%d\n" % _COUNTER)
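The example above stacks two safeguards: FileLock serializes writers across processes, while AtomicFile ensures a reader never sees a half-written counter file. A minimal sketch of the atomic-write part, assuming the usual write-to-temp-then-rename technique (the real AtomicFile may differ):

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def atomic_file(path, mode="w"):
    """Write to a temp file in the same directory, then rename it over
    `path`. os.replace is atomic on both POSIX and Windows."""
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)))
    try:
        with os.fdopen(fd, mode) as fh:
            yield fh
        os.replace(tmp_path, path)
    except Exception:
        os.unlink(tmp_path)
        raise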
Example #3
def gbrv_rundb(options):
    """Build flow and run it."""
    dbpath = os.path.abspath(options.path)
    retcode = 0

    # Get list of jobs to execute.
    with FileLock(dbpath):
        outdb = GbrvOutdb.from_file(dbpath)
        jobs = outdb.find_jobs_torun(options.max_njobs)
        if not jobs:
            cprint("Nothing to do, returning 0", "yellow")
            return 0

    gbrv_factory = GbrvCompoundsFactory(xc=outdb["xc_name"])

    # Build workdir from the first and last formula plus an md5 digest of all
    # formulas (hashlib requires bytes, hence the explicit encode).
    s = "-".join(job.formula for job in jobs)
    m = hashlib.md5()
    m.update(s.encode("utf-8"))
    workdir = os.path.join(
        os.getcwd(), "GBRV_OUTDB_" + jobs[0].formula + "_" + jobs[-1].formula +
        "_" + m.hexdigest())
    flow = GbrvCompoundsFlow(workdir=workdir)

    for job in jobs:
        for accuracy in ("normal", "high"):
            ecut = max(p.hint_for_accuracy(accuracy).ecut for p in job.pseudos)
            pawecutdg = max(
                p.hint_for_accuracy(accuracy).pawecutdg for p in job.pseudos)
            if ecut <= 0.0:
                raise RuntimeError("Pseudos do not have hints")
            # Increase by 10 since many pseudos only have ppgen_hints
            #ecut += 10
            work = gbrv_factory.relax_and_eos_work(accuracy,
                                                   job.pseudos,
                                                   job.formula,
                                                   job.struct_type,
                                                   ecut=ecut,
                                                   pawecutdg=pawecutdg)

            # Attach the database to the work to trigger the storage of the results.
            flow.register_work(work.set_outdb(dbpath))

    print("Working in:", flow.workdir)
    flow.build_and_pickle_dump()  #abivalidate=options.dry_run)
    if options.dry_run: return 0

    # Run the flow with the scheduler (enable smart_io)
    flow.use_smartio()
    retcode += flow.make_scheduler().start()

    return retcode
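The workdir name above encodes the first and last formula plus an md5 digest of the full job list, so rerunning over the same jobs maps to the same directory. In isolation, with hypothetical formulas:

import hashlib
import os

formulas = ["LiF", "NaCl", "MgO"]  # hypothetical job.formula values
s = "-".join(formulas)
digest = hashlib.md5(s.encode("utf-8")).hexdigest()
workdir = os.path.join(os.getcwd(),
                       "GBRV_OUTDB_%s_%s_%s" % (formulas[0], formulas[-1], digest))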
Example #4
    def add_entry_to_dojoreport(self,
                                entry,
                                overwrite_data=False,
                                pop_trial=False):
        """
        Write/update the DOJO_REPORT section of the pseudopotential.
        Important parameters such as the name of the dojo_trial and the energy cutoff
        are provided by the sub-class.
        Client code is responsible for preparing the dictionary with the data.

        Args:
            entry: Dictionary with results.
            overwrite_data: If False, the routine raises an exception if this entry is
                already filled.
            pop_trial: True if the trial should be removed before adding the new entry.
        """
        root, ext = os.path.splitext(self.dojo_pseudo.filepath)
        djrepo = root + ".djrepo"
        self.history.info("Writing dojoreport data to %s" % djrepo)

        # Update the file content while holding a FileLock.
        with FileLock(djrepo):
            # Read report from file.
            file_report = DojoReport.from_file(djrepo)

            # Create new entry if not already there
            dojo_trial = self.dojo_trial

            if pop_trial:
                file_report.pop(dojo_trial, None)

            if dojo_trial not in file_report:
                file_report[dojo_trial] = {}

            # Convert float to string with 1 decimal digit.
            dojo_ecut = "%.1f" % self.ecut

            # Check that we are not going to overwrite data.
            if dojo_ecut in file_report[dojo_trial]:
                if not overwrite_data:
                    raise RuntimeError(
                        "dojo_ecut %s already exists in %s. Cannot overwrite data"
                        % (dojo_ecut, dojo_trial))
                else:
                    file_report[dojo_trial].pop(dojo_ecut)

            # Update file_report by adding the new entry and write new file
            file_report[dojo_trial][dojo_ecut] = entry

            # Write new dojo report and update the pseudo attribute
            file_report.json_write()
            self._pseudo.dojo_report = file_report
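Stripped of the DojoReport specifics, the method above is the standard lock -> read -> mutate -> write-back pattern. A generic sketch (locked_json_update and the plain-JSON layout are illustrative, not part of the library):

import json

def locked_json_update(path, update_fn):
    # Hold the lock across the whole read-modify-write cycle so concurrent
    # writers cannot interleave and lose updates.
    with FileLock(path):
        with open(path) as fh:
            data = json.load(fh)
        update_fn(data)
        with open(path, "w") as fh:
            json.dump(data, fh, indent=2, sort_keys=True)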
Example #5
    @classmethod
    def insert_results(cls, filepath, struct_type, formula, accuracy, pseudos,
                       results):
        """
        Update the entry in the database.
        """
        with FileLock(filepath):
            outdb = cls.from_file(filepath)
            old_dict = outdb[struct_type][formula]
            if not isinstance(old_dict, dict):
                old_dict = {}
            old_dict[accuracy] = results
            outdb[struct_type][formula] = old_dict
            with AtomicFile(filepath, mode="wt") as fh:
                json.dump(outdb, fh, indent=-1, sort_keys=True)
Example #6
    def write_pid_file(self):
        """
        This function checks if we are already running the AbiPy |Flow| with a :class:`PyFlowScheduler`.
        Raises: Flow.Error if the pid file of the scheduler exists.
        """
        if os.path.exists(self.pid_path):
            raise self.Error("""\n\
                pid_path
                %s
                already exists. There are two possibilities:

                   1) There's another instance of PyFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_path and restart the scheduler.

                Exiting""" % self.pid_path)

        # Make dir and file if not present.
        if not os.path.exists(os.path.dirname(self.pid_path)):
            os.makedirs(os.path.dirname(self.pid_path))

        import json
        d = dict(
            pid=os.getpid(),
            host=self.host,
            port=self.port,
        )

        with FileLock(self.pid_path):
            with open(self.pid_path, "wt") as fh:
                json.dump(d, fh)
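The error message above leaves it to the user to decide between a live scheduler and a stale pid file. A sketch of how the two cases could be told apart automatically on POSIX, using signal 0 as an existence probe (pid_file_is_stale is a hypothetical helper, not part of PyFlowScheduler):

import json
import os

def pid_file_is_stale(pid_path):
    with open(pid_path) as fh:
        pid = json.load(fh)["pid"]
    try:
        os.kill(pid, 0)  # signal 0 checks existence without sending anything
    except ProcessLookupError:
        return True      # case 2: the process is gone, the file is stale
    except PermissionError:
        return False     # a process exists but belongs to another user
    return False         # case 1: a scheduler is still running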
Example #7
    def check_and_write_pid_file(self):
        """
        This function checks if we already have a running instance of :class:`MongoFlowScheduler`.
        Raises: RuntimeError if the pid file of the scheduler exists.
        """
        if os.path.exists(self.pid_path):
            raise RuntimeError("""\n\
                pid_path
                %s
                already exists. There are two possibilities:

                   1) There's another instance of MongoFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_path and restart the scheduler.

                Exiting""" % self.pid_path)

        # Make dir and file if not present.
        if not os.path.exists(os.path.dirname(self.pid_path)):
            os.makedirs(os.path.dirname(self.pid_path))

        d = dict(pid=os.getpid())

        with FileLock(self.pid_path):
            with open(self.pid_path, "wt") as fh:
                json.dump(d, fh)
Example #8
    def test_raise(self):
        with self.assertRaises(FileLockException):
            new_lock = FileLock(self.file_name, timeout=1)
            new_lock.acquire()
Example #9
    def setUp(self):
        self.file_name = "__lock__"
        self.lock = FileLock(self.file_name, timeout=1)
        self.lock.acquire()
Example #10
    def on_all_ok(self):
        """
        Results are written to the dojoreport.
        """
        def vol2a(vol):
            """Function to compute cubic a0 from primitive v0 (depends on struct_type)"""
            return (4 * vol) ** (1/3.)

        entries = {}
        for task in self:
            ecut = task.input["ecut"]
            with task.open_hist() as hist:
                final_structure = hist.final_structure
                initial_energy = hist.etotals[0]

                # Convert float to string with 1 decimal digit.
                dojo_ecut = "%.1f" % ecut
                entries[dojo_ecut] = {
                        "relaxed_a": vol2a(final_structure.volume),
                        "initial_energy_ev_per_atom": float(initial_energy) / len(final_structure),
                }

        # Add the collected entries to the dojo report.
        djrepo = self.djrepo_path

        # Update the file content while holding a FileLock.
        with FileLock(djrepo):
            # Read report from file.
            file_report = DojoReport.from_file(djrepo)

            # Create new entry if not already there
            dojo_trial = self.dojo_trial

            if dojo_trial not in file_report:
                file_report[dojo_trial] = {}

            # Update file_report by adding the new entries.
            for dojo_ecut, entry in entries.items():
                file_report[dojo_trial][dojo_ecut] = entry

            # Write new dojo report and update the pseudo attribute
            file_report.json_write()
            self._pseudo.dojo_report = file_report

        return dict(returncode=0, message="Lattice parameters computed and stored in the djrepo file")
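A quick sanity check of the vol2a helper above for an fcc struct_type, where the primitive-cell volume is a0**3 / 4 (hypothetical lattice parameter):

a0 = 4.05                # Angstrom, hypothetical
v_prim = a0 ** 3 / 4     # fcc primitive-cell volume
assert abs((4 * v_prim) ** (1 / 3.) - a0) < 1e-12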