def test_loadconfigs_test2():
    """loadconfigs must raise ConfigurationError for a nonexistent path."""
    badpath = "/tmp/sometestfile"

    with pytest.raises(ex.ConfigurationError):
        loadconfigs(badpath)
def test_loadconfigs_test3():
    """loadconfigs must raise ConfigurationError for an invalid file.

    The path here exists but points at a plain text file that is not a
    valid configuration file (presumably no section headers — confirm
    against tests/standards/simplefile.txt).
    """
    conffile = os.path.join(os.getcwd(), "tests/standards/simplefile.txt")

    with pytest.raises(ex.ConfigurationError):
        loadconfigs(conffile)
def test_loadconfigs_test1():
    """Test that loadconfigs can parse a configuration file.

    Verifies the three return values of loadconfigs: the raw file
    contents, the discovered section names (in file order) and the
    parsed parameter dictionary for both sections.
    """
    conffile = os.path.join(os.getcwd(), "tests/standards/simplehostfile.txt")

    contents, sections, params = loadconfigs(conffile)

    # Raw contents are returned line-by-line, including blanks/comments.
    assert contents == [
        "[HPC1-shortqueue]",
        "queue = short",
        "user = test",
        "host = login.test.ac.uk",
        "remoteworkdir = /work/dir",
        "corespernode = 24",
        "account = acc200",
        "handler = aprun",
        "scheduler = pbs",
        "maxtime = 00:18",
        "",
        "# Comment-y goodness",
        "[HPC1]",
        "user = test",
        "host = login.test.ac.uk",
        "remoteworkdir = /work/dir2",
        "corespernode = 24",
        "account = acc300"
    ]

    assert sections == ['HPC1-shortqueue', 'HPC1']

    # Every parameter of the HPC1-shortqueue section, each checked once.
    assert params["HPC1-shortqueue"]["queue"] == "short"
    assert params["HPC1-shortqueue"]["user"] == "test"
    assert params["HPC1-shortqueue"]["host"] == "login.test.ac.uk"
    assert params["HPC1-shortqueue"]["remoteworkdir"] == "/work/dir"
    assert params["HPC1-shortqueue"]["corespernode"] == "24"
    assert params["HPC1-shortqueue"]["account"] == "acc200"
    assert params["HPC1-shortqueue"]["handler"] == "aprun"
    assert params["HPC1-shortqueue"]["scheduler"] == "pbs"
    assert params["HPC1-shortqueue"]["maxtime"] == "00:18"

    # Every parameter of the HPC1 section, each checked once.
    assert params["HPC1"]["user"] == "test"
    assert params["HPC1"]["host"] == "login.test.ac.uk"
    assert params["HPC1"]["remoteworkdir"] == "/work/dir2"
    assert params["HPC1"]["corespernode"] == "24"
    assert params["HPC1"]["account"] == "acc300"
def recovery(jobs, recoveryfile):
    """Recover a Longbow session.

    This method is for attempting to recover a failed Longbow session or to
    reconnect to an intentionally disconnected session. It will try to take
    the recovery file, written shortly after submission to recover the whole
    session. Once the data has been loaded from the recovery file and a new
    job data structure populated, this method will then re-enter the
    monitoring function to continue where it left off. Any jobs that
    finished in the meantime will be marked accordingly and then file
    staging will continue.

    Required inputs are:

    jobs (dictionary): The Longbow jobs data structure; it is populated
    in place from the parameters stored in the recovery file.

    recoveryfile (string): The file name of the recovery file (not the
    full path — it is resolved inside the ~/.longbow directory).

    Raises exceptions.RequiredinputError if the recovery file cannot be
    found.
    """
    jobfile = os.path.join(os.path.expanduser('~/.longbow'), recoveryfile)

    # Lazy %-args: the message is only formatted if the record is emitted.
    LOG.info("Attempting to find the recovery file '%s'", jobfile)

    # Load the jobs recovery file.
    if os.path.isfile(jobfile):
        LOG.info("Recovery file found.")
        _, _, jobparams = configuration.loadconfigs(jobfile)

        # Copy to jobs so when exceptions are raised the structure is
        # available.
        jobs.update(jobparams)
    else:
        raise exceptions.RequiredinputError(
            "Recovery file could not be found, make sure you haven't deleted "
            "the recovery file and that you are not providing the full path, "
            "just the file name is needed.")

    # Rejoin at the monitoring stage. This will assume that all jobs that
    # are no longer in the queue have completed.
    scheduling.monitor(jobs)

    # Cleanup the remote working directory.
    staging.cleanup(jobs)
def update(jobs, updatefile):
    """Trigger update of a disconnected Longbow session.

    This method will start the update process on an existing but
    disconnected Longbow session. All job statuses will be checked and
    updated in the recovery file and all output files will be synced
    before disconnecting.

    Required inputs are:

    jobs (dictionary): The Longbow jobs data structure; it is populated
    in place from the parameters stored in the recovery file.

    updatefile (string): The file name of the recovery file (not the
    full path — it is resolved inside the ~/.longbow directory).

    Raises exceptions.RequiredinputError if the recovery file cannot be
    found.
    """
    jobfile = os.path.join(os.path.expanduser('~/.longbow'), updatefile)

    # Lazy %-args: the message is only formatted if the record is emitted.
    LOG.info("Attempting to find the recovery file '%s'", jobfile)

    # Load the jobs recovery file.
    if os.path.isfile(jobfile):
        LOG.info("Recovery file found.")
        _, _, jobparams = configuration.loadconfigs(jobfile)

        # Copy to jobs so when exceptions are raised the structure is
        # available.
        jobs.update(jobparams)
    else:
        raise exceptions.RequiredinputError(
            "Recovery file could not be found, make sure you haven't deleted "
            "the recovery file and that you are not providing the full path, "
            "just the file name is needed.")

    # Add the updater key so downstream code knows this is an update run.
    jobs["lbowconf"]["update"] = True

    # Enter monitoring loop
    scheduling.monitor(jobs)

    # Cleanup the remote working directory.
    staging.cleanup(jobs)