def get_root_jobinfo():
    """Fetch a mock JobInfo object for the current enclosing instance.

    First tries to fetch real job data from the parent instance's job-list
    service; if the enclosing instance has no jobid/parent-uri (i.e. it is
    the system/root instance), falls back to a synthesized info dictionary.
    """
    handle = flux.Flux()
    size = handle.attr_get("size")
    try:
        # If the enclosing instance has a jobid and a parent-uri, then
        # fill in data from job-list in the parent:
        #
        jobid = JobID(handle.attr_get("jobid"))
        parent = flux.Flux(handle.attr_get("parent-uri"))
        info = JobList(parent, ids=[jobid]).fetch_jobs().get_jobs()[0]
    except OSError:
        # attr_get() or connecting to the parent failed — make a
        # best-effort attempt to create a mock job info dictionary
        uri = handle.attr_get("local-uri")
        nodelist = handle.attr_get("hostlist")
        userid = handle.attr_get("security.owner")
        info = dict(
            id=0,
            userid=int(userid),
            state=flux.constants.FLUX_JOB_STATE_RUN,
            name=".",
            ntasks=int(size),
            nnodes=int(size),
            nodelist=nodelist,
            annotations={"user": {"uri": uri}},
        )
        # broker.starttime may not exist on older brokers; t_run is
        # optional in the mock info, so ignore a failed lookup
        try:
            info["t_run"] = float(handle.attr_get("broker.starttime"))
        except OSError:
            pass
    # If 'ranks' idset came from parent, it could be confusing,
    # rewrite ranks to be relative to current instance, i.e.
    # 0-(size-1)
    #
    info["ranks"] = "0-{}".format(int(size) - 1)
    # Fetch instance-specific information for the current instance:
    job = JobInfo(info).get_instance_info()
    # If no jobid was discovered for the root instance, use RootJobID()
    if job.id == 0:
        job.id = RootJobID()
    return job
def fetch_jobs_stdin():
    """Build a job list from JSON objects supplied on stdin, one per line.

    This path exists for testing of the flux-jobs utility, so all
    filtering options are currently ignored. Exits the program with
    status 1 on the first malformed JSON line.
    """
    collected = []
    for text in fileinput.input("-"):
        try:
            entry = JobInfo(json.loads(text))
        except ValueError as err:
            LOGGER.error("JSON input error: line %d: %s", fileinput.lineno(), str(err))
            sys.exit(1)
        else:
            collected.append(entry)
    return collected
def process_entry(entry, formatter, filters, level, max_level, combine):
    """Convert one job-list entry into a Tree node.

    A running job that exposes an instance URI is treated as a parent
    and recursively expanded via load_tree(); anything else becomes a
    plain leaf node.
    """
    job = JobInfo(entry).get_instance_info()
    # pylint: disable=comparison-with-callable
    is_parent = job.uri and job.state_single == "R"
    node_label = formatter.format(job, is_parent)
    node_prefix = formatter.format_prefix(job)
    if is_parent:
        # Running instance with a URI: descend one level into its jobs
        return load_tree(
            node_label,
            formatter,
            prefix=node_prefix,
            uri=str(job.uri),
            filters=filters,
            level=level + 1,
            max_level=max_level,
            combine_children=combine,
        )
    # Leaf job: no recursion
    return Tree(node_label, node_prefix)
def test_32_job_result(self):
    """Exercise flux.job.result_async()/result() across all job outcomes:
    completed, failed (nonzero exit), failed (exec error), canceled while
    running, and canceled before reaching RUN state.
    """
    result = {}
    ids = []

    # Collect each job's result future, keyed by jobid
    def cb(future, jobid):
        result[jobid] = future

    ids.append(job.submit(self.fh, JobspecV1.from_command(["true"])))
    ids.append(job.submit(self.fh, JobspecV1.from_command(["false"])))
    ids.append(job.submit(self.fh, JobspecV1.from_command(["nosuchprog"])))
    ids.append(job.submit(self.fh, JobspecV1.from_command(["sleep", "120"])))
    # Submit held job so we can cancel before RUN state
    ids.append(job.submit(self.fh, JobspecV1.from_command(["true"]), urgency=0))
    job.cancel(self.fh, ids[4])
    for jobid in ids:
        flux.job.result_async(self.fh, jobid).then(cb, jobid)

    # Cancel the long-running sleep job (ids[3]) as soon as its shell
    # starts, so its result reflects cancellation during RUN state
    def cancel_on_start(future, jobid):
        event = future.get_event()
        if event is None:
            return
        if event.name == "shell.start":
            job.cancel(self.fh, jobid)
            future.cancel()

    job.event_watch_async(self.fh, ids[3], eventlog="guest.exec.eventlog").then(
        cancel_on_start, ids[3]
    )
    self.fh.reactor_run()
    # Every submitted job must have produced a result
    self.assertEqual(len(result.keys()), len(ids))
    # Use the custom JobInfo comparison; presumably it treats the
    # timestamp fields below as placeholders — verify against
    # assertJobInfoEqual's implementation
    self.addTypeEqualityFunc(JobInfo, self.assertJobInfoEqual)
    # ids[0]: "true" completes successfully
    # NOTE(review): this entry uses "t_start" where the others use
    # "t_submit" — confirm this asymmetry is intentional
    self.assertEqual(
        result[ids[0]].get_info(),
        JobInfo(
            {
                "id": ids[0],
                "result": flux.constants.FLUX_JOB_RESULT_COMPLETED,
                "t_start": 1.0,
                "t_run": 2.0,
                "t_cleanup": 3.0,
                "waitstatus": 0,
                "exception_occurred": False,
            }
        ),
    )
    # ids[1]: "false" fails with exit code 1 (waitstatus 1<<8)
    self.assertEqual(
        result[ids[1]].get_info(),
        JobInfo(
            {
                "id": ids[1],
                "result": flux.constants.FLUX_JOB_RESULT_FAILED,
                "t_submit": 1.0,
                "t_run": 2.0,
                "t_cleanup": 3.0,
                "waitstatus": 256,
                "exception_occurred": False,
            }
        ),
    )
    # ids[2]: exec failure raises a severity-0 "exec" exception
    self.assertEqual(
        result[ids[2]].get_info(),
        JobInfo(
            {
                "id": ids[2],
                "result": flux.constants.FLUX_JOB_RESULT_FAILED,
                "t_submit": 1.0,
                "t_run": 2.0,
                "t_cleanup": 3.0,
                "waitstatus": 32512,
                "exception_occurred": True,
                "exception_type": "exec",
                "exception_note": "task 0.*: start failed: nosuchprog: "
                "No such file or directory",
                "exception_severity": 0,
            }
        ),
    )
    # ids[3]: canceled while running — killed by SIGTERM
    self.assertEqual(
        result[ids[3]].get_info(),
        JobInfo(
            {
                "id": ids[3],
                "result": flux.constants.FLUX_JOB_RESULT_CANCELED,
                "t_submit": 1.0,
                "t_run": 2.0,
                "t_cleanup": 3.0,
                "waitstatus": 36608,  # 143<<8
                "exception_occurred": True,
                "exception_type": "cancel",
                "exception_note": "",
                "exception_severity": 0,
            }
        ),
    )
    # ids[4]: canceled before RUN state — no run/cleanup timestamps
    self.assertEqual(
        result[ids[4]].get_info(),
        JobInfo(
            {
                "id": ids[4],
                "result": flux.constants.FLUX_JOB_RESULT_CANCELED,
                "t_submit": 0.0,
                "exception_occurred": True,
                "exception_type": "cancel",
                "exception_note": "",
                "exception_severity": 0,
            }
        ),
    )
    # synchronous job.result() test
    self.assertEqual(job.result(self.fh, ids[3]), result[ids[3]].get_info())