def rst_from_perl(podfile, title): """Take a perl file and converts it to a reStructuredText with the help of pod2rst.""" logger.info("Making rst from perl: %s.", podfile) errc, output = asyncloop(["pod2rst", "--infile", podfile, "--title", title]) logger.debug(output) if errc != 0 or output == "\n": logger.warning("pod2rst failed on %s.", podfile) output = None return output
def build_annotations(pfile, basedir, outputdir):
    """Build pan annotations."""
    panccommand = ["panc-annotations", "--output-dir", outputdir, "--base-dir", basedir]
    panccommand.append(pfile)
    logger.debug("Running %s." % panccommand)
    ec, output = asyncloop(panccommand)
    logger.debug(output)
    if ec == 0 and os.path.exists(os.path.join(outputdir, "%s.annotation.xml" % pfile)):
        return True
    else:
        logger.warning("Something went wrong running '%s'." % panccommand)
        return False
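# A minimal usage sketch, not part of the original code: the helper name and the
# directories passed to it are hypothetical. It simply collects the templates for
# which build_annotations() reported failure.
def build_all_annotations(panfiles, basedir, outputdir):
    """Build annotations for every pan file; return the list of files that failed."""
    failed = [pfile for pfile in panfiles if not build_annotations(pfile, basedir, outputdir)]
    if failed:
        logger.warning("Annotation build failed for %s.", failed)
    return failed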
def main(): """The main, only test the indices passed""" opts = { "last": ("Only test last data entry", None, "store_true", False, 'L'), "first": ("Only test first data entry", None, "store_true", False, 'F'), "entries": ("Indices of data entries to test", "strlist", "store", None, 'E'), "logstash-version": ("Logstash version to test with", None, "store", DEFAULT_LOGSTASH_VERSION, 'V'), } go = simple_option(opts) indices = None if go.options.first: indices = [0] elif go.options.last: indices = [-1] elif go.options.entries: indices = [int(x) for x in go.options.entries] global _log _log = go.log cfg_name = 'logstash_%s.conf' % go.options.logstash_version cfg_file = os.path.join(os.getcwd(), 'tests', cfg_name) if not os.path.isfile(cfg_file): _log.error("Could not find logstash version %s configfile %s", go.options.logstash_version, cfg_file) _log.error("CWD: %s", os.getcwd()) sys.exit(1) prep_grok() input_data, results = get_data() if indices: for indx in indices: _log.debug("Test index %d => input: %s", indx, input_data[indx]) _log.debug("Test index %d => results: %s", indx, results[indx]) try: input_data = [input_data[idx] for idx in indices] results = [results[idx] for idx in indices] except IndexError: _log.error('Provided indices %s exceed avail data items %s', indices, len(input_data)) sys.exit(1) ec, stdout = asyncloop(cmd=LOGSTASH_CMD + [cfg_file], input="\n".join(input_data + [''])) _log.debug("async process ec: %d", ec) output = process(stdout, len(input_data)) test(output, input_data, results)
def rst_from_perl(podfile, title): """ Take a perl file and converts it to a reStructuredText with the help of pod2rst. Returns True if pod2rst worked, False if it failed. """ logger.info("Making rst from perl: %s." % podfile) ec, output = asyncloop(["pod2rst", "--infile", podfile, "--title", title]) logger.debug(output) if ec != 0 or output == "\n": logger.warning("pod2rst failed on %s." % podfile) return None else: return output
def _execute(self, cmd, changes=False):
    """Run command cmd, return exitcode, output."""
    if changes and self.dry_run:
        self.log.info("Dry run: not really executing cmd %s", cmd)
        return 0, ""

    # Execute the command and capture its output.
    ec, out = asyncloop(cmd)
    if ec:
        self.log.error("_execute command [%s] failed: ec %s, out=%s", cmd, ec, out)

    return ec, out
def get_slurm_acct_info(info_type):
    """Get slurm account info for the given cluster.

    @param info_type: SyncTypes
    """
    (exitcode, contents) = asyncloop([
        SLURM_SACCT_MGR,
        "-s",
        "-P",
        "list",
        info_type.value,
    ])
    if exitcode != 0:
        raise SacctMgrException("Cannot run sacctmgr")
    info = parse_slurm_acct_dump(contents.splitlines(), info_type)

    return info
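# A minimal usage sketch, not part of the original code: SyncTypes.accounts is an
# assumed member of the SyncTypes enum; the real member names depend on its definition.
try:
    account_info = get_slurm_acct_info(SyncTypes.accounts)
except SacctMgrException as err:
    logger.error("Could not get slurm account info: %s", err)
    account_info = []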
def test_simple_ns_asyncloop(self):
    ec, output = asyncloop([sys.executable, SCRIPT_SIMPLE, 'shortsleep'])
    self.assertEqual(ec, 0)
    self.assertTrue('shortsleep' in output.lower())
def maven_clean_compile(location):
    """Execute mvn clean and mvn compile in the given location."""
    logger.info("Doing maven clean compile in %s." % location)
    ec, output = asyncloop(["mvn", "clean", "compile"], startpath=location)
    logger.debug(output)
    return ec
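# A minimal usage sketch, not part of the original code: the module path is
# hypothetical. maven_clean_compile() returns the mvn exit code, so any non-zero
# value is treated as a failed build.
ec = maven_clean_compile("/path/to/maven/module")
if ec != 0:
    logger.error("maven clean compile failed with exit code %d.", ec)
    sys.exit(ec)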
def main(): """ main function """ parser = ArgumentParser( description=""" Calculate job resource usage for running or recently finished jobs This script can be used to check if requested resources are/were used optimally. Resources: -memory: random access memory -walltime: wall-clock time -cores: number of CPU cores are doing actual work Color codes corresponding to ratings: -green: good -yellow: medium -red: bad - wasting resources -magenta: danger - close to the limit -blue: no rating """, formatter_class=RawDescriptionHelpFormatter, ) parser.add_argument( "jobid", help="show only resources for given jobID(s) (default: show all)", nargs="*") parser.add_argument("-a", "--noalert", dest="alerts", help="do not show alert messages", action="store_false", default=True) parser.add_argument("-f", "--infile", dest="infile", help="xml file (output of 'qstat -xt')") parser.add_argument("-c", "--nocolor", dest="colors", help="do not use colors in the output", action="store_false", default=True) parser.add_argument("--csv", dest="csv", help="print as csv", action="store_true") parser.add_argument( "-s", "--state", dest="state", help= 'show only jobs with given state(s) as comma-separated list: "Q,H,R,E,C" (default: show all)', ) parser.add_argument("-d", "--demo", dest="demo", help="show demo output and exit", action="store_true") parser.add_argument("-v", "--version", dest="version", help="show version and exit", action="store_true") args = parser.parse_args() if args.version: print("version: %s" % VERSION) sys.exit() if args.jobid: for i in args.jobid: try: int(i) except ValueError: raise ValueError("%s is not a valid jobID" % i) if args.demo: demo_myresources(alerts=args.alerts) sys.exit() if args.infile: try: tree = ET.parse(args.infile) except (IOError, ET.ParseError): print("Error parsing xml file: %s" % args.infile) sys.exit() else: _, xmlstring = asyncloop("qstat -xt") tree = ET.ElementTree(ET.fromstring(xmlstring)) root = tree.getroot() if not root: sys.exit() if args.csv: write_header_csv() else: write_header() for jobdata in root: job = parse_xml(jobdata) if args.jobid: if job["jobid"] not in args.jobid: continue if args.state: states = args.state.split(",") if job["state"] not in states: continue job = calc_usage(job) if args.csv: csvstring = csv_string(job) write_string(csvstring) else: ustring = usage_string(job, colors=args.colors) write_string(ustring) if args.alerts: write_alerts(job) print("")