def getOsStatistics():
    """
    Gather statistics about the node's operating system.

    NOTE: does not conform to the LSB layout that the GLUE schema suggests.

    @returns: OS name, OS release, OS version
    """
    name = runCommand('uname').read().strip()
    release = runCommand('uname -r').read().strip()
    version = runCommand('uname -v').read().strip()
    return name, release, version
def runTests(self):
    self.getTests()
    # perform the reports
    self.runList(self.reports, "reports")
    # perform the glite reports
    self.runList(self.glite_tests, "glite")
    # perform the critical tests
    self.runList(self.critical_tests, "critical")
    self.writeResultsPage()

    oim_plugin_enabled = cp_getBoolean(self.cp, "gip_tests",
                                       "enable_myosg_plugin", False)
    if oim_plugin_enabled:
        oim_plugin = os.path.expandvars(
            "$GIP_LOCATION/reporting/plugins/OIM_XML_Aggregator.py")
        cmd = '/bin/bash -c "%(source)s; %(plugin)s %(args)s "' % \
            {"source": self.source_cmd, "plugin": oim_plugin,
             "args": self.args}
        runCommand(cmd)
def pbsCommand(command, cp):
    """
    Run a command against the PBS batch system.

    Use this when talking to PBS; not only does it allow for integration
    into the GIP test framework, but it also filters and expands PBS-style
    line continuations.
    """
    try:
        pbsHost = cp.get("pbs", "host")
    except:
        pbsHost = ""
    if pbsHost.lower() == "none" or pbsHost.lower() == "localhost":
        pbsHost = ""
    pbs_path = cp_get(cp, "pbs", "pbs_path", ".")
    addToPath(pbs_path)
    addToPath(pbs_path + "/bin")
    cmd = command % {'pbsHost': pbsHost}
    fp = runCommand(cmd)
    #pid, exitcode = os.wait()
    #if exitcode != 0:
    #    raise Exception("Command failed: %s" % cmd)
    return pbsOutputFilter(fp)
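# A hedged usage sketch (not taken from the real PBS provider): the helper
# name and the qstat template are illustrative.  What it shows is that
# pbsCommand() fills %(pbsHost)s from the "pbs"/"host" config value (empty
# for localhost; a remote server may need the '@host' form) and returns
# output already run through pbsOutputFilter(), i.e. with PBS continuation
# lines joined.
def examplePbsQueueDump(cp):
    fp = pbsCommand('qstat -Q -f %(pbsHost)s', cp)
    for line in fp:
        print >> sys.stderr, line.rstrip()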
def getJobsInfo(vo_map, cp):
    xml = runCommand(sge_job_info_cmd)
    handler = JobInfoParser()
    parseXmlSax(xml, handler)
    job_info = handler.getJobInfo()
    queue_jobs = {}
    for job in job_info:
        user = job['JB_owner']
        state = job['state']
        queue = job.get('queue_name', '')
        if queue.strip() == '':
            queue = 'waiting'
        queue = queue.split('@')[0]
        try:
            vo = vo_map[user].lower()
        except:
            # Most likely, this means that the user is local and not
            # associated with a VO, so we skip the job.
            continue
        voinfo = queue_jobs.setdefault(queue, {})
        info = voinfo.setdefault(vo, {"running": 0, "wait": 0, "total": 0})
        if state == "r":
            info["running"] += 1
        else:
            info["wait"] += 1
        info["total"] += 1
        info["vo"] = vo
    log.debug("SGE job info: %s" % str(queue_jobs))
    return queue_jobs
def getHTCondorCEVersion(cp):
    """
    Returns the running version of the HTCondor CE.

    Copied from getOSGVersion() in gip_cluster.py
    """
    log = logging.getLogger()
    htcondorce_ver_backup = cp_get(cp, "ce", "htcondorce_version", "1.8")
    htcondorce_version_script = cp_get(cp, "gip", "htcondorce_version_script",
                                       "")
    htcondorce_ver = ''
    if len(htcondorce_version_script) == 0:
        htcondorce_version_script = vdtDir('$VDT_LOCATION/condor_ce_config_val',
                                           '/usr/bin/condor_ce_config_val')

    htcondorce_version_script = os.path.expandvars(htcondorce_version_script)
    if not os.path.exists(htcondorce_version_script):
        htcondorce_version_script = os.path.expandvars("$VDT_LOCATION/osg/bin/"
                                                       "osg-version")

    if os.path.exists(htcondorce_version_script):
        try:
            htcondorce_version_script += " HTCondorCEVersion"
            htcondorce_ver = runCommand(htcondorce_version_script).read().strip()
            htcondorce_ver = htcondorce_ver.replace('"', '')
        except Exception, e:
            log.exception(e)
def sgeCommand(command, cp):
    """
    Run a command against the SGE batch system.

    Use this when talking to SGE; not only does it allow for integration
    into the GIP test framework, but it also filters and expands SGE-style
    line continuations.
    """
    fp = runCommand(command)
    return sgeOutputFilter(fp)
def getRelease():
    """
    Get the release information for the node; if the lsb_release command
    isn't found, return generic stats based on uname from getOsStatistics.

    This function conforms to the suggestions made by the GLUE schema 1.3
    and EGEE's policy page:

    http://goc.grid.sinica.edu.tw/gocwiki/How_to_publish_the_OS_name

    @returns: OS name, OS release, OS version
    """
    name = runCommand('lsb_release -i | cut -f2').read().strip()
    release = runCommand('lsb_release -r | cut -f2').read().strip()
    version = runCommand('lsb_release -c | cut -f2').read().strip()
    if name:
        return (name, release, version)
    else:
        return getOsStatistics()
def slurmCommand(command, cp):
    """
    Run a command against the SLURM batch system.
    """
    slurm_path = cp_get(cp, "slurm", "slurm_path", ".")
    addToPath(slurm_path)
    addToPath(slurm_path + "/bin")
    fp = runCommand(command)
    return slurmOutputFilter(fp)
def getStartTimeAndPid(cp):
    pgrepOut = runCommand(
        'pgrep -f "org.apache.catalina.startup.Bootstrap start"')
    if not pgrepOut:
        return ''
    pid = int(pgrepOut.readlines()[0])
    startTimeOut = runCommand('ps -p %d -o lstart' % pid)
    if not startTimeOut:
        return ''
    startTime = startTimeOut.readlines()[1].strip()
    log.debug("Tomcat start time is %s" % startTime)
    timeTuple = time.strptime(startTime)
    glueTime = time.strftime('%FT%X', timeTuple)
    timeOffset = time.strftime('%z')
    log.debug("Tomcat time offset is %s" % timeOffset)
    if len(timeOffset) > 4:
        timeOffset = '%s:%s' % (timeOffset[0:3], timeOffset[3:])
    glueTime = glueTime + timeOffset
    return (glueTime, pid)
def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only and 'VDT_LOCATION' in os.environ:
            # Get the GIP version from the vdt-version output.
            vdt_version_cmd = os.path.expandvars("$VDT_LOCATION/vdt/bin/") + \
                'vdt-version --no-wget'
            vdt_version_out = runCommand(vdt_version_cmd).readlines()
            gip_re = re.compile('Generic Information Provider\s+(.*?)\s*-.*')
            gip_version = 'UNKNOWN'
            for line in vdt_version_out:
                m = gip_re.match(line)
                if m:
                    gip_version = m.groups()[0]
                    break
            gip_version += '; $Revision$'

            # Get the timestamp in the format we want.
            now = time.strftime("%a %b %d %T UTC %Y", time.gmtime())

            # Load up the template for GlueLocationLocalID.
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
            template = getTemplate("GlueCluster", "GlueLocationLocalID")
            cluster_id = getClusterID(cp)
            for subClusterId in getSubClusterIDs(cp):
                # Dictionary of data to fill in for GlueLocationLocalID.
                info = {'locationId': 'GIP_VERSION',
                        'subClusterId': subClusterId,
                        'clusterId': cluster_id,
                        'locationName': 'GIP_VERSION',
                        'version': gip_version,
                        'path': now,
                       }
                # Spit out our template, filled with the appropriate info.
                printTemplate(template, info)
    except Exception, e:
        # Log the error, then report it via stderr.
        log.exception(e)
        sys.stdout = sys.stderr
        raise
def runList(self, list, type):
    for test in list:
        # Skip directories; don't try to execute them.
        if not os.path.isdir(test):
            # If the entry is an .xsl file, don't execute it; just copy it to
            # the output directory.
            if os.path.splitext(test)[1] in self.extList:
                copy(test, self.output_dir)
                continue
            # OK, not a directory and not an .xsl file, so we can be
            # reasonably sure that this is an actual test... *now* execute it.
            cmd = '/bin/bash -c "%(source)s; %(test)s %(args)s -f xml"' % \
                {"source": self.source_cmd, "test": test, "args": self.args}
            print >> sys.stderr, "Running %s" % cmd
            output = runCommand(cmd).read()
            output_file = self.write_results(test, output)
            self.output_files.append({"file": output_file, "type": type})
def lsfCommand(command, cp):
    """
    Run a command for the LSF batch system.

    Config options used:
      * lsf.host.  The LSF server hostname.  Defaults to localhost.

    @returns: File-like object of the LSF output.
    """
    lsfHost = cp_get(cp, "lsf", "host", "localhost")
    if lsfHost.lower() == "none" or lsfHost.lower() == "localhost":
        lsfHost = ""
    cmd = command % {'lsfHost': lsfHost}
    log.debug('Executing LSF command %s' % cmd)
    fp = runCommand(cmd)
    return fp
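# A hedged usage sketch (not taken from the real LSF provider): the helper
# name and the bhosts template are illustrative.  It just shows that
# lsfCommand() fills %(lsfHost)s from the lsf.host option (blank when LSF is
# local) before the command is run.
def exampleLsfHostDump(cp):
    fp = lsfCommand('bhosts %(lsfHost)s', cp)
    for line in fp:
        print >> sys.stderr, line.rstrip()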
def bestman_srm_ping(cp, endpoint, section='bestman'):
    """
    Perform a srm-ping operation against a BeStMan endpoint and return the
    resulting key-value pairs.

    @param cp: Site's Config object
    @param endpoint: Endpoint to query (full service URL).
    """
    endpoint = endpoint.replace('httpg', 'srm')
    # Hardcode the proxy filename in order to play nicely with our testing
    # framework.
    if gip_testing.replace_command:
        proxy_filename = '/tmp/http_proxy'
    else:
        fd, proxy_filename = tempfile.mkstemp()
    results = {}
    try:
        if not gip_testing.replace_command:
            create_proxy(cp, proxy_filename, section=section)
        validate_proxy(cp, proxy_filename)
        cmd = 'srm-ping %s -proxyfile %s' % (endpoint, proxy_filename)
        fp = runCommand(cmd)
        output = fp.read()
        if fp.close():
            log.debug("srm-ping failed; command %s failed with output: %s" %
                      (cmd, output))
            raise ValueError("srm-ping failed.")
        results = parse_srm_ping(output)
    finally:
        try:
            os.unlink(proxy_filename)
        except:
            pass
    # Collapse the indexed gsiftpTxfServers[i] entries into a single
    # semicolon-separated gsiftpTxfServers value.
    ctr = 0
    key = 'gsiftpTxfServers[%i]' % ctr
    while key in results:
        if 'gsiftpTxfServers' not in results:
            results['gsiftpTxfServers'] = results[key]
        else:
            results['gsiftpTxfServers'] = ';'.join([
                results['gsiftpTxfServers'], results[key]])
        del results[key]
        ctr += 1
        key = 'gsiftpTxfServers[%i]' % ctr
    return results
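# A minimal caller sketch, assuming a loaded config object; the helper name
# and the endpoint URL are made up.  After the collapsing loop above, every
# GridFTP door reported by srm-ping ends up in one ';'-joined value.
def exampleReportGsiftpDoors(cp):
    endpoint = 'httpg://srm.example.edu:8443/srm/v2/server'
    results = bestman_srm_ping(cp, endpoint)
    for door in results.get('gsiftpTxfServers', '').split(';'):
        if door:
            print >> sys.stderr, "GridFTP door: %s" % door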
def validate_proxy(cp, proxy_filename):
    """
    Determine whether there is a valid proxy at a given location.

    @param proxy_filename: The file to check
    @returns: True if the proxy in proxy_filename is valid; raises
        ProxyCreateException otherwise.
    """
    #if not which('grid-proxy-info'):
    #    raise ValueError("Could not find grid-proxy-info; perhaps you forgot"
    #                     " to source $VDT_LOCATION/setup.sh?")
    cmd = 'grid-proxy-info -f %s' % proxy_filename
    fd = runCommand(cmd)
    fd.read()
    if fd.close():
        raise ProxyCreateException("Unable to validate proxy; "
                                   "command run by user daemon: %s" % cmd)
    return True
def getCreamVersion(cp):
    """
    Returns the CREAM version.
    """
    if 'VDT_LOCATION' in os.environ:
        vdt_version_cmd = os.path.expandvars("$VDT_LOCATION/vdt/bin/") + \
            'vdt-version --no-wget'
        vdt_version_out = runCommand(vdt_version_cmd).readlines()
    else:
        return 'UNKNOWN'
    cream_re = re.compile('gLite CE CREAM\s+(.*?)\s*-.*')
    creamVersion = 'UNKNOWN'
    for line in vdt_version_out:
        m = cream_re.match(line)
        if m:
            creamVersion = m.groups()[0]
            break
    return creamVersion
def bootstrapLSF(cp):
    """
    If it exists, source the profile.lsf file.
    """
    lsf_profile = cp_get(cp, "lsf", "lsf_profile", "/lsf/conf/profile.lsf")
    if not os.path.exists(lsf_profile):
        log.warning("Could not find the LSF profile file; looked in %s" %
                    lsf_profile)
        return
    log.debug('Executing LSF profile from %s' % lsf_profile)
    cmd = "/bin/sh -c 'source %s; /usr/bin/env'" % lsf_profile
    output = runCommand(cmd)
    # Import every KEY=VALUE line printed by env into our own environment.
    for line in output.readlines():
        line = line.strip()
        info = line.split('=', 2)
        if len(info) != 2:
            continue
        os.environ[info[0]] = info[1]
def main():
    cp = getTestConfig("xml")
    results_dir = os.path.expandvars(cp_get(cp, "gip_tests", "results_dir",
                                            "$VDT_LOCATION/apache/htdocs/"))
    # Check for the existence of the css, images, and includes directories in
    # the results dir; if they do not exist, copy the dirs and their contents
    # to the results dir.
    source_dir = os.path.expandvars('$GIP_LOCATION/reporting/http')
    css_dir = "%s/css" % results_dir
    images_dir = "%s/images" % results_dir
    includes_dir = "%s/includes" % results_dir

    if not os.path.isdir(css_dir):
        runCommand("cp -r %s/css %s" % (source_dir, css_dir))
    if not os.path.isdir(images_dir):
        runCommand("cp -r %s/images %s" % (source_dir, images_dir))
    if not os.path.isdir(includes_dir):
        runCommand("cp -r %s/includes %s" % (source_dir, includes_dir))
def create_proxy(cp, proxy_filename, section='bestman'):
    """
    Attempt to create a very short-lived proxy at a given location.
    """
    #if not which('grid-proxy-init'):
    #    raise ValueError("Could not find grid-proxy-init; perhaps you forgot"
    #        " to source $VDT_LOCATION/setup.sh in the environment beforehand?")
    usercert = cp_get(cp, section, "usercert",
                      "/etc/grid-security/http/httpcert.pem")
    userkey = cp_get(cp, section, "userkey",
                     "/etc/grid-security/http/httpkey.pem")
    if not os.path.exists(usercert):
        raise ProxyCreateException("Certificate to create proxy, %s, does not"
                                   " exist." % usercert)
    cmd = 'grid-proxy-init -valid 00:05 -cert %s -key %s -out %s' % \
        (usercert, userkey, proxy_filename)
    fd = runCommand(cmd)
    fd.read()
    if fd.close():
        raise ProxyCreateException("Unable to create a valid proxy; failed "
                                   "command run by user daemon: %s" % cmd)
def getOSGVersion(cp):
    """
    Returns the running version of the OSG.
    """
    osg_ver_backup = cp_get(cp, "ce", "osg_version", "OSG 1.2.0")
    osg_version_script = cp_get(cp, "gip", "osg_version_script", "")
    osg_ver = ''
    if len(osg_version_script) == 0:
        osg_version_script = '$VDT_LOCATION/osg-version'

    osg_version_script = os.path.expandvars(osg_version_script)
    if not os.path.exists(osg_version_script):
        osg_version_script = os.path.expandvars("$VDT_LOCATION/osg/bin/"
                                                "osg-version")

    if os.path.exists(osg_version_script):
        try:
            osg_ver = runCommand(osg_version_script).read().strip()
        except Exception, e:
            log.exception(e)
def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only and 'VDT_LOCATION' in os.environ:
            # Get the VDT version.
            vdt_version_cmd = os.path.expandvars("$VDT_LOCATION/vdt/bin/") + \
                'vdt-version --brief'
            vdt_version = runCommand(vdt_version_cmd).readlines()[0].strip()
            if vdt_version == "":
                vdt_version = "OLD_VDT"

            # Get the timestamp in the format we want.
            now = time.strftime("%a %b %d %T UTC %Y", time.gmtime())

            # Load up the template for GlueLocationLocalID.
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
            template = getTemplate("GlueCluster", "GlueLocationLocalID")
            cluster_id = getClusterID(cp)
            for subClusterId in getSubClusterIDs(cp):
                # Dictionary of data to fill in for GlueLocationLocalID.
                info = {'locationId': 'VDT_VERSION',
                        'subClusterId': subClusterId,
                        'clusterId': cluster_id,
                        'locationName': 'VDT_VERSION',
                        'version': vdt_version,
                        'path': now,
                       }
                # Spit out our template, filled with the appropriate info.
                printTemplate(template, info)
    except Exception, e:
        # Log the error, then report it via stderr.
        log.exception(e)
        sys.stdout = sys.stderr
        raise
def condorCommand(command, cp, info=None): #pylint: disable-msg=W0613
    """
    Execute a command in the shell.  Returns a file-like object containing
    the stdout of the command.

    Use this function instead of executing directly (os.popen); this will
    allow you to hook your providers into the testing framework.

    @param command: The command to execute
    @param cp: The GIP configuration object
    @keyword info: A dictionary-like object for Python string substitution
    @returns: a file-like object.
    """
    # Must test for an empty dict for special cases like the condor_status
    # command which has -format '%s' arguments.  Python will try to do
    # the string substitutions regardless of single quotes.
    if info:
        cmd = command % info
    else:
        cmd = command
    log.debug("Running command %s." % cmd)
    return runCommand(cmd)
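# A hedged sketch of the substitution behavior described in the docstring
# above; the helper name and the collector host are made up, and the
# condor_status invocations are generic rather than copied from a provider.
def exampleCondorScheddNames(cp, pool='collector.example.edu'):
    # No info dict: condorCommand() skips %-substitution, so the literal '%s'
    # survives for condor_status's own -format engine -- exactly the special
    # case the comment inside condorCommand() is about.
    fp = condorCommand("condor_status -schedd -format '%s\\n' Name", cp)
    names = [line.strip() for line in fp if line.strip()]

    # With an info dict, %(...)s placeholders are filled in; such a command
    # string should contain no bare %s, since the two styles don't mix.
    fp = condorCommand("condor_status -schedd -total -pool %(pool)s", cp,
                       {'pool': pool})
    print >> sys.stderr, fp.read()
    return names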
def getOSGVersion(cp):
    """
    Returns the running version of the OSG.
    """
    osg_ver_backup = cp_get(cp, "ce", "osg_version", "OSG 1.2.0")
    osg_version_script = cp_get(cp, "gip", "osg_version_script", "")
    osg_ver = ''
    if len(osg_version_script) == 0:
        osg_version_script = vdtDir('$VDT_LOCATION/osg-version',
                                    '/usr/bin/osg-version')

    osg_version_script = os.path.expandvars(osg_version_script)
    if not os.path.exists(osg_version_script):
        osg_version_script = os.path.expandvars("$VDT_LOCATION/osg/bin/"
                                                "osg-version")

    if os.path.exists(osg_version_script):
        try:
            osg_ver = runCommand(osg_version_script).read().strip()
        except Exception, e:
            log.exception(e)
def getQueueInfo(cp):
    """
    Looks up the queue and job information from SGE.

    @param cp: Configuration of site.
    @returns: A dictionary of queue data and a dictionary of job data.
    """
    queue_list = {}
    xml = runCommand(sge_queue_info_cmd)
    handler = QueueInfoParser()
    parseXmlSax(xml, handler)
    queue_info = handler.getQueueInfo()
    for queue, qinfo in queue_info.items():
        if queue == 'waiting':
            continue

        # Get the queue name.
        name = queue.split("@")[0]
        q = queue_list.get(name, {'slots_used': 0, 'slots_total': 0,
                                  'slots_free': 0, 'waiting': 0, 'name': name})
        try:
            q['slots_used'] += int(qinfo['slots_used'])
        except:
            pass
        try:
            q['slots_total'] += int(qinfo['slots_total'])
        except:
            pass
        q['slots_free'] = q['slots_total'] - q['slots_used']
        if 'arch' in qinfo:
            q['arch'] = qinfo['arch']
        q['max_running'] = q['slots_total']

        try:
            state = queue_info[queue]["state"]
            if state.find("d") >= 0 or state.find("D") >= 0:
                status = "Draining"
            elif state.find("s") >= 0:
                status = "Closed"
            else:
                status = "Production"
        except:
            status = "Production"
        q['status'] = status
        q['priority'] = 0  # No such thing that I can find for a queue

        # How do you handle queues with no limit?
        sqc = SGEQueueConfig(sgeCommand(sge_queue_config_cmd % name, cp))
        try:
            q['priority'] = int(sqc['priority'])
        except:
            pass

        max_wall_hard = convert_time_to_secs(sqc.get('h_rt', 'INFINITY'))
        max_wall_soft = convert_time_to_secs(sqc.get('s_rt', 'INFINITY'))
        max_wall = min(max_wall_hard, max_wall_soft)
        try:
            q['max_wall'] = min(max_wall, q['max_wall'])
        except:
            q['max_wall'] = max_wall

        user_list = sqc.get('user_lists', 'NONE')
        if user_list.lower().find('none') >= 0:
            user_list = re.split('\s*,?\s*', user_list)
        if 'all' in user_list:
            user_list = []
        q['user_list'] = user_list

        queue_list[name] = q

    waiting_jobs = 0
    for job in queue_info['waiting']:
        waiting_jobs += 1
    queue_list['waiting'] = {'waiting': waiting_jobs}
    return queue_list, queue_info
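# A minimal sketch of consuming getQueueInfo()'s first return value; the
# helper name is made up, and real providers feed these dictionaries into
# GLUE templates rather than printing them.
def exampleSummarizeQueues(cp):
    queue_list, _ = getQueueInfo(cp)
    for name, q in queue_list.items():
        if name == 'waiting':
            # Pseudo-entry holding only the count of waiting jobs.
            continue
        print >> sys.stderr, "%s: %i/%i slots used, status %s" % \
            (name, q['slots_used'], q['slots_total'], q['status'])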
def getLrmsInfo(cp):
    for line in runCommand(sge_version_cmd):
        return line.strip('\n')
    raise Exception("Unable to determine LRMS version info.")