def parse_config_file():
    """Read the PBS_power hook configuration file and set module globals.

    Locates the config file via the PBS_HOOK_CONFIG_FILE environment
    variable or the standard server_priv / mom_priv hook directories,
    parses it as JSON, and assigns the power-management tunables
    (pbs_home, pbs_exec, power_ramp_rate_enable, power_on_off_enable,
    node_idle_limit, min_node_down_delay, max_jobs_analyze_limit,
    max_concurrent_nodes), falling back to defaults for missing or
    invalid values.

    Raises:
        Exception: if no config file can be found or it cannot be read
            or parsed as JSON.
    """
    # Turn everything off by default. These settings may be modified
    # when the configuration file is read.
    global pbs_home
    global pbs_exec
    global power_ramp_rate_enable
    global power_on_off_enable
    global node_idle_limit
    global min_node_down_delay
    global max_jobs_analyze_limit
    global max_concurrent_nodes

    try:
        # This block will work for PBS Pro versions 13 and later
        pbs_conf = pbs.get_pbs_conf()
        pbs_home = pbs_conf['PBS_HOME']
        pbs_exec = pbs_conf['PBS_EXEC']
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed; pbs.event().accept() ends hook execution.
        pbs.logmsg(pbs.EVENT_DEBUG,
                   "PBS_HOME needs to be defined in the config file")
        pbs.logmsg(pbs.EVENT_DEBUG, "Exiting the power hook")
        pbs.event().accept()

    # Identify the config file and read in the data
    config_file = os.environ.get("PBS_HOOK_CONFIG_FILE", '')
    if not config_file:
        tmpcfg = os.path.join(pbs_home, 'server_priv', 'hooks',
                              'PBS_power.CF')
        if os.path.isfile(tmpcfg):
            config_file = tmpcfg
    if not config_file:
        tmpcfg = os.path.join(pbs_home, 'mom_priv', 'hooks',
                              'PBS_power.CF')
        if os.path.isfile(tmpcfg):
            config_file = tmpcfg
    if not config_file:
        raise Exception("Config file not found")
    pbs.logmsg(pbs.EVENT_DEBUG3, "Config file is %s" % config_file)

    try:
        # 'with' guarantees the descriptor is closed even when json.load
        # raises (the original skipped fd.close() on a parse error).
        with open(config_file, 'r') as fd:
            config = json.load(fd)
    except IOError:
        raise Exception("I/O error reading config file")
    except ValueError:
        # json.load raises ValueError (JSONDecodeError) on malformed JSON;
        # narrowed from a bare 'except:'.
        raise Exception("Error reading config file")

    # Assign default values to attributes
    power_ramp_rate_enable = False
    power_on_off_enable = False
    node_idle_limit = 1800
    min_node_down_delay = 1800
    max_jobs_analyze_limit = 100
    max_concurrent_nodes = 10

    # Now assign values read from config file
    if 'power_on_off_enable' in config:
        power_on_off_enable = config['power_on_off_enable']
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "power_on_off_enable is set to %s" %
                   str(power_on_off_enable))
    if 'power_ramp_rate_enable' in config:
        power_ramp_rate_enable = config['power_ramp_rate_enable']
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "power_ramp_rate_enable is set to %s" %
                   str(power_ramp_rate_enable))
    if 'node_idle_limit' in config:
        node_idle_limit = int(config['node_idle_limit'])
        # 'not' also rejects 0; zero or negative falls back to the default
        if not node_idle_limit or node_idle_limit < 0:
            node_idle_limit = 1800
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "node_idle_limit is set to %d" % node_idle_limit)
    if 'min_node_down_delay' in config:
        min_node_down_delay = int(config['min_node_down_delay'])
        if not min_node_down_delay or min_node_down_delay < 0:
            min_node_down_delay = 1800
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "min_node_down_delay is set to %d" % min_node_down_delay)
    if 'max_jobs_analyze_limit' in config:
        max_jobs_analyze_limit = int(config['max_jobs_analyze_limit'])
        if not max_jobs_analyze_limit or max_jobs_analyze_limit < 0:
            max_jobs_analyze_limit = 100
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "max_jobs_analyze_limit is set to %d" %
                   max_jobs_analyze_limit)
    if 'max_concurrent_nodes' in config:
        max_concurrent_nodes = int(config['max_concurrent_nodes'])
        if not max_concurrent_nodes or max_concurrent_nodes < 0:
            max_concurrent_nodes = 10
        pbs.logmsg(pbs.EVENT_DEBUG3,
                   "max_concurrent_nodes is set to %d" %
                   max_concurrent_nodes)
except ValueError: pbs.logmsg(pbs.EVENT_DEBUG3, "Error reading json output for job %s" % jobid) continue if job_state == 'Q': if start_time and evnlist: for chunk in evnlist.split("+"): vn = chunk.split(":")[0][1:] if vn not in exec_vnodes: exec_vnodes[vn] = {} exec_vnodes[vn]["neededby"] = start_time elif start_time < exec_vnodes[vn]["neededby"]: exec_vnodes[vn]["neededby"] = start_time i += 1 pbs_conf = pbs.get_pbs_conf() if 'PBS_HOME' in pbs_conf: pbs_home = pbs_conf['PBS_HOME'] else: pbs.logmsg(pbs.EVENT_DEBUG, "PBS_HOME needs to be defined in the config file") pbs.logmsg(pbs.EVENT_DEBUG, "Exiting the power hook") pbs.event().accept() # Identify the nodes file and read in the data node_file = '' sleep_node_list = [] node_file = os.path.join(pbs_home, 'server_priv', 'hooks', 'tmp', 'pbs_power_nodes_file') if os.path.isfile(node_file) and os.stat(node_file).st_size: pbs.logmsg(pbs.EVENT_DEBUG3, "pbs_power_nodes_file is %s" % node_file)