def getVoQueues(cp):
    """
    Determine the (vo, queue) tuples for this site.

    Central configuration controls which VOs are advertised: a site may
    blacklist whole queues ([pbs] queue_exclude), whitelist VOs for a queue
    ((queue)_whitelist), or blacklist VOs from a queue ((queue)_blacklist).
    Queues absent from the pbs.rvf RVF file are also skipped.

    @param cp: Site configuration
    @returns: A list of (vo, queue) tuples representing the queues each VO
        is allowed to run in.
    """
    mapper = VoMapper(cp)
    try:
        excluded_queues = [x.strip() for x in \
            cp.get("pbs", "queue_exclude").split(',')]
    except:
        excluded_queues = []
    results = []
    queue_info = getQueueInfo(cp)
    rvf_queues = parseRvf('pbs.rvf').get('queue', {}).get('Values', None)
    if rvf_queues:
        rvf_queues = rvf_queues.split()
        log.info("The RVF lists the following queues: %s." % \
            ', '.join(rvf_queues))
    for queue, qinfo in queue_info.items():
        if rvf_queues and queue not in rvf_queues:
            continue
        if queue in excluded_queues:
            continue
        candidates = sets.Set(voList(cp, mapper))
        try:
            white = sets.Set([x.strip() for x in cp.get("pbs",
                "%s_whitelist" % queue).split(',')])
        except:
            white = sets.Set()
        try:
            black = sets.Set([x.strip() for x in cp.get("pbs",
                "%s_blacklist" % queue).split(',')])
        except:
            black = sets.Set()
        # Respect the queue's ACLs when it has any.
        if 'users' in qinfo or 'groups' in qinfo:
            candidates.intersection_update(parseAclInfo(queue, qinfo, mapper))
        # Force any VO in the whitelist to show up in the candidate set,
        # even if it isn't in the acl_users / acl_groups.
        for vo in white:
            candidates.add(vo)
        # Apply white and black lists.
        for vo in candidates:
            banned = vo in black or "*" in black
            if banned and (len(white) == 0 or vo not in white):
                continue
            results.append((vo, queue))
    return results
def compareLists(l1, l2):
    """
    Compare two lists of items, ignoring order and duplicates.

    The lists are converted into sets and considered equal when their
    symmetric difference is empty.

    @param l1: First iterable of items.
    @param l2: Second iterable of items.
    @returns: True if both contain the same set of items, False otherwise.
    """
    # Idiom fix: return the comparison directly instead of the
    # `if ...: return True / return False` pattern.
    return len(sets.Set(l1).symmetric_difference(sets.Set(l2))) == 0
def getVoQueues(cp):
    """
    Determine the (vo, queue) tuples advertised for this SGE site.

    Honors [sge] queue_exclude, per-queue (queue)_whitelist /
    (queue)_blacklist options, the sge.rvf RVF queue list, and the queue's
    own user_list ACL.  The special SGE "waiting" queue is always skipped.

    @param cp: Site configuration
    @returns: A list of (vo, queue) tuples.
    """
    mapper = VoMapper(cp)
    try:
        excluded_queues = [x.strip() for x in \
            cp.get("sge", "queue_exclude").split(',')]
    except:
        excluded_queues = []
    # SGE has a special "waiting" queue -- ignore it.
    excluded_queues.append('waiting')
    results = []
    queue_list, q = getQueueInfo(cp)
    rvf_queues = parseRvf('sge.rvf').get('queue', {}).get('Values', None)
    if rvf_queues:
        rvf_queues = rvf_queues.split()
        log.info("The RVF lists the following queues: %s." % \
            ', '.join(rvf_queues))
    else:
        log.warning("Unable to load a RVF file for SGE.")
    for queue, qinfo in queue_list.items():
        if rvf_queues and queue not in rvf_queues:
            continue
        if queue in excluded_queues:
            continue
        candidates = sets.Set(voList(cp, mapper))
        try:
            white = sets.Set([x.strip() for x in cp.get("sge",
                "%s_whitelist" % queue).split(',')])
        except:
            white = sets.Set()
        try:
            black = sets.Set([x.strip() for x in cp.get("sge",
                "%s_blacklist" % queue).split(',')])
        except:
            black = sets.Set()
        # Narrow down to VOs permitted by the queue's ACL, when present.
        if 'user_list' in qinfo:
            acl_vos = parseAclInfo(queue, qinfo, mapper)
            if acl_vos:
                candidates.intersection_update(acl_vos)
        for vo in candidates:
            banned = vo in black or "*" in black
            if banned and (len(white) == 0 or vo not in white):
                continue
            results.append((vo, queue))
    return results
def parse_pool_manager(pool_manager_output):
    """
    Parse the dCache PoolManager setup dump into its topology objects.

    Each line of the input is matched against module-level compiled
    regexes (create_pool, create_pgroup, addto_pgroup, create_link,
    add_link, create_lg, add_lg, linkset_re) describing the PoolManager
    "psu" commands.

    @param pool_manager_output: Raw text output of the PoolManager setup.
    @returns: (pgroups, lgroups, links, link_settings, pools) where
        - pgroups: dict of pool group name -> Set of pool names
        - lgroups: dict of link group name -> Set of link names
        - links: dict of link name -> Set of pool group names
        - link_settings: dict of link name -> dict with 'read', 'write',
          'cache', 'p2p' preference values (regex captures; presumably
          numeric strings -- TODO confirm against linkset_re)
        - pools: Set of all pool names
    """
    pgroups = {}
    lgroups = {}
    links = {}
    link_settings = {}
    pools = sets.Set()
    for line in pool_manager_output.splitlines():
        # psu create pool <name>
        m = create_pool.match(line)
        if m:
            pools.add(m.groups()[0])
            continue
        # psu create pgroup <name>
        m = create_pgroup.match(line)
        if m:
            pgroups[m.groups()[0]] = sets.Set()
            continue
        # psu addto pgroup <group> <pool>
        # NOTE: assumes the pgroup was created earlier in the dump;
        # a KeyError here would propagate.
        m = addto_pgroup.match(line)
        if m:
            group, pool = m.groups()
            pgroups[group].add(pool)
            continue
        # psu create link <name>
        m = create_link.match(line)
        if m:
            links[m.groups()[0]] = sets.Set()
            continue
        # psu add link <link> <pgroup>
        m = add_link.match(line)
        if m:
            link, pgroup = m.groups()
            links[link].add(pgroup)
            continue
        # psu create linkGroup <name>
        m = create_lg.match(line)
        if m:
            lgroups[m.groups()[0]] = sets.Set()
            continue
        # psu addto linkGroup <lgroup> <link>
        m = add_lg.match(line)
        if m:
            lgroup, link = m.groups()
            lgroups[lgroup].add(link)
            continue
        # psu set link <link> -readpref=... -writepref=... -cachepref=...
        # -p2ppref=...  (order per linkset_re)
        m = linkset_re.match(line)
        if m:
            link, read, write, cache, p2p = m.groups()
            link_settings[link] = {'read': read, 'write': write,
                'cache': cache, 'p2p': p2p}
    return pgroups, lgroups, links, link_settings, pools
def guessVO(cp, group):
    """
    From the group name, guess my VO name.

    Resolution order:
      1. Explicit site configuration (determineGroupVOsFromConfig).
      2. The VO mapper applied to a normalized group name (the word
         'group' and any '-'/'_' separators stripped).
      3. Any known VO whose name appears as a substring of the group name.
      4. Fall back to the normalized group name itself.

    @param cp: Site configuration
    @param group: Batch-system group name
    @returns: A list of candidate VO names.
    """
    mapper = VoMapper(cp)
    bycp = determineGroupVOsFromConfig(cp, group, mapper)
    vos = voList(cp, vo_map=mapper)
    # Collect every VO whose name is embedded in the group name.
    byname = sets.Set()
    for vo in vos:
        if group.find(vo) >= 0:
            byname.add(vo)
    # Normalize the group name: drop the 'group' marker and separators.
    altname = group.replace('group', '')
    altname = altname.replace('-', '')
    altname = altname.replace('_', '')
    altname = altname.strip()
    try:
        bymapper = mapper[altname]
    except:
        bymapper = None
    if bycp is not None:
        return bycp
    elif bymapper:
        return [bymapper]
    elif byname:
        # Bug fix: return a list like every other branch instead of
        # leaking the internal Set object to callers.
        return list(byname)
    else:
        return [altname]
def handle_providers(entries, providers):
    """
    Merge provider output into the list of GIP entries.

    Entries whose DN matches an entry produced by a provider are removed,
    then every provider entry is appended.

    @param entries: A list of LdapData objects
    @param providers: A list of provider information dictionaries.
    @returns: The altered entries list.
    """
    # Parse every provider's LDIF output into LdapData objects.
    fresh_entries = []
    for p_info in providers.values():
        if 'output' in p_info:
            fp = cStringIO.StringIO(p_info['output'])
            fresh_entries += read_ldap(fp, multi=True)
    # Collect the existing entries whose DN collides with a provider entry.
    duplicates = sets.Set()
    for entry in entries:
        for p_entry in fresh_entries:
            if compareDN(entry, p_entry):
                duplicates.add(entry)
    for entry in duplicates:
        log.debug("Removing entry %s" % entry)
        try:
            entries.remove(entry)
        except ValueError:
            pass
    # Now add all the new entries from the providers.
    entries.extend(fresh_entries)
    return entries
def parse_SAs(self):
    """
    Parse the BeStMan static space tokens into SA and VOInfo records.

    Reads self.info keys of the form 'staticToken(N)' for N = 0, 1, ...
    until one is missing.  Each token value is a whitespace-separated list
    of key=value settings (a bare word is treated as the token name).
    Populates self.sas (GlueSA dictionaries) and self.voinfos (GlueVOInfo
    dictionaries, one per VO allowed in each space).
    """
    self.sas = []
    self.voinfos = []
    cntr = 0
    token = self.info.get('staticToken(%i)' % cntr, None)
    while token:
        sa_info = {}
        sa_name = None
        for info in token.split():
            try:
                key, val = info.split('=')
            except:
                # A bare word (no '=') names the token itself.
                val = info
                key = 'name'
            if key == 'name':
                # SA IDs are always advertised as replica/online.
                sa_info['saLocalID'] = '%s:%s:%s' % (val, 'replica',
                    'online')
                sa_name = val
            elif key == 'size':
                try:
                    # size is presumably in bytes: KB = /1024,
                    # GB = KB/1024^2 -- TODO confirm units with BeStMan.
                    size = int(val)
                    size_kb = size / 1024
                    size_gb = size_kb / 1024**2
                except Exception, e:
                    log.exception(e)
                    size, size_kb, size_gb = 0, 0, 0
                sa_info['totalOnline'] = size_gb
                # Static tokens count as fully reserved space.
                sa_info['reservedOnline'] = size_gb
        sa_info['path'] = self.getPathForSA(space=sa_name)
        vo_info = {}  # NOTE(review): appears unused -- confirm
        vos = self.getVOsForSpace(sa_name)
        sa_vos = sets.Set()
        for vo in vos:
            sa_vos.add(vo)
            #if not vo.startswith('VO'):
            #    sa_vos.add('VO: %s' % vo)
        # Sort for deterministic output ordering.
        sa_vos = list(sa_vos)
        sa_vos.sort()
        sa_info['acbr'] = '\n'.join(['GlueSAAccessControlBaseRule: %s' % i \
            for i in sa_vos])
        # One GlueVOInfo object per VO allowed in this space.
        for vo in self.getVOsForSpace(sa_name):
            id = '%s:%s' % (vo, sa_name)
            tag = sa_info.get('tag', sa_name)
            path = self.getPathForSA(space=sa_name, vo=vo)
            info = {'voInfoID': id,
                'name': 'BeStMan static space %s for VO %s' % (sa_name, vo),
                'path': path,
                'tag': tag,
                'acbr': 'GlueVOInfoAccessControlBaseRule: %s' % vo,
                'saLocalID': sa_info.get('saLocalID', 'UNKNOWN_SA')}
            self.voinfos.append(info)
        self.sas.append(sa_info)
        cntr += 1
        token = self.info.get('staticToken(%i)' % cntr, None)
def determineGroupVOsFromConfig(cp, group, voMap):
    """
    Determine the VOs allowed in a Condor group, based solely on the
    config files.

    The legacy (groupname)_vos option wins when present; otherwise the
    (groupname)_whitelist / (groupname)_blacklist options are applied
    (mimicking the PBS configuration).  Returns None when neither style
    of configuration exists for the group.
    """
    # This is the old behavior. Base everything on (groupname)_vos.
    explicit = cp_get(cp, "condor", "%s_vos" % group, None)
    if explicit:
        return [x.strip() for x in explicit.split(',')]
    # This is the new behavior: white/black lists, as for PBS.
    candidates = sets.Set(voList(cp, voMap))
    try:
        white = sets.Set([x.strip() for x in cp.get("condor",
            "%s_whitelist" % group).split(',')])
    except:
        white = sets.Set()
    try:
        black = sets.Set([x.strip() for x in cp.get("condor",
            "%s_blacklist" % group).split(',')])
    except:
        black = sets.Set()
    # Return None if there's no explicit white/black list setting.
    if len(white) == 0 and len(black) == 0:
        return None
    # Force any VO in the whitelist to show up in the candidate set, even
    # if it isn't in the acl_users / acl_groups.
    for vo in white:
        candidates.add(vo)
    # Apply white and black lists.
    kept = sets.Set()
    for vo in candidates:
        banned = vo in black or "*" in black
        if banned and (len(white) == 0 or vo not in white):
            continue
        kept.add(vo)
    return list(kept)
def parseAclInfo(queue, qinfo, vo_mapper): """ Take a queue information dictionary and determine which VOs are in the ACL list. The used keys are: - users: A set of all user names allowed to access this queue. - groups: A set of all group names allowed to access this queue. @param queue: Queue name (for logging purposes). @param qinfo: Queue info dictionary @param vo_mapper: VO mapper object @returns: A set of allowed VOs """ # TODO: find a sample SGE site which uses this! return [] user_list = qinfo.get('user_list', sets.Set()) users = sets.Set() all_groups = grp.getgrall() all_users = pwd.getpwall() group_dict = {} group_list = [i[1:] for i in user_list if i.startswith('@')] user_list = [i for i in user_list if not i.startswith('@')] for group in all_groups: if group[0] in group_list or group[2] in group_list: users.add(group[0]) group_dict[group[2]] = group[0] for user in all_users: try: group = group_dict[user[3]] except: continue if group[0] in group_list or user[3] in group_list: users.add(group[0]) vos = sets.Set() for user in users: try: vos.add(vo_mapper[user]) except: pass log.info("The acl info for queue %s (users %s, groups %s) mapped to %s." % \ (queue, ', '.join(user_list), ', '.join(group_list), ', '.join(vos))) return vos
def getQueueList(cp):
    """
    Returns a list of all the queue names that are supported.

    @param cp: Site configuration
    @returns: List of strings containing the queue names.
    """
    # Collapse the (vo, queue) tuples down to the unique queue names.
    queues = sets.Set()
    for _, queue in getVoQueues(cp):
        queues.add(queue)
    return queues
def print_CESEBind(cp):
    """
    Print the GlueCESEBind objects: one entry per CE/SE binding, plus one
    group object per CE listing every SE bound to it.
    """
    group_template = getTemplate("GlueCESEBind", "GlueCESEBindGroupCEUniqueID")
    se_template = getTemplate("GlueCESEBind", "GlueCESEBindSEUniqueID")
    ce_to_ses = {}
    for binding in getCESEBindInfo(cp):
        # Emit the individual binding and remember the SE for the group.
        printTemplate(se_template, binding)
        ce_to_ses.setdefault(binding['ceUniqueID'],
            sets.Set()).add(binding['seUniqueID'])
    for ce, ses in ce_to_ses.items():
        se_lines = '\n'.join(['GlueCESEBindGroupSEUniqueID: %s' % i \
            for i in ses])
        printTemplate(group_template, {'ceUniqueID': ce,
            'se_groups': se_lines})
def parseAclInfo(queue, qinfo, vo_mapper):
    """
    Take a queue information dictionary and determine which VOs are in the
    ACL list. The used keys are:

       - users: A set of all user names allowed to access this queue.
       - groups: A set of all group names allowed to access this queue.

    @param queue: Queue name (for logging purposes).
    @param qinfo: Queue info dictionary
    @param vo_mapper: VO mapper object
    @returns: A set of allowed VOs
    """
    # NOTE(review): when qinfo['users'] exists, the .add() calls below
    # mutate that set in place (side effect on the caller's dict) --
    # confirm callers do not rely on qinfo being unmodified.
    users = qinfo.get('users', sets.Set())
    if 'groups' in qinfo:
        all_groups = grp.getgrall()
        all_users = pwd.getpwall()
        group_dict = {}
        # Match ACL groups by name or by GID; remember GID -> group name.
        for group in all_groups:
            if group[0] in qinfo['groups'] or group[2] in qinfo['groups']:
                users.add(group[0])
                group_dict[group[2]] = group[0]
        # Also admit users whose primary GID belongs to an ACL group.
        for user in all_users:
            try:
                group = group_dict[user[3]]
            except:
                continue
            if group[0] in qinfo['groups'] or user[3] in qinfo['groups']:
                users.add(group[0])
    # Map each user through the VO mapper; unmappable users are skipped.
    vos = sets.Set()
    for user in users:
        try:
            vos.add(vo_mapper[user])
        except:
            pass
    log.info("The acl info for queue %s (users %s, groups %s) mapped to %s." % \
        (queue, ', '.join(qinfo.get('users', [])),
        ', '.join(qinfo.get('groups', [])),
        ', '.join(vos)))
    return vos
def test_voList(self):
    """
    Verify that voList loads exactly the expected set of VOs from the
    fermigrid user-vo-map, honoring the white/black list options.
    """
    cp = ConfigParser.ConfigParser()
    cp.add_section("vo")
    cp.set("vo", "vo_blacklist", "ilc")
    cp.set("vo", "vo_whitelist", "vo1")
    cp.set("vo", "user_vo_map", "test_configs/fermigrid-osg-user-vo-map.txt")
    actual = sets.Set(voList(cp))
    mismatch = actual.symmetric_difference(fermigrid_vos)
    self.failIf(mismatch, msg="Difference between voList output and desired " \
        "output: %s." % ', '.join(mismatch))
def get_vos_from_acbr(acbr):
    """
    Extract the set of VO names from a multi-line ACBR block.

    The last whitespace-separated token of each line is normalized as an
    FQAN; only values of the form '/<vo>[/...]' contribute their first
    path component.  Malformed lines are silently skipped.
    """
    vos = sets.Set()
    for line in acbr.splitlines():
        candidate = line.split()[-1]
        try:
            parts = normalizeFQAN(candidate).split('/')
        except:
            continue
        # A proper FQAN starts with '/', so parts[0] must be empty and
        # there must be at least one component after it.
        if len(parts) == 1 or len(parts[0]) > 0:
            continue
        vos.add(parts[1])
    return vos
def getSESpace(self, gb=False, total=False):
    """
    Return space usage for this SE, computed from local filesystems.

    @keyword gb: If True, return values in GB (decimal, value/1000^2)
        instead of KB.
    @keyword total: If True, return (used, free, total); otherwise
        (used, free).

    NOTE(review): used/free/tot are only assigned inside the use_df
    branch below; presumably the non-df path is handled elsewhere in the
    class -- confirm against the full method in the repository.
    """
    if cp_getBoolean(self._cp, self._section, 'use_df', False) or \
            self.status == False:
        # Let a configuration option override the use_df option.
        space = cp_get(self._cp, self._section, 'space', '')
        if space:
            try:
                # 'space' is an admin-supplied "(used, free, total)"
                # expression; evaluated with empty globals/locals.
                used, free, tot = eval(space, {}, {})
                used, free, tot = int(used), int(free), int(tot)
            except:
                used, free, tot = 0, 0, 0
        else:
            paths = sets.Set()
            # Lookup SA paths only if there's a single SA.
            # Otherwise, use the default path (otherwise we get a inf loop)
            if self.sas:
                for sa in self.getSAs():
                    path = sa['path']
                    paths.add(path)
            else:
                paths = [self.getPathForSA(space=None, \
                    section=self._section)]
            used, free, tot = 0, 0, 0
            # Accumulate statvfs numbers (converted to KB) over all paths.
            for path in paths:
                try:
                    stat_info = os.statvfs(path)
                    blocks = stat_info[statvfs.F_BLOCKS]
                    bsize = stat_info[statvfs.F_BSIZE]
                    avail = stat_info[statvfs.F_BFREE]
                except Exception, e:
                    log.exception(e)
                    continue
                used += (blocks - avail) * bsize / 1024.
                free += avail * bsize / 1024.
                tot += blocks * bsize / 1024.
    if total:
        if gb:
            return int(used/1000.**2), int(free/1000.**2), \
                int(tot/1000.**2)
        else:
            return int(used), int(free), int(tot)
    else:
        if gb:
            return int(used / 1000.**2), int(free / 1000.**2)
        else:
            return int(used), int(free)
def getLGAllowedVOs(cp, vos, name=None):
    """
    Return the allowed VOs for a certain linkgroup.

    A manual mapping from the config file (getAllowedVOs) takes
    precedence when one exists for the named linkgroup; otherwise the
    linkgroup's own VO policy string is parsed.
    """
    # See if we've manually set this information.
    if name:
        try:
            return getAllowedVOs(cp, name, return_default=False)
        except:
            pass
    mapper = VoMapper(cp)
    acbrs = []
    for match in vo_re.finditer(vos):
        policy = match.groups()[0]
        # A wildcard policy admits every storage VO.
        if policy == '*:*':
            return ['VO:%s' % i for i in voListStorage(cp)]
        if policy.startswith('/'):
            # FQAN-style policy: "/group:role".
            log.debug("VO Policy: %s" % policy)
            pieces = tuple(policy.split(':'))
            if len(pieces) == 2:
                try:
                    acbrs.append('VOMS:%s/Role=%s' % pieces)
                except:
                    pass
            else:
                log.error("Invalid VO policy: %s" % policy)
        else:
            # Plain "vo:role" policy: map the VO name through the mapper.
            try:
                acbrs.append('VO:%s' % mapper[policy.split(':')[0]])
            except:
                pass
    # Remove duplicates and return.
    acbrs = list(sets.Set(acbrs))
    # If there aren't any allowed VOs, then use the manual overrides.
    if not acbrs:
        return getAllowedVOs(cp, name)
    return acbrs
def getAllowedVOs(cp, space, return_default=True):
    """
    Returns a list of ACBRs for VOs which are allowed to access this space.

    Resolution order: [dcache] space_<space>_vos, then [dcache]
    allowed_vos, then (when return_default is True) [dcache]
    default_policy.  A '*' entry expands to every storage VO.

    Throws a general exception if return_default=False and there's no
    explicit mapping for this space.
    """
    configured = cp_get(cp, "dcache", "space_%s_vos" % space, None)
    if not configured:
        configured = cp_get(cp, "dcache", "allowed_vos", None)
    if not configured:
        if not return_default:
            raise Exception("No manual access controls for %s." % space)
        configured = cp_get(cp, "dcache", "default_policy", "*")
    names = [x.strip() for x in configured.split(',') if x.strip()]
    if '*' in names:
        # Expand the wildcard into all storage VOs, then drop it.
        for vo in voListStorage(cp):
            if vo not in names:
                names.append(vo)
        names.remove('*')
    return ['VO:%s' % x for x in sets.Set(names)]
def print_VOViewLocal(cp):
    """
    Print the GLUE VOView entity; shows the VO's view of the condor batch
    system.

    Config options used:
       * ce.name.  The human-readable name of the ce.
       * condor.status.  The status of condor; defaults to "Production"
       * osg_dirs.app.  The $OSG_APP directory; defaults to "/Unknown"
       * osg_dirs.data.  The $OSG_DATA directory; defaults to "/Unknown"
       * se.name.  The human-readable name of the closest SE.

    @param cp: The GIP configuration object
    @type cp: ConfigParser.ConfigParser
    """
    VOView = getTemplate("GlueCE", "GlueVOViewLocalID")
    ce_name = cp_get(cp, "ce", "name", "")
    #status = cp_get(cp, "condor", "status", "Production")
    #condorVersion = getLrmsInfo(cp)
    total_nodes, _, unclaimed = parseNodes(cp)
    vo_map = VoMapper(cp)
    jobs_info = getJobsInfo(vo_map, cp)
    groupInfo = getGroupInfo(vo_map, cp)
    # Add in the default group: tally the VOs and slot quotas claimed by
    # the explicitly-configured groups first.
    all_group_vos = []
    total_assigned = 0
    for key, val in groupInfo.items():
        if key == 'default':
            continue
        all_group_vos.extend(val['vos'])
        total_assigned += val.get('quota', 0)
    all_vos = sets.Set(voList(cp))
    # VOs not owned by any configured group fall into 'default'.
    defaultVoList = [i for i in all_vos if i not in all_group_vos]
    if 'default' not in groupInfo:
        groupInfo['default'] = {}
        groupInfo['default']['vos'] = defaultVoList
    if total_nodes > total_assigned:
        log.info("There are %i assigned job slots out of %i total; assigning" \
            " the rest to the default group." % (total_assigned,
            total_nodes))
        groupInfo['default']['quota'] = total_nodes - total_assigned
    else:
        log.warning("More assigned nodes (%i) than actual nodes (%i)!" % \
            (total_assigned, total_nodes))
    if defaultGroupIsExcluded(cp):
        if groupInfo.has_key('default'):
            del groupInfo['default']
    for group in groupInfo:
        jinfo = jobs_info.get(group, {})
        # Advertise the group's configured VOs plus any VO with jobs,
        # restricted to VOs the site actually knows about.
        vos = sets.Set(groupInfo[group].get('vos', [group]))
        vos.update(jinfo.keys())
        vos.intersection_update(all_vos)
        # Enforce invariants:
        # VO_FREE_SLOTS <= CE_FREE_SLOTS
        # VO_FREE_SLOTS <= CE_ASSIGNED - VO_RUNNING
        # This code determines CE_ASSIGNED
        ginfo = groupInfo[group]
        if ginfo.get("quota", 0) > 0:
            assigned = ginfo.get("quota", 0)
        else:
            assigned = total_nodes
        log.debug("All VOs for %s: %s" % (group, ", ".join(vos)))
        ce_unique_id = buildCEUniqueID(cp, ce_name, 'condor', group)
        max_wall = cp_getInt(cp, "condor", "max_wall", 1440)
        # Jobs already running in this group count against its quota.
        myrunning = sum([i.get('running', 0) for i in jinfo.values()], 0)
        assigned = max(assigned, myrunning)
        for vo in vos:
            acbr = 'VO:%s' % vo
            info = jinfo.get(vo.lower(), {"running": 0, "idle": 0,
                "held": 0})
            ert, wrt = responseTimes(cp, info["running"], info["idle"] + \
                info["held"], max_job_time=max_wall*60)
            free = min(unclaimed, assigned - myrunning,
                assigned - int(info['running']))
            free = int(free)
            waiting = int(info["idle"]) + int(info["held"])
            # With a deep idle backlog, report no free slots for the VO.
            if waiting > cp_getInt(cp, 'condor', 'idle_slack', '10'):
                free = 0
            info = {"vo": vo,
                "acbr": acbr,
                "ceUniqueID": ce_unique_id,
                "voLocalID": vo,
                "ce_name": ce_name,
                "job_manager": 'condor',
                "queue": vo,
                "running": info["running"],
                # Held jobs are included as "waiting" since the definition
                # is: Number of jobs that are in a state different than
                # running
                "waiting": waiting,
                "total": info["running"] + info["idle"] + info["held"],
                "free_slots": free,
                "job_slots": int(total_nodes),
                "ert": ert,
                "wrt": wrt,
                "default_se": getDefaultSE(cp),
                'app': cp_get(cp, 'osg_dirs', 'app', '/Unknown'),
                "data": cp_get(cp, "osg_dirs", "data", "/Unknown"),
            }
            printTemplate(VOView, info)
sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python")) import tempfile23 as tempfile import ConfigParser from gip_sets import Set import gip_sets as sets from gip_common import config, cp_get, cp_getBoolean, voList, configContents from gip_cluster import getOSGVersion, getApplications from gip_testing import runTest, streamHandler import gip_testing import gip_osg fermigrid_vos = sets.Set(['osg', 'cdms', 'lqcd', 'auger', 'i2u2', 'cdf', 'des', 'dzero', 'nanohub', 'grase', 'cms', 'fermilab', 'astro', 'accelerator', 'hypercp', 'ktev', 'miniboone', 'minos', 'nova', 'numi', 'mipp', 'patriot', 'sdss', 'theory', 'fermilab-test', 'accelerator', 'cdms', 'LIGO', 'glow', 'dosar', 'star', 'geant4', 'mariachi', 'atlas', 'nwicg', 'ops', 'gugrid', 'gpn', 'compbiogrid', 'engage', 'pragma', 'nysgrid', 'sbgrid', 'cigi', 'mis', 'fmri', 'gridex', 'vo1']) class TestGipCommon(unittest.TestCase): def test_config(self): """ Make sure that the ConfigParser object can load without errors """ cp = config() def test_config_dir(self): # create temp dir old_gip_location = os.environ['GIP_LOCATION']
def calculate_spaces(cp, admin, section='se'): """ Determine the storage areas attached to this dCache. This returns two lists. The first list, sas, is a list of dictionaries which contain the key-value pairs needed to fill out the GlueSA object. The second list, vos, is a list of dictionaries which contain the key-value pairs needd to fill in the GlueVOInfo object. @param cp: ConfigParser object @param admin: Admin interface to dCache @returns: sas, vos (see above description of return values. """ # If SrmSpaceManager isn't running, this will cause an exception. # Catch it and pretend we just have no reservations or link groups try: space_output = admin.execute(SrmSpaceManager, 'ls') resv, lg = parsers.parse_srm_space_manager(space_output) except: resv = [] lg = [] # Get the pool information psu_output = admin.execute(PoolManager, 'psu dump setup') pgroups, lgroups, links, link_settings, pools = \ parsers.parse_pool_manager(psu_output) listOfPools = pools_module.lookupPoolStorageInfo(admin, \ getLogger("GIP.dCache.Pools")) pm_info = admin.execute(PoolManager, 'info') can_stage = pm_info.find('Allow staging : on') >= 0 can_p2p = pm_info.find('Allow p2p : on') >= 0 # Some post-parsing: go from list of pools to dictionary by pool name pool_info = {} pool_objs = {} for pool in listOfPools: pool_info[pool.poolName] = pool.totalSpaceKB pool_objs[pool.poolName] = pool for pool in pools: if pool not in pool_info: pool_info[pool] = 0 # In order to make sure we don't have overlapping spaces, we remove the pool # from the pools list in order to record ones we already account for. 
# Build the map from link group to pools lgroups_to_pools = {} for lgroup, assc_links in lgroups.items(): cur_set = sets.Set() lgroups_to_pools[lgroup] = cur_set for link in assc_links: for pgroup in links[link]: for pool in pgroups[pgroup]: cur_set.add(pool) pools.remove(pool) # Ensure already-seen pools are not in the remaining pool groups for pgroup, pg_set in pgroups.items(): pg_set.intersection_update(pools) def cmp(x, y): "Sort pool groups by total size" return sum([pool_info[i] for i in pgroups[x]]) < \ sum([pool_info[i] for i in pgroups[y]]) pgroup_list = pgroups.keys() # Python 2.4 and 2.5 support named parameters, but python 2.3 # does not. Trying the named parameter first for future # compatibility reasons, if it fails (i.e. on python 2.3) then # resort to the python 2.3 method try: pgroup_list.sort(cmp=cmp) except: pgroup_list.sort(cmp) sas = [] vos = [] # Build a SA from each link group for lgroup, lgpools in lgroups.items(): lg_info = None for l in lg: if l['name'] == lgroup: lg_info = l break if not lg_info: continue sa = calculate_space_from_linkgroup(cp,lg_info, [pool_objs[i] for i in \ lgpools if i in pool_objs], section=section) sas.append(sa) voinfos = calculate_voinfo_from_lg(cp, lg_info, resv, section=section) vos.extend(voinfos) # Build a SA from each nontrivial pool group # Start with the largest and work our way down. for pgroup in pgroup_list: pg_pools = pgroups[pgroup] del pgroups[pgroup] for pg2, pg2_pools in pgroups.items(): pg2_pools.difference_update(pg_pools) my_pool_objs = [pool_objs[i] for i in pg_pools if i in pool_objs] if not my_pool_objs: continue sa = calculate_space_from_poolgroup(cp, pgroup, my_pool_objs, admin, links, link_settings, allow_staging=can_stage, allow_p2p=can_p2p, section=section) sas.append(sa) voinfos = calculate_voinfo_from_pgroup(cp, pgroup, section=section) vos.extend(voinfos) return sas, vos
def calculate_voinfo_from_lg(cp, lg, resv, section='se'):
    """
    Calculate all the VOInfo for the LinkGroup.

    Algorithm:
       0) Calculate all the allowed VOs for this link group.
       1) Calculate the allowed path for each VO/FQAN/space description.
          a) Try finding a non-default path for each space description
          b) fallback to the LinkGroup's default path.
       2) Group all the reservations by VO/FQAN, path, and space
          description
       3) Create one VOInfo object per FQAN/path/space description
       4) For any remaining VOs who have a path but no reserved space,
          create additional VOInfo objects.

    If the space description is "null", change the name to "DEFAULT"
    """
    log.debug("Starting to calculate voinfo for group %s." % lg['name'])
    acbr_spacedesc = {}
    lgId = lg['id']
    allowed_fqans = getLGAllowedVOs(cp, lg['vos'], lg['name'])
    # Build a list of ACBR -> unique space descriptions
    for r in resv:
        # Only reservations belonging to this link group.
        if r['linkGroupId'] != lgId:
            continue
        if 'acbr' not in r:
            r['acbr'] = getReservationACBR(cp, r['voGroup'], r['voRole'])
        # Skip reservations we cannot map to an ACBR.
        if not r['acbr']:
            continue
        acbr = r['acbr']
        if acbr not in acbr_spacedesc:
            acbr_spacedesc[acbr] = sets.Set()
        spaces = acbr_spacedesc[acbr]
        spaces.add(r['descr'])
    # Rename null->DEFAULT
    for acbr, spaces in acbr_spacedesc.items():
        if 'null' in spaces:
            spaces.remove('null')
            spaces.add('DEFAULT')
    # Build a map of (acbr, path) -> unique space descriptions
    default_path = getPath(cp, lg['name'], section=section)
    acbr_path_spacedesc = {}
    for acbr, spaces in acbr_spacedesc.items():
        # Per-VO default path, falling back to the link group default.
        try:
            vo = acbr
            if acbr.startswith("VO:"):
                vo = vo[3:]
            default_acbr_path = getPath(cp, lg['name'], vo, \
                return_default=False, section=section)
        except:
            default_acbr_path = default_path
        for space in spaces:
            # Per-space-description path, falling back to the VO default.
            try:
                vo = acbr
                if acbr.startswith("VO:"):
                    vo = vo[3:]
                path = getPath(cp, space, vo, return_default=False,
                    section=section)
            except:
                path = default_acbr_path
            key = (acbr, path)
            if key not in acbr_path_spacedesc:
                acbr_path_spacedesc[key] = sets.Set()
            acbr_path_spacedesc[key].add(space)
    # Record a default path for every allowed FQAN, reserved or not.
    allowed_path = {}
    for acbr in allowed_fqans:
        try:
            vo = acbr
            if acbr.startswith("VO:"):
                vo = vo[3:]
            default_acbr_path = getPath(cp, lg['name'], vo, \
                return_default=False, section=section)
        except:
            default_acbr_path = default_path
        allowed_path[acbr] = default_acbr_path
    log.info("For link group %s, we have the following space description info" \
        ": %s" % (lg['name'], str(acbr_path_spacedesc)))
    voinfos = []
    seUniqueID = cp.get(section, "unique_name")
    # Build VOInfo objects from space descriptions
    for key, spaces in acbr_path_spacedesc.items():
        acbr, path = key
        acbr = 'GlueVOInfoAccessControlBaseRule: %s' % acbr
        for space in spaces:
            id = '%s:%s' % (acbr, space)
            info = {'voInfoID': id,
                'seUniqueID': seUniqueID,
                'name': id,
                'path': path,
                'tag': space,
                'acbr': acbr,
                'saLocalID': lg['saLocalID']}
            voinfos.append(info)
        # This FQAN has reserved space; don't emit a placeholder below.
        if key[0] in allowed_fqans:
            allowed_fqans.remove(key[0])
    # Add VOInfo objects for remaining VOs
    for acbr in allowed_fqans:
        path = allowed_path[acbr]
        full_acbr = 'GlueVOInfoAccessControlBaseRule: %s' % acbr
        info = {'voInfoID': acbr,
            'seUniqueID': seUniqueID,
            'name': '%s with no reserved space' % acbr,
            'path': path,
            'tag': 'UNAVAILABLE',
            'acbr': full_acbr,
            'saLocalID': lg['saLocalID'],
        }
        voinfos.append(info)
    return voinfos
def calculate_space_from_poolgroup(cp, pgroup, pools, admin, links, \
        link_settings, allow_staging=False, allow_p2p=False, section='se'):
    """
    Build the GlueSA information dictionary for a single dCache pool group.

    Capabilities (read/write/p2p/stage) are derived from the preference
    settings of every link pointing at this pool group; staging turns the
    SA into a nearline/custodial one.  Space totals are summed (in KB)
    over the supplied pool objects.

    @param cp: ConfigParser object
    @param pgroup: Pool group name
    @param pools: List of pool storage-info objects for this group
    @param admin: Admin interface to dCache (unused here)
    @param links: dict of link name -> Set of pool group names
    @param link_settings: dict of link name -> preference dict
    @returns: Dictionary of GlueSA template values.
    """
    saLocalID = '%s:poolgroup' % pgroup
    seUniqueID = cp.get(section, 'unique_name')
    # Collect every link that routes to this pool group.
    myLinks = sets.Set()
    for link, pgroups in links.items():
        if pgroup in pgroups:
            myLinks.add(link)
    # A capability is on if any link grants it (logical OR over links).
    # NOTE(review): link_settings values come from regex captures and may
    # be strings; in Python 2, str > 0 is always True -- confirm the
    # preferences are converted to ints upstream.
    or_func = lambda x, y: x or y
    can_write = reduce(or_func,
        [link_settings[i]['write'] > 0 for i in myLinks], False)
    can_read = reduce(or_func,
        [link_settings[i]['read'] > 0 for i in myLinks], False)
    can_p2p = reduce(or_func,
        [link_settings[i]['p2p'] > 0 for i in myLinks], False) and allow_p2p
    can_stage = reduce(or_func,
        [link_settings[i]['cache'] > 0 for i in myLinks], False) and \
        allow_staging
    # Staging-capable groups are advertised as nearline/custodial.
    accesslatency = 'online'
    retentionpolicy = 'replica'
    if can_stage:
        accesslatency = 'nearline'
        retentionpolicy = 'custodial'
    saLocalID = '%s:%s:%s' % (pgroup, retentionpolicy, accesslatency)
    if can_stage:
        expirationtime = 'releaseWhenExpired'
    else:
        expirationtime = 'neverExpire'
    # Aggregate the pool sizes (all in KB).
    totalKB = sum([i.totalSpaceKB for i in pools])
    usedKB = sum([i.usedSpaceKB for i in pools])
    reservedKB = sum([i.reservedKB for i in pools]) + \
        sum([i.preciousKB for i in pools])
    availableKB = sum([i.freeSpaceKB for i in pools])
    un, fn, tn = getSETape(cp, vo=pgroup)
    acbr_attr = 'GlueSAAccessControlBaseRule: %s'
    acbr = '\n'.join([acbr_attr % i for i in getAllowedVOs(cp, pgroup)])
    path = getPath(cp, pgroup, section=section)
    info = {"saLocalID": saLocalID,
        "seUniqueID": seUniqueID,
        "root": "/",
        "path": path,
        "filetype": "permanent",
        "saName": saLocalID,
        # Online figures are converted from KB to GB (binary).
        "totalOnline": totalKB / 1024**2,
        "usedOnline": usedKB / 1024**2,
        "freeOnline": availableKB / 1024**2,
        "reservedOnline": reservedKB / 1024**2,
        "totalNearline": tn,
        "usedNearline": un,
        "freeNearline": fn,
        "reservedNearline": 0,
        "retention": retentionpolicy,
        "accessLatency": accesslatency,
        "expiration": expirationtime,
        "availableSpace": availableKB,
        "usedSpace": usedKB,
        "acbr": acbr,
    }
    return info
def __init__(self, pginfo):
    """
    SAX-style handler for pool group elements.

    @param pginfo: Shared dictionary to fill; maps pool group name to the
        accumulated info dictionary for that group.
    """
    # The base handler tracks the current object under 'curpg' and the
    # numeric attributes 'total', 'free', and 'used'.
    ObjectHandler.__init__(self, 'curpg', ['total', 'free', 'used'])
    # Results accumulator shared with the caller.
    self.pginfo = pginfo
    # Per-group scratch record: the pools and links seen so far.
    self.curpg = {'pools': sets.Set(), 'links': sets.Set()}
def getQueueInfo(cp):
    """
    Looks up the queue information from PBS.

    The returned dictionary contains the following keys:
      - B{status}: Production, Queueing, Draining, Closed
      - B{priority}: The priority of the queue.
      - B{max_wall}: Maximum wall time.
      - B{max_running}: Maximum number of running jobs.
      - B{running}: Number of running jobs in this queue.
      - B{wait}: Waiting jobs in this queue.
      - B{total}: Total number of jobs in this queue.

    @param cp: Configuration of site.
    @returns: A dictionary of queue data.  The keys are the queue names,
        and the value is the queue data dictionary.
    """
    queueInfo = {}
    queue_data = None
    # Parse the "qstat -Q -f"-style output: a "Queue: <name>" header
    # followed by "attr = value" lines for that queue.
    for orig_line in pbsCommand(queue_info_cmd, cp):
        line = orig_line.strip()
        if line.startswith("Queue: "):
            # Finalize the previous queue before starting a new one:
            # derive 'status' from the started/enabled flags.
            if queue_data != None:
                try:
                    if queue_data["started"] and queue_data["enabled"]:
                        queue_data["status"] = "Production"
                    elif queue_data["enabled"]:
                        queue_data["status"] = "Queueing"
                    elif queue_data["started"]:
                        queue_data["status"] = "Draining"
                    else:
                        queue_data["status"] = "Closed"
                except:
                    queue_data["status"] = "Closed"
                    msg = "The 'Started' and/or 'enabled' attributes do not " \
                        "exist for the %s queue." % line[7:]
                    log.warning(msg)
                # The raw flags are internal; drop them from the result.
                if "started" in queue_data.keys():
                    del queue_data["started"]
                if "enabled" in queue_data.keys():
                    del queue_data['enabled']
            queue_data = {}
            queue_name = line[7:]
            queueInfo[queue_name] = queue_data
            continue
        # Skip anything before the first queue header, and blank lines.
        if queue_data == None:
            continue
        if len(line) == 0:
            continue
        attr, val = line.split(" = ")
        if attr == "Priority":
            queue_data['priority'] = int(val)
        elif attr == "total_jobs":
            queue_data["total"] = int(val)
        elif attr == "state_count":
            # e.g. "Transit:0 Queued:5 Held:0 Waiting:0 Running:2 ..."
            info = val.split()
            for entry in info:
                state, count = entry.split(':')
                count = int(count)
                if state == 'Queued':
                    queue_data['wait'] = queue_data.get('wait', 0) + count
                #elif state == 'Waiting':
                #    queue_data['wait'] = queue_data.get('wait', 0) + count
                elif state == 'Running':
                    queue_data['running'] = count
        elif attr == "resources_max.walltime":
            # HH:MM:SS -> minutes.
            queue_data["max_wall"] = HMSToMin(val)
        elif attr == "enabled":
            queue_data["enabled"] = val == "True"
        elif attr == "started":
            queue_data["started"] = val == "True"
        elif attr == "max_running":
            queue_data["max_running"] = int(val)
        elif attr == "resources_max.nodect":
            queue_data["job_slots"] = int(val)
        elif attr == "max_queuable" or attr == 'max_queueable':
            try:
                queue_data["max_waiting"] = int(val)
                queue_data["max_queuable"] = int(val)
            except:
                log.warning("Invalid input for max_queuable: %s" % str(val))
        elif attr == "acl_group_enable" and val.lower() == 'true':
            # Presence of the 'groups' key marks group ACLs as active.
            queue_data["groups"] = sets.Set()
        elif attr == "acl_groups" and 'groups' in queue_data:
            queue_data["groups"].update(val.split(','))
        elif attr == "acl_user_enable" and val.lower() == 'true':
            # Presence of the 'users' key marks user ACLs as active.
            queue_data["users"] = sets.Set()
        elif attr == "acl_users" and 'users' in queue_data:
            queue_data["users"].update(val.split(','))
    # Finalize the last queue in the output (same status derivation as
    # above; here the flag deletions are inside the try block).
    if queue_data != None:
        try:
            if queue_data["started"] and queue_data["enabled"]:
                queue_data["status"] = "Production"
            elif queue_data["enabled"]:
                queue_data["status"] = "Queueing"
            elif queue_data["started"]:
                queue_data["status"] = "Draining"
            else:
                queue_data["status"] = "Closed"
            del queue_data["started"]
            del queue_data['enabled']
        except:
            queue_data["status"] = "Closed"
            msg = "The 'Started' and/or 'enabled' attributes do not " \
                "exist for the %s queue." % line[7:]
            log.warning(msg)
    return queueInfo
def endElement(self, name):
    """
    Close out an XML element; on </poolgroup>, publish the accumulated
    pool group record and reset the per-group accumulator.
    """
    # Let the base handler do its generic end-of-element bookkeeping.
    ObjectHandler.endElement(self, name)
    # Only record groups that actually picked up a 'name' attribute.
    if name == 'poolgroup' and 'name' in self.curpg:
        self.pginfo[self.curpg['name']] = self.curpg
        # Fresh scratch record for the next pool group.
        self.curpg = {'pools': sets.Set(), 'links': sets.Set()}
# Default to no groups. groupInfo = {} log.debug("Group Info: %s" % str(groupInfo)) # Accumulate the entire statistics, instead of breaking down by VO. running, idle, held = 0, 0, 0 for group, ginfo in jobs_info.items(): for vo, info in ginfo.items(): running += info.get('running', 0) idle += info.get('idle', 0) held += info.get('held') # Set up the "default" group with all the VOs which aren't already in a # group groupInfo['default'] = {'prio': 999999, 'quota': 999999, 'vos': sets.Set()} all_group_vos = [] total_assigned = 0 for key, val in groupInfo.items(): if key == 'default': continue all_group_vos.extend(val['vos']) try: total_assigned += val['quota'] except: pass if total_nodes > total_assigned: log.info("There are %i assigned job slots out of %i total; assigning" \ " the rest to the default group." % (total_assigned, total_nodes)) groupInfo['default']['quota'] = total_nodes - total_assigned else:
@param cp: Site configuration @returns: List of strings containing the queue names. """ doPath(cp) vo_map = VoMapper(cp) # Determine the group information, if there are any Condor groups try: groupInfo = getGroupInfo(vo_map, cp) except Exception, e: log.exception(e) # Default to no groups. groupInfo = {} # filter out queues that don't match a VO allVos = sets.Set(voList(cp)) for group in groupInfo.keys(): vos = sets.Set(groupInfo[group]['vos']) if not vos.intersection(allVos): log.debug('Filtering out %s in getQueueList -- no matching VO' % groupInfo[group]['vos']) del groupInfo[group] # Set up the "default" group with all the VOs which aren't already in a # group if not defaultGroupIsExcluded(cp): groupInfo['default'] = {'prio': 999999, 'quota': 999999, 'vos': sets.Set()} all_group_vos = [] for val in groupInfo.values(): all_group_vos.extend(val['vos']) defaultVoList = voList(cp, vo_map=vo_map)