Example #1
import os
import sys
import time
import unittest

#Standard testing imports:
sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, cp_get
from gip_testing import runTest, streamHandler

log = getLogger("GIP.Test.Wrapper")

#Add the path with the osg_info_wrapper script:
sys.path.append(os.path.expandvars("$GIP_LOCATION/libexec"))
import osg_info_wrapper


class TestOsgInfoWrapper(unittest.TestCase):
    def test_simple(self):
        """
        Simple test of the OSG Info Wrapper.  Make sure that both the provider
        and plugin functionality work.
        """
        cp = config("test_modules/simple/config")
        entries = osg_info_wrapper.main(cp, return_entries=True)
        has_timestamp = False
        has_ce = False
        for entry in entries:
            if entry.glue.get('LocationName', (0, ))[0] == 'TIMESTAMP':
                has_timestamp = True
            if entry.dn[0] == 'GlueCEUniqueID=red.unl.edu:2119/jobmanager' \
Example #2
import sys
import gip_sets as sets

from xml.sax import make_parser, SAXParseException
from xml.sax.handler import ContentHandler, feature_external_ges

from gip_common import getLogger

log = getLogger("GIP.Storage.dCache.InfoProviderParser")

IN_TOP = 0
IN_POOLS = 1
IN_LINKGROUPS = 2
IN_LINKS = 3
IN_POOLGROUPS = 4
IN_RESERVATIONS = 5
IN_DOORS = 6
IN_SUMMARY = 7
IN_POOLMANAGER = 8
IN_POOLMANAGER_VERSION = 9
IN_SUMMARY_POOLS = 10

class InfoProviderHandler(ContentHandler):

    def __init__(self):
        self.pools = {}
        self.doors = {}
        self.poolgroups = {}
        self.links = {}
        self.linkgroups = {}
Example #3
import sys
py23 = sys.version_info[0] == 2 and sys.version_info[1] >= 3
if not py23:
        import operator
        def sum(data, start=0): return reduce(operator.add, data, start)

# dCache imports
import pools as pools_module
import admin
import parsers

# GIP imports
from gip_common import getLogger, VoMapper, matchFQAN, cp_get
from gip_storage import getSETape, voListStorage, getPath as getStoragePath

log = getLogger("GIP.dCache.SA")

PoolManager = 'PoolManager'
SrmSpaceManager = 'SrmSpaceManager'

def calculate_spaces(cp, admin, section='se'):
    """
    Determine the storage areas attached to this dCache.

    This returns two lists.  The first list, sas, is a list of dictionaries
    which contain the key-value pairs needed to fill out the GlueSA object.

    The second list, vos, is a list of dictionaries which contain the key-value
    pairs needed to fill in the GlueVOInfo object.

    @param cp: ConfigParser object
Example #4
#!/usr/bin/env python

import os
import sys

# Make sure the gip_common libraries are in our path
sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getTemplate, getLogger, printTemplate, cp_get, cp_getBoolean
from gip_cluster import getSubClusterIDs, getClusterID

# Retrieve our logger in case of failure
log = getLogger("GIP.grid_dirs")

def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only:
            # Load up the template for GlueLocationLocalID
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
            template = getTemplate("GlueCluster", "GlueLocationLocalID")
            cluster_id = getClusterID(cp)
            osg_grid = cp_get(cp, "osg_dirs", "grid_dir", None)
    
            if not osg_grid:
                raise RuntimeError('grid_dir ($OSG_GRID) not defined!')
                
            for subClusterId in getSubClusterIDs(cp):
                # Dictionary of data to fill in for GlueLocationLocalID
                info = {'locationId':   'OSG_GRID',
Example #5
from xml.sax import make_parser, SAXParseException
from xml.sax.handler import ContentHandler, feature_external_ges

from gip_common import voList, cp_getBoolean, getLogger, cp_get, voList, \
    VoMapper, cp_getInt, cp_getList
from gip_testing import runCommand

condor_version = "condor_version"
condor_group = "condor_config_val %(daemon)s GROUP_NAMES"
condor_quota = "condor_config_val %(daemon)s GROUP_QUOTA_%(group)s"
condor_prio = "condor_config_val %(daemon)s GROUP_PRIO_FACTOR_%(group)s"
condor_status = "condor_status -xml -constraint '%(constraint)s'"
condor_status_submitter = "condor_status -submitter -xml -constraint '%(constraint)s'"
condor_job_status = "condor_q -xml -constraint '%(constraint)s'"

log = getLogger("GIP.Condor")

class ClassAdParser(ContentHandler):
    """
    Streaming SAX handler for the output of condor_* -xml calls; it's around
    60 times faster and has a similar reduction in required memory.

    Use this as a ContentHandler for a SAX parser; call getJobInfo afterward
    to get the information about each job.
    
    getJobInfo returns a dictionary of jobs; the key for the dictionary is the
    Condor attribute passed in as 'idx' to the constructor; the value is another
    dictionary of key-value pairs from the condor JDL, where the keys are
    those in the attribute list passed to the constructor.
    """
Example #6
    import operator

    def sum(data, start=0):
        return reduce(operator.add, data, start)


# dCache imports
import pools as pools_module
import admin
import parsers

# GIP imports
from gip_common import getLogger, VoMapper, matchFQAN, cp_get
from gip_storage import getSETape, voListStorage, getPath as getStoragePath

log = getLogger("GIP.dCache.SA")

PoolManager = 'PoolManager'
SrmSpaceManager = 'SrmSpaceManager'


def calculate_spaces(cp, admin, section='se'):
    """
    Determine the storage areas attached to this dCache.

    This returns two lists.  The first list, sas, is a list of dictionaries
    which contain the key-value pairs needed to fill out the GlueSA object.

    The second list, vos, is a list of dictionaries which contain the key-value
    pairs needed to fill in the GlueVOInfo object.
Example #7
#!/usr/bin/env python

import os
import sys

sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, cp_getBoolean
from gip.providers.creamCE import main
from gip_common import getTemplate, getLogger, printTemplate

log = getLogger('GIP.CreamCE')

if __name__ == '__main__':
    cp = config()
    if cp_getBoolean(cp, "cream", "enabled", False) and \
           cp_getBoolean(cp, "gip", "se_only", False):
        main()
Example #8
"""
Populate the GIP based upon the values from the OSG configuration
"""

import os
import re
import sys
import socket
import ConfigParser

from gip_sections import ce, site, pbs, condor, sge, lsf, se, subcluster, \
    cluster, cesebind, cream, slurm
from gip_common import getLogger, py23, vdtDir, get_file_list

log = getLogger("GIP")

site_sec = "Site Information"
misc_sec = "Misc Services"
pbs_sec = "PBS"
condor_sec = "Condor"
sge_sec = 'SGE'
storage_sec = 'Storage'
gip_sec = 'GIP'
dcache_sec = 'dcache'
lsf_sec = 'LSF'
cream_sec = 'CREAM'
slurm_sec = 'SLURM'

default_osg_ress_servers = \
    "https://osg-ress-1.fnal.gov:8443/ig/services/CEInfoCollector[OLD_CLASSAD]"
default_osg_bdii_servers = \
Example #9
in gip_storage.
"""

import sys
import gip_sets as sets
import socket

from gip_common import cp_get, getLogger, config, getTemplate, printTemplate, \
    cp_getBoolean, cp_getInt, normalizeFQAN
from gip_storage import voListStorage, getSETape, \
    getClassicSESpace, StorageElement
from gip.bestman.BestmanInfo import BestmanInfo
from gip.dcache.DCacheInfo import DCacheInfo
from gip.dcache.DCacheInfo19 import DCacheInfo19

log = getLogger("GIP.Storage.Generic")

def print_SA(se, cp, section="se"): #pylint: disable-msg=W0613
    """
    Print out the SALocal information for GLUE 1.3.
    """
    vo_limit_str = cp_get(cp, section, "vo_limits", "")
    vo_limit = {}
    cumulative_total = {}
    # Determine the limits for each VO
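    # vo_limits is a comma-separated list of "<vo>:<limit>" pairs, for
    # example "cms:10000,atlas:20000" (illustrative values only).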
    for vo_str in vo_limit_str.split(','):
        vo_str = vo_str.strip()
        info = vo_str.split(":")
        if len(info) != 2:
            continue
        vo = info[0].strip()
Example #10
import time
import socket

import gip_common
import osg_glue2_provider.myosg as myosg
import osg_glue2_provider.version

log = gip_common.getLogger("GIP.FTS")

default_url = 'http://myosg.grid.iu.edu/rgsummary/xml?all_resources=on&gridtype=on&gridtype_1=on&active=on&active_value=1&disable_value=1&has_wlcg=on'

create_time = time.time()
host = socket.gethostname()

def formatStorage(resource, resource_group):
    info = formatDomain(resource_group)
    info['storage_service_id'] = resource['fqdn']
    return info

def formatSRM(service, resource, resource_group):
    info = formatStorage(resource, resource_group)
    info['srm_endpoint_id'] = "%s/srm/2.2.0" % resource['fqdn']
    info['srm_endpoint_url'] = service.setdefault('uri_override', 'httpg://%s:8443/srm/v2/server' % resource['fqdn'])
    info['srm_service_name'] = info['srm_endpoint_url']
    info['srm_service_endpoint_id'] = "%s_endpoint" % info['srm_endpoint_url']
    return info
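
# Illustrative behavior of formatSRM above: for a hypothetical fqdn
# 'se.example.edu', the endpoint URL defaults to
# 'httpg://se.example.edu:8443/srm/v2/server' unless the service record
# supplies a 'uri_override'.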

def formatSRMPolicy(service, resource, resource_group):
    info = formatSRM(service, resource, resource_group)
    policy_rules = []
Example #11
import os
import re
import gip_sets as sets
import statvfs

from gip_common import cp_get, cp_getBoolean, getLogger
from gip_storage import StorageElement, voListStorage
import srm_ping

log = getLogger('GIP.Storage.Bestman')

class BestmanInfo(StorageElement):

    def __init__(self, cp, **kw):
        super(BestmanInfo, self).__init__(cp, **kw)
        srms = self.getSRMs()
        if not srms:
            raise ValueError("No SRM endpoint configured!")
        self.srm_info = srms[0]
        self.endpoint = self.srm_info['endpoint']
        self.info = {}
        self.status = False

    def run(self):
        try:
            self.info = srm_ping.bestman_srm_ping(self._cp, self.endpoint,
                section=self._section)
            log.info("Returned BestMan info: %s" % str(self.info))
            self.status = True
        except srm_ping.ProxyCreateException, e:
Example #12
import re
import sys
import os

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))

from gip_common import cp_get, cp_getBoolean, config, getLogger, getTemplate, printTemplate, voList
from gip_common import vdtDir
from gip_testing import runCommand
import gip_sets as sets
import time
import zlib

log = getLogger("GIP.CREAM")


def getUniqueHash(cp):
    # EGI uses unix 'cksum' command; we'll use zlib's crc instead.
    loc = cp_get(
        cp, 'gip', 'osg_config',
        vdtDir(os.path.expandvars('$VDT_LOCATION/monitoring/config.ini'),
               '/etc/osg/config.ini'))
    loc = os.path.expandvars(loc)
    try:
        hash = zlib.crc32(loc)
    except:
        log.error('Could not find config.ini for checksum')
        hash = '0008675309'
Example #13
#!/usr/bin/env python

import os
import sys

# Make sure the gip_common libraries are in our path
sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getTemplate, getLogger, printTemplate, cp_get, cp_getBoolean
from gip_cluster import getSubClusterIDs, getClusterID

# Retrieve our logger in case of failure
log = getLogger("GIP.grid_dirs")


def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only:
            # Load up the template for GlueLocationLocalID
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
            template = getTemplate("GlueCluster", "GlueLocationLocalID")
            cluster_id = getClusterID(cp)
            osg_grid = cp_get(cp, "osg_dirs", "grid_dir", None)

            if not osg_grid:
                raise RuntimeError('grid_dir ($OSG_GRID) not defined!')

            for subClusterId in getSubClusterIDs(cp):
                # Dictionary of data to fill in for GlueLocationLocalID
Example #14
      os.EX_USAGE = 64
                                                   
try:
   #python 2.5 and above  
   import hashlib as md5
except ImportError:
   # pylint: disable-msg=F0401
   import md5

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, cp_get, cp_getBoolean, cp_getInt, gipDir
from gip_ldap import read_ldap, compareDN, LdapData
import gip_sets as sets

log = getLogger("GIP.Wrapper")

def create_if_not_exist(*paths):
    """
    Create directories if they do not exist
    """
    for path in paths:
        # Bail out if it already exists
        if os.path.exists(path):
            continue
        log.info("Creating directory %s because it doesn't exist." % path)
        try:
            os.makedirs(path)
        except Exception, e:
            log.error("Unable to make necessary directory, %s" % path)
            log.exception(e)
Example #15
import re

import gip_sections
sec = gip_sections.site

from gip_common import cp_get, voList, getLogger

log = getLogger("GIP.Site")

split_re = re.compile('\s*;?,?\s*')
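
# Illustrative input for filter_sponsor below: a sponsor string such as
# "cms:60, atlas:40"; a single bare VO like "cms" is normalized to "cms:100".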
def filter_sponsor(cp, text):
    vo_list = voList(cp)
    vo_map = dict([(i.lower(), i) for i in vo_list])
    text = text.replace('"', '').replace("'", '')
    entries = split_re.split(text)
    results = []
    if len(entries) == 1:
        entry = entries[0]
        if len(entry.split(':')) == 1:
            entries[0] = entry.strip() + ":100"
    for entry in entries:
        try:
            vo, number = entry.split(":")
            number = float(number)
        except:
            log.warning("Entry for sponsor, `%s`, is not in <vo>:<value>" \
                "format." % str(entry))
            continue
        if vo in vo_map:
            vo = vo_map[vo]
Example #16
#!/usr/bin/env python

import os
import sys

if 'GIP_LOCATION' in os.environ:
    sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, cp_get, cp_getBoolean
from gip.providers.pbs import main as pbs_main
from gip.providers.condor import main as condor_main
from gip.providers.sge import main as sge_main
from gip.providers.lsf import main as lsf_main
from gip.providers.slurm import main as slurm_main

log = getLogger("GIP.BatchSystem")

def main():
    cp = config()
    se_only = cp_getBoolean(cp, "gip", "se_only", False)
    if not se_only:
        job_manager = cp_get(cp, "ce", "job_manager", None)
        if job_manager:
            log.info("Using job manager %s" % job_manager)
        else:
           log.error("Job manager not specified!")
           sys.exit(2)
        if job_manager == 'pbs':
            pbs_main()
        elif job_manager == 'condor':
            condor_main()
        elif job_manager == 'sge':
Example #17
"""

from __future__ import generators

import signal
import sys
import os
import re
import pty
import time
import resource
import ConfigParser

from gip_common import getLogger, cp_get

log = getLogger("GIP.dCache.Admin")

def connect_admin(cp):
    """
    Connect to the site's admin interface.

    @param cp: Configuration of the site.
    @type cp: ConfigParser
    """
    info = {'Interface':'dCache'}
    info['AdminHost'] = cp_get(cp, "dcache_admin", "hostname", "localhost")
    try:
        info['Username'] = cp.get("dcache_admin", "username")
    except:
        pass
    try:
Example #18
#!/usr/bin/python
    
import re
import sys  
import os
        
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
    
from gip_common import config, getLogger, getTemplate, printTemplate
from gip_cese_bind import getCESEBindInfo
import gip_sets as sets

log = getLogger("GIP.CESEBind")

def print_CESEBind(cp):
    group_template = getTemplate("GlueCESEBind", "GlueCESEBindGroupCEUniqueID")
    se_template = getTemplate("GlueCESEBind", "GlueCESEBindSEUniqueID")
    bind_info = getCESEBindInfo(cp)
    cegroups = {}
    for info in bind_info:
        printTemplate(se_template, info)
        ses = cegroups.setdefault(info['ceUniqueID'], sets.Set())
        ses.add(info['seUniqueID'])
    for ce, ses in cegroups.items():
        ses = '\n'.join(['GlueCESEBindGroupSEUniqueID: %s' % i for i in ses])
        info = {'ceUniqueID': ce, 'se_groups': ses}
        printTemplate(group_template, info)

def main():
    try:
Example #19
#!/usr/bin/env python

import os
import sys

sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, cp_getBoolean
from gip.providers.creamCE import main 
from gip_common import getTemplate, getLogger, printTemplate

log = getLogger('GIP.CreamCE')

if __name__ == '__main__':
    cp = config()
    if cp_getBoolean(cp, "cream", "enabled", False) and \
           cp_getBoolean(cp, "gip", "se_only", False):
        main()
Example #20
import re

import gip_sections
sec = gip_sections.site

from gip_common import cp_get, voList, getLogger

log = getLogger("GIP.Site")

split_re = re.compile('\s*;?,?\s*')


def filter_sponsor(cp, text):
    vo_list = voList(cp)
    vo_map = dict([(i.lower(), i) for i in vo_list])
    text = text.replace('"', '').replace("'", '')
    entries = split_re.split(text)
    results = []
    if len(entries) == 1:
        entry = entries[0]
        if len(entry.split(':')) == 1:
            entries[0] = entry.strip() + ":100"
    for entry in entries:
        try:
            vo, number = entry.split(":")
            number = float(number)
        except:
            log.warning("Entry for sponsor, `%s`, is not in <vo>:<value>" \
                "format." % str(entry))
            continue
        if vo in vo_map:
Example #21
        pwd_tuple = pwd.getpwnam(gip_user)
        pw_uid = pwd_tuple[2]
        pw_gid = pwd_tuple[3]

        os.setregid(pw_gid, pw_gid)
        os.setreuid(pw_uid, pw_uid)
    except:
        # the username was invalid so pass (logging has not been set up yet)
        # Note: we can't log because if we log as root then the ownership of the
        #       log files can potentially get messed up
        print >> sys.stderr, "Invalid username configured: %s" % gip_user
else:
    # Not root so we can't change privileges so pass
    pass

log = getLogger("GIP.Wrapper")


def create_if_not_exist(*paths):
    """
    Create directories if they do not exist
    """
    for path in paths:
        # Bail out if it already exists
        if os.path.exists(path):
            continue
        log.info("Creating directory %s because it doesn't exist." % path)
        try:
            os.makedirs(path)
        except Exception, e:
            log.error("Unable to make necessary directory, %s" % path)
Example #22
#!/usr/bin/python

import sys, time, os

# Make sure the gip_common libraries are in our path
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getTemplate, getLogger, printTemplate, cp_getBoolean
from gip_testing import runCommand
from gip_cluster import getSubClusterIDs, getClusterID

# Retrieve our logger in case of failure
log = getLogger("GIP.timestamp")

def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only and 'VDT_LOCATION' in os.environ: 
    
            # get the VDT version
            vdt_version_cmd = os.path.expandvars("$VDT_LOCATION/vdt/bin/") + 'vdt-version --brief'
            vdt_version = runCommand(vdt_version_cmd).readlines()[0].strip()
            if (vdt_version == ""): vdt_version = "OLD_VDT"
            
            # Get the timestamp in the two formats we wanted
            now = time.strftime("%a %b %d %T UTC %Y", time.gmtime())
    
            # Load up the template for GlueLocationLocalID
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
Example #23
in gip_storage.
"""

import sys
import gip_sets as sets
import socket

from gip_common import cp_get, getLogger, config, getTemplate, printTemplate, \
    cp_getBoolean, cp_getInt, normalizeFQAN
from gip_storage import voListStorage, getSETape, \
    getClassicSESpace, StorageElement
from gip.bestman.BestmanInfo import BestmanInfo
from gip.dcache.DCacheInfo import DCacheInfo
from gip.dcache.DCacheInfo19 import DCacheInfo19

log = getLogger("GIP.Storage.Generic")


def print_SA(se, cp, section="se"):  #pylint: disable-msg=W0613
    """
    Print out the SALocal information for GLUE 1.3.
    """
    vo_limit_str = cp_get(cp, section, "vo_limits", "")
    vo_limit = {}
    cumulative_total = {}
    # Determine the limits for each VO
    for vo_str in vo_limit_str.split(','):
        vo_str = vo_str.strip()
        info = vo_str.split(":")
        if len(info) != 2:
            continue
Example #24
"""
Populate the GIP based upon the values from the OSG configuration
"""

import os
import re
import sys
import socket
import ConfigParser

from gip_sections import ce, site, pbs, condor, sge, lsf, se, subcluster, \
    cluster, cesebind, cream, slurm, htcondorce
from gip_common import getLogger, py23, vdtDir, get_file_list

log = getLogger("GIP")

site_sec = "Site Information"
misc_sec = "Misc Services"
pbs_sec = "PBS"
condor_sec = "Condor"
sge_sec = 'SGE'
storage_sec = 'Storage'
gip_sec = 'GIP'
dcache_sec = 'dcache'
lsf_sec = 'LSF'
cream_sec = 'CREAM'
slurm_sec = 'SLURM'
gateway_sec = 'Gateway'

default_osg_bdii_servers = \
    "http://is1.grid.iu.edu:14001[RAW], http://is2.grid.iu.edu:14001[RAW]"
Example #25
File: lsf.py Project: tiradani/gip
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))

import gip_cluster
from gip_testing import runCommand
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, \
    printTemplate, cp_get, cp_getInt, responseTimes, cp_getBoolean
from gip_cluster import getClusterID
from lsf_common import parseNodes, getQueueInfo, getJobsInfo, getLrmsInfo, \
    getVoQueues
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo

log = getLogger("GIP.LSF")


def print_CE(cp):
    """
    Print out the GlueCE objects for LSF; one GlueCE per grid queue.
    """
    try:
        lsfVersion = getLrmsInfo(cp)
    except:
        lsfVersion = 'Unknown'

    log.debug('Using LSF version %s' % lsfVersion)
    queueInfo = getQueueInfo(cp)
    try:
        totalCpu, freeCpu, queueCpus = parseNodes(queueInfo, cp)
Example #26
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))

import gip_cluster
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, \
    printTemplate, cp_get, cp_getInt, responseTimes, cp_getBoolean
from gip_cluster import getClusterID
from slurm_common import parseNodes, getQueueInfo, getJobsInfo, getLrmsInfo, \
    getVoQueues
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo

log = getLogger("GIP.SLURM")


def print_CE(cp):
    slurmVersion = getLrmsInfo(cp)
    queueInfo = getQueueInfo(cp)
    ce_name = cp_get(cp, ce, "name", "UNKNOWN_CE")
    CE = getTemplate("GlueCE", "GlueCEUniqueID")
    try:
        excludeQueues = [i.strip() for i in cp_get(cp, "slurm", \
            "queue_exclude", "").split(',')]
    except:
        excludeQueues = []
    vo_queues = getVoQueues(cp)
    for queue, info in queueInfo.items():
        if queue in excludeQueues:
Example #27
import os
import re
import sys

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
import gip_cluster

from gip_common import config, cp_get, cp_getBoolean, getLogger, getTemplate, \
    printTemplate
from gip_testing import runCommand
from gip_sections import *
from gip_cese_bind import getCEList
from gip_cluster import getClusterName, getClusterID

log = getLogger("GIP.Cluster")


def print_clusters(cp):
    cluster_name = cp_get(cp, 'cluster', 'name', None)
    if not cluster_name:
        cluster_name = cp_get(cp, 'ce', 'hosting_cluster', None)
    if not cluster_name:
        cluster_name = cp_get(cp, 'ce', 'unique_name', None)
    if not cluster_name:
        getClusterName(cp)
        #raise Exception("Could not determine cluster name.")
    #clusterUniqueID = cp_get(cp, 'ce', 'unique_name', cluster_name)
    clusterUniqueID = getClusterID(cp)
    siteUniqueID = cp_get(cp, "site", "unique_name", 'UNKNOWN_SITE')
    extraCEs = cp_get(cp, 'cluster', 'other_ces', [])
Example #28
"""
Module for interacting with a dCache storage element.
"""

import os
import re
import gip_sets as sets
import stat
import statvfs

import gip_testing
from gip_common import getLogger, cp_get, cp_getBoolean, cp_getInt, matchFQAN
from gip_sections import se
from gip.dcache.admin import connect_admin
from gip.dcache.pools import lookupPoolStorageInfo
log = getLogger("GIP.Storage")

def execute(p, command, bind_vars=None):
    """
    Given a Postgres connection, execute a SQL statement.

    @param p: Postgres connection, as returned by L{connect}
    @type p: psycopg2.Connection
    @param command: SQL statement
    @param bind_vars: Bind vars for B{command}, if any.
    @returns: All resulting rows.
    """
    try:
        from psycopg2.extras import DictCursor #pylint: disable-msg=F0401
        curs = p.cursor(cursor_factory=DictCursor)
    except:
Example #29
#!/usr/bin/python

import sys, time, os
import re
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import popen2
from socket import gethostname

# Make sure the gip_common libraries are in our path
sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getTemplate, getLogger, printTemplate, \
     cp_getBoolean, cp_get, vdtDir

log = getLogger("GIP.authorization_service")
# Retrieve our logger in case of failure

def publish_gridmap_file(cp, template):
    hostname = cp_get(cp, "ce", 'name', gethostname())
    siteID = cp_get(cp, "site", "unique_name", gethostname())

    info = {'serviceID': '%s:gridmap-file' % hostname,
            'serviceType': 'gridmap-file',
            'serviceName': 'Authorization',
            'version': 'UNDEFINED',
            'endpoint': 'Not Applicable',
            'semantics': 'UNDEFINED',
            'owner': '',
            'url': 'localhost://etc/grid-security/gridmap-file',
            'uri': 'localhost://etc/grid-security/gridmap-file',
            'status': 'OK',
Example #30
                    fdlist.remove(outfd)
                else:
                    outdata.write(outchunk)
            if errfd in ready[0]:
                errchunk = stderr.read()
                if errchunk == '':
                    fdlist.remove(errfd)
                else:
                    errdata.write(errchunk)

        exitStatus = child.wait()
        outdata.seek(0)
        errdata.seek(0)
        
        if exitStatus:
            log = getLogger("GIP.common")
            log.info('Command %s exited with %d, stderr: %s' % (cmd, os.WEXITSTATUS(exitStatus), errdata.readlines()))

        return outdata

def generateTests(cp, cls, args=[]):
    """
    Given a class and args, generate a test case for every site in the BDII.

    @param cp: Site configuration
    @type cp: ConfigParser
    @param cls: Test class to use to generate a test suite.  It is assumed
        that the constructor for this class has signature cls(cp, site_name)
    @type cls: class
    @keyword args: List of sites; if it is not empty, then tests will only be
        generated for the given sites.
Example #31
File: sge.py Project: tiradani/gip
import os

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))

import gip_cluster
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, printTemplate, cp_get, cp_getBoolean, cp_getInt
from gip_cluster import getClusterID
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo
from sge_common import getQueueInfo, getJobsInfo, getLrmsInfo, getVoQueues, \
    getQueueList

log = getLogger("GIP.SGE")


def print_CE(cp):
    SGEVersion = getLrmsInfo(cp)
    queueInfo, _ = getQueueInfo(cp)
    ce_name = cp_get(cp, ce, "name", "UNKNOWN_CE")
    ce_template = getTemplate("GlueCE", "GlueCEUniqueID")
    queueList = getQueueList(cp)

    vo_queues = getVoQueues(cp)

    default_max_waiting = 999999
    for queue in queueInfo.values():
        if 'name' not in queue or queue['name'] not in queueList:
            continue
Example #32
"""
Module for interacting with PBS.
"""

import re
import grp
import pwd
import gip_sets as sets
import os

from gip_common import HMSToMin, getLogger, VoMapper, voList, parseRvf
from gip_common import addToPath, cp_get
from gip_testing import runCommand

log = getLogger("GIP.PBS")

batch_system_info_cmd = "qstat -B -f %(pbsHost)s"
queue_info_cmd = "qstat -Q -f %(pbsHost)s"
jobs_cmd = "qstat"
pbsnodes_cmd = "pbsnodes -a"

def pbsOutputFilter(fp):
    """
    PBS can be a pain to work with because it automatically cuts 
    lines off at 80 chars and continues the line on the next line.  For
    example::

        Server: red
        server_state = Active
        server_host = red.unl.edu
Example #33
#!/usr/bin/python

import sys, time, os

# Make sure the gip_common libraries are in our path
if 'GIP_LOCATION' in os.environ:
    sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getTemplate, getLogger, printTemplate, cp_getBoolean
from gip_testing import runCommand
from gip_cluster import getSubClusterIDs, getClusterID

# Retrieve our logger in case of failure
log = getLogger("GIP.timestamp")

def main():
    try:
        # Load up the site configuration
        cp = config()
        se_only = cp_getBoolean(cp, "gip", "se_only", False)
        if not se_only and 'VDT_LOCATION' in os.environ: 
    
            # get the VDT version
            vdt_version_cmd = os.path.expandvars("$VDT_LOCATION/vdt/bin/") + 'vdt-version --brief'
            vdt_version = runCommand(vdt_version_cmd).readlines()[0].strip()
            if (vdt_version == ""): vdt_version = "OLD_VDT"
            
            # Get the timestamp in the two formats we wanted
            now = time.strftime("%a %b %d %T UTC %Y", time.gmtime())
    
            # Load up the template for GlueLocationLocalID
            # To view its contents, see $VDT_LOCATION/gip/templates/GlueCluster
Example #34
                    fdlist.remove(outfd)
                else:
                    outdata.write(outchunk)
            if errfd in ready[0]:
                errchunk = os.read(errfd, 4096)
                if errchunk == '':
                    fdlist.remove(errfd)
                else:
                    errdata.write(errchunk)

        exitStatus = child.wait()
        outdata.seek(0)
        errdata.seek(0)

        if exitStatus:
            log = getLogger("GIP.common")
            log.info('Command %s exited with %d, stderr: %s' %
                     (cmd, os.WEXITSTATUS(exitStatus), errdata.readlines()))

        return outdata


def generateTests(cp, cls, args=[]):
    """
    Given a class and args, generate a test case for every site in the BDII.

    @param cp: Site configuration
    @type cp: ConfigParser
    @param cls: Test class to use to generate a test suite.  It is assumed
        that the constructor for this class has signature cls(cp, site_name)
    @type cls: class
Example #35
import xml.dom.minidom as dom
import cgi
import urllib
import urllib2
import urlparse

try:
    import xml.etree.ElementTree as ElementTree
except:
    import elementtree.ElementTree as ElementTree

import gip_common

log = gip_common.getLogger("GIP.MyOSG")


class MyOSG(object):
    def __init__(self):
        self.resource_groups = []

    def query(self, url):
        urlparts = urlparse.urlsplit(url)
        urlparams = cgi.parse_qsl(urlparts[3])
        urlparams += [
            ("datasource", "summary"),
            ("summary_attrs_showservice", "on"),
            ("summary_attrs_showfqdn", "on"),
            ("summary_attrs_showcontact", "on"),
            ("summary_attrs_showvoownership", "on"),
        ]
        params = urllib.urlencode(urlparams, doseq=True)
Example #36

# Standard GIP imports
import gip_cluster
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, \
    voList, printTemplate, cp_get, cp_getBoolean, cp_getInt, responseTimes
from gip_cluster import getClusterID
from condor_common import parseNodes, getJobsInfo, getLrmsInfo, getGroupInfo
from condor_common import defaultGroupIsExcluded, doPath
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo

from gip_sections import ce, se

log = getLogger("GIP.Condor")


def print_CE(cp):
    """
    Print out the CE(s) for Condor

    Config options used:
       * ce.name.  The name of the CE.  Defaults to "".
       * condor.status.  The status of the condor LRMS.  Defaults to
          "Production".
       * ce.globus_version.  The Globus version used.  Defaults to 4.0.6
       * ce.hosting_cluster.  The attached cluster name.  Defaults to ce.name
       * ce.host_name.  The CE's host name.  Defaults to ce.name
       * condor.preemption.  Whether or not condor allows preemption.  Defaults
          to False
Example #37
import os
import re
import gip_sets as sets
import statvfs

from gip_common import cp_get, cp_getBoolean, getLogger
from gip_storage import StorageElement, voListStorage
import srm_ping

log = getLogger('GIP.Storage.Bestman')


class BestmanInfo(StorageElement):
    def __init__(self, cp, **kw):
        super(BestmanInfo, self).__init__(cp, **kw)
        srms = self.getSRMs()
        if not srms:
            raise ValueError("No SRM endpoint configured!")
        self.srm_info = srms[0]
        self.endpoint = self.srm_info['endpoint']
        self.info = {}
        self.status = False

    def run(self):
        try:
            self.info = srm_ping.bestman_srm_ping(self._cp,
                                                  self.endpoint,
                                                  section=self._section)
            log.info("Returned BestMan info: %s" % str(self.info))
            self.status = True
        except srm_ping.ProxyCreateException, e:
Example #38
#!/usr/bin/python

import re
import sys
import os

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))

from gip_common import config, getLogger, getTemplate, printTemplate
from gip_cese_bind import getCESEBindInfo
import gip_sets as sets

log = getLogger("GIP.CESEBind")


def print_CESEBind(cp):
    group_template = getTemplate("GlueCESEBind", "GlueCESEBindGroupCEUniqueID")
    se_template = getTemplate("GlueCESEBind", "GlueCESEBindSEUniqueID")
    bind_info = getCESEBindInfo(cp)
    cegroups = {}
    for info in bind_info:
        printTemplate(se_template, info)
        ses = cegroups.setdefault(info['ceUniqueID'], sets.Set())
        ses.add(info['seUniqueID'])
    for ce, ses in cegroups.items():
        ses = '\n'.join(['GlueCESEBindGroupSEUniqueID: %s' % i for i in ses])
        info = {'ceUniqueID': ce, 'se_groups': ses}
        printTemplate(group_template, info)

Example #39
#!/usr/bin/env python

import os
import re
import sys

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, getTemplate, printTemplate
from gip_cluster import getApplications, getSubClusterIDs, getClusterID

log = getLogger("GIP.Software")

def print_Locations(cp):
    template = getTemplate("GlueCluster", "GlueLocationLocalID")
    cluster_id = getClusterID(cp)
    for subClusterId in getSubClusterIDs(cp):
        for entry in getApplications(cp):
            entry['subClusterId'] = subClusterId
            entry['clusterId'] = cluster_id
            printTemplate(template, entry)

def main():
    try:
        cp = config()
        print_Locations(cp)
    except Exception, e:
        sys.stdout = sys.stderr
        log.exception(e)
        raise
Example #40
"""
Module for interacting with PBS.
"""

import re
import grp
import pwd
import gip_sets as sets
import os

from gip_common import HMSToMin, getLogger, VoMapper, voList, parseRvf
from gip_common import addToPath, cp_get
from gip_testing import runCommand

log = getLogger("GIP.PBS")

batch_system_info_cmd = "qstat -B -f %(pbsHost)s"
queue_info_cmd = "qstat -Q -f %(pbsHost)s"
jobs_cmd = "qstat"
pbsnodes_cmd = "pbsnodes -a"


def pbsOutputFilter(fp):
    """
    PBS can be a pain to work with because it automatically cuts 
    lines off at 80 chars and continues the line on the next line.  For
    example::

        Server: red
        server_state = Active
        server_host = red.unl.edu
Example #41
import sys
import gip_sets as sets

from xml.sax import make_parser, SAXParseException
from xml.sax.handler import ContentHandler, feature_external_ges

from gip_common import getLogger

log = getLogger("GIP.Storage.dCache.InfoProviderParser")

IN_TOP = 0
IN_POOLS = 1
IN_LINKGROUPS = 2
IN_LINKS = 3
IN_POOLGROUPS = 4
IN_RESERVATIONS = 5
IN_DOORS = 6
IN_SUMMARY = 7
IN_POOLMANAGER = 8
IN_POOLMANAGER_VERSION = 9
IN_SUMMARY_POOLS = 10


class InfoProviderHandler(ContentHandler):
    def __init__(self):
        self.pools = {}
        self.doors = {}
        self.poolgroups = {}
        self.links = {}
        self.linkgroups = {}
        self.reservations = {}
Example #42
File: lsf.py Project: holzman/gip
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
    
import gip_cluster
from gip_testing import runCommand
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, \
    printTemplate, cp_get, cp_getInt, responseTimes, cp_getBoolean
from gip_cluster import getClusterID
from lsf_common import parseNodes, getQueueInfo, getJobsInfo, getLrmsInfo, \
    getVoQueues
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo

log = getLogger("GIP.LSF")

def print_CE(cp):
    """
    Print out the GlueCE objects for LSF; one GlueCE per grid queue.
    """
    try:
        lsfVersion = getLrmsInfo(cp)
    except:
        lsfVersion = 'Unknown'

    log.debug('Using LSF version %s' % lsfVersion)    
    queueInfo = getQueueInfo(cp)
    try:
        totalCpu, freeCpu, queueCpus = parseNodes(queueInfo, cp)
    except:
"""
The original python software provider.

This is now obsolete.  To see what will really be used beyond version 1.0,
look at gip/lib/python/gip/providers/software.py
"""

import os
import re
import sys

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, getTemplate

log = getLogger("GIP.Software")


def print_Locations(cp):
    app_dir = cp.get("osg_dirs", "app")
    ce_name = cp.get('ce', 'name')
    template = getTemplate("GlueCluster", "GlueLocationLocalID")
    path = "%s/etc/grid3-locations.txt" % app_dir
    if not os.path.exists(path):
        path = '%s/etc/osg-locations.txt' % app_dir
    fp = open(path, 'r')
    for line in fp:
        line = line.strip()
        info = line.split()
        if len(info) != 3 or info[0].startswith('#'):
            continue
Example #44
def calculate_spaces(cp, admin, section='se'):
    """
    Determine the storage areas attached to this dCache.

    This returns two lists.  The first list, sas, is a list of dictionaries
    which contain the key-value pairs needed to fill out the GlueSA object.

    The second list, vos, is a list of dictionaries which contain the key-value
    pairs needed to fill in the GlueVOInfo object.

    @param cp: ConfigParser object
    @param admin: Admin interface to dCache
    @returns: sas, vos (see above description of return values).
    """
    # If SrmSpaceManager isn't running, this will cause an exception.
    # Catch it and pretend we just have no reservations or link groups
    try:
        space_output = admin.execute(SrmSpaceManager, 'ls')
        resv, lg = parsers.parse_srm_space_manager(space_output)
    except:
        resv = []
        lg = []

    # Get the pool information
    psu_output = admin.execute(PoolManager, 'psu dump setup')
    pgroups, lgroups, links, link_settings, pools = \
        parsers.parse_pool_manager(psu_output)
    listOfPools = pools_module.lookupPoolStorageInfo(admin, \
        getLogger("GIP.dCache.Pools"))
    pm_info = admin.execute(PoolManager, 'info')
    can_stage = pm_info.find('Allow staging : on') >= 0
    can_p2p = pm_info.find('Allow p2p : on') >= 0

    # Some post-parsing: go from list of pools to dictionary by pool name
    pool_info = {}
    pool_objs = {}
    for pool in listOfPools:
        pool_info[pool.poolName] = pool.totalSpaceKB
        pool_objs[pool.poolName] = pool
    for pool in pools:
        if pool not in pool_info:
            pool_info[pool] = 0

    # To make sure we don't have overlapping spaces, remove each pool from
    # the pools list once it has been accounted for.

    # Build the map from link group to pools
    lgroups_to_pools = {}
    for lgroup, assc_links in lgroups.items():
        cur_set = sets.Set()
        lgroups_to_pools[lgroup] = cur_set
        for link in assc_links:
            for pgroup in links[link]:
                for pool in pgroups[pgroup]:
                    cur_set.add(pool)
                    pools.remove(pool)

    # Ensure already-seen pools are not in the remaining pool groups
    for pgroup, pg_set in pgroups.items():
        pg_set.intersection_update(pools)

    def cmp(x, y):
        "Sort pool groups by total size"
        return sum([pool_info[i] for i in pgroups[x]]) < \
               sum([pool_info[i] for i in pgroups[y]])

    pgroup_list = pgroups.keys()

    # Python 2.4 and 2.5 support named parameters, but python 2.3
    #  does not.  Trying the named parameter first for future
    #  compatibility reasons, if it fails (i.e. on python 2.3) then
    #  resort to the python 2.3 method
    try:
        pgroup_list.sort(cmp=cmp)
    except:
        pgroup_list.sort(cmp)

    sas = []
    vos = []
    # Build a SA from each link group
    for lgroup, lgpools in lgroups.items():
        lg_info = None
        for l in lg:
            if l['name'] == lgroup:
                lg_info = l
                break
        if not lg_info:
            continue
        sa = calculate_space_from_linkgroup(cp,lg_info, [pool_objs[i] for i in \
            lgpools if i in pool_objs], section=section)
        sas.append(sa)
        voinfos = calculate_voinfo_from_lg(cp, lg_info, resv, section=section)
        vos.extend(voinfos)

    # Build a SA from each nontrivial pool group
    # Start with the largest and work our way down.
    for pgroup in pgroup_list:
        pg_pools = pgroups[pgroup]
        del pgroups[pgroup]
        for pg2, pg2_pools in pgroups.items():
            pg2_pools.difference_update(pg_pools)
        my_pool_objs = [pool_objs[i] for i in pg_pools if i in pool_objs]
        if not my_pool_objs:
            continue
        sa = calculate_space_from_poolgroup(cp,
                                            pgroup,
                                            my_pool_objs,
                                            admin,
                                            links,
                                            link_settings,
                                            allow_staging=can_stage,
                                            allow_p2p=can_p2p,
                                            section=section)
        sas.append(sa)
        voinfos = calculate_voinfo_from_pgroup(cp, pgroup, section=section)
        vos.extend(voinfos)

    return sas, vos
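
# A minimal usage sketch (assumes config() from gip_common and connect_admin
# from gip.dcache.admin, as shown in the other dCache examples; illustrative
# only):
#
#   cp = config()
#   dcache_admin = connect_admin(cp)
#   sas, vos = calculate_spaces(cp, dcache_admin, section='se')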
Example #45
    
import re
import sys  
import os
        
if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
    
from gip_common import cp_get, cp_getBoolean, config, getLogger, getTemplate, printTemplate, voList
from gip_common import vdtDir
from gip_testing import runCommand
import gip_sets as sets
import time
import zlib

log = getLogger("GIP.CREAM")

def getUniqueHash(cp):
    # EGI uses unix 'cksum' command; we'll use zlib's crc instead.
    loc = cp_get(cp, 'gip', 'osg_config', vdtDir(os.path.expandvars('$VDT_LOCATION/monitoring/config.ini'),
                                                 '/etc/osg/config.ini'))
    loc = os.path.expandvars(loc)
    try:
        hash = zlib.crc32(loc)
    except:
        log.error('Could not find config.ini for checksum')
        hash = '0008675309'

    return hash

def getCreamVersion(cp):
Example #46
import socket

from gip_common import getLogger, cp_get
from gip_storage import StorageElement, voListStorage, getdCacheSESpace
from admin import connect_admin
from space_calculator import calculate_spaces

log = getLogger("GIP.Storage.dCache")


class DCacheInfo(StorageElement):
    def __init__(self, cp, **kw):
        super(DCacheInfo, self).__init__(cp, **kw)
        self.status = 'Production'

    def run(self):
        try:
            self.admin = connect_admin(self._cp)
        except Exception, e:
            log.exception(e)
            self.status = 'Closed'
        try:
            self.sas, self.vos = calculate_spaces(self._cp,
                                                  self.admin,
                                                  section=self._section)
        except Exception, e:
            log.exception(e)

    def getPort(self):
        port = cp_get(self._cp, self._section, "srm_port", "8443")
        return port
Example #47
import xml.dom.minidom as dom
import cgi
import urllib
import urllib2
import urlparse

try:
    import xml.etree.ElementTree as ElementTree
except:
    import elementtree.ElementTree as ElementTree

import gip_common

log = gip_common.getLogger("GIP.MyOSG")

class MyOSG(object):

    def __init__(self):
        self.resource_groups = []

    def query(self, url):
        urlparts = urlparse.urlsplit(url)
        urlparams = cgi.parse_qsl(urlparts[3])
        urlparams += [("datasource", "summary"),
                      ("summary_attrs_showservice", "on"),
                      ("summary_attrs_showfqdn", "on"),
                      ("summary_attrs_showcontact", "on"),
                      ("summary_attrs_showvoownership", "on"),
                     ]
        params = urllib.urlencode(urlparams, doseq=True)
Example #48
def calculate_spaces(cp, admin, section='se'):
    """
    Determine the storage areas attached to this dCache.

    This returns two lists.  The first list, sas, is a list of dictionaries
    which contain the key-value pairs needed to fill out the GlueSA object.

    The second list, vos, is a list of dictionaries which contain the key-value
    pairs needed to fill in the GlueVOInfo object.

    @param cp: ConfigParser object
    @param admin: Admin interface to dCache
    @returns: sas, vos (see above description of return values).
    """
    # If SrmSpaceManager isn't running, this will cause an exception.
    # Catch it and pretend we just have no reservations or link groups
    try:
        space_output = admin.execute(SrmSpaceManager, 'ls')
        resv, lg = parsers.parse_srm_space_manager(space_output)
    except:
        resv = []
        lg = []

    # Get the pool information
    psu_output = admin.execute(PoolManager, 'psu dump setup')
    pgroups, lgroups, links, link_settings, pools = \
        parsers.parse_pool_manager(psu_output)
    listOfPools = pools_module.lookupPoolStorageInfo(admin, \
        getLogger("GIP.dCache.Pools"))
    pm_info = admin.execute(PoolManager, 'info')
    can_stage = pm_info.find('Allow staging : on') >= 0
    can_p2p = pm_info.find('Allow p2p : on') >= 0

    # Some post-parsing: go from list of pools to dictionary by pool name
    pool_info = {}
    pool_objs = {}
    for pool in listOfPools:
        pool_info[pool.poolName] = pool.totalSpaceKB
        pool_objs[pool.poolName] = pool
    for pool in pools:
        if pool not in pool_info:
            pool_info[pool] = 0


    # To make sure we don't have overlapping spaces, remove each pool from
    # the pools list once it has been accounted for.

    # Build the map from link group to pools
    lgroups_to_pools = {}
    for lgroup, assc_links in lgroups.items():
        cur_set = sets.Set()
        lgroups_to_pools[lgroup] = cur_set
        for link in assc_links:
            for pgroup in links[link]:
                for pool in pgroups[pgroup]:
                    cur_set.add(pool)
                    pools.remove(pool)

    # Ensure already-seen pools are not in the remaining pool groups
    for pgroup, pg_set in pgroups.items():
        pg_set.intersection_update(pools)

    def cmp(x, y):
        "Sort pool groups by total size"
        return sum([pool_info[i] for i in pgroups[x]]) < \
               sum([pool_info[i] for i in pgroups[y]])

    pgroup_list = pgroups.keys()
    
    # Python 2.4 and 2.5 support named parameters, but python 2.3 
    #  does not.  Trying the named parameter first for future 
    #  compatibility reasons, if it fails (i.e. on python 2.3) then
    #  resort to the python 2.3 method
    try:
        pgroup_list.sort(cmp=cmp)
    except:
        pgroup_list.sort(cmp)
        

    sas = []
    vos = []
    # Build a SA from each link group
    for lgroup, lgpools in lgroups.items():
        lg_info = None
        for l in lg:
            if l['name'] == lgroup:
                lg_info = l
                break
        if not lg_info:
            continue
        sa = calculate_space_from_linkgroup(cp,lg_info, [pool_objs[i] for i in \
            lgpools if i in pool_objs], section=section)
        sas.append(sa)
        voinfos = calculate_voinfo_from_lg(cp, lg_info, resv, section=section)
        vos.extend(voinfos)

    # Build a SA from each nontrivial pool group
    # Start with the largest and work our way down.
    for pgroup in pgroup_list:
        pg_pools = pgroups[pgroup]
        del pgroups[pgroup]
        for pg2, pg2_pools in pgroups.items():
            pg2_pools.difference_update(pg_pools)
        my_pool_objs = [pool_objs[i] for i in pg_pools if i in pool_objs]
        if not my_pool_objs:
            continue
        sa = calculate_space_from_poolgroup(cp, pgroup, my_pool_objs, admin,
            links, link_settings, allow_staging=can_stage, allow_p2p=can_p2p,
            section=section)
        sas.append(sa)
        voinfos = calculate_voinfo_from_pgroup(cp, pgroup, section=section)
        vos.extend(voinfos)

    return sas, vos
Example #49
import os
import re
import sys

if "GIP_LOCATION" in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
import gip_cluster

from gip_common import config, cp_get, cp_getBoolean, getLogger, getTemplate, printTemplate
from gip_testing import runCommand
from gip_sections import *
from gip_cese_bind import getCEList
from gip_cluster import getClusterName, getClusterID

log = getLogger("GIP.Cluster")


def print_clusters(cp):
    cluster_name = cp_get(cp, "cluster", "name", None)
    if not cluster_name:
        cluster_name = cp_get(cp, "ce", "hosting_cluster", None)
    if not cluster_name:
        cluster_name = cp_get(cp, "ce", "unique_name", None)
    if not cluster_name:
        getClusterName(cp)
        # raise Exception("Could not determine cluster name.")
    # clusterUniqueID = cp_get(cp, 'ce', 'unique_name', cluster_name)
    clusterUniqueID = getClusterID(cp)
    siteUniqueID = cp_get(cp, "site", "unique_name", "UNKNOWN_SITE")
    extraCEs = cp_get(cp, "cluster", "other_ces", [])
    if extraCEs:
Example #50
File: slurm.py Project: holzman/gip
    getLogger,
    addToPath,
    getTemplate,
    printTemplate,
    cp_get,
    cp_getInt,
    responseTimes,
    cp_getBoolean,
)
from gip_cluster import getClusterID
from slurm_common import parseNodes, getQueueInfo, getJobsInfo, getLrmsInfo, getVoQueues
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, buildContactString, getHTPCInfo

log = getLogger("GIP.SLURM")


def print_CE(cp):
    slurmVersion = getLrmsInfo(cp)
    queueInfo = getQueueInfo(cp)
    ce_name = cp_get(cp, ce, "name", "UNKNOWN_CE")
    CE = getTemplate("GlueCE", "GlueCEUniqueID")
    try:
        excludeQueues = [i.strip() for i in cp_get(cp, "slurm", "queue_exclude", "").split(",")]
    except:
        excludeQueues = []
    vo_queues = getVoQueues(cp)
    for queue, info in queueInfo.items():
        if queue in excludeQueues:
            continue
Example #51
import os
import sys
import time
import unittest

#Standard testing imports:
sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, cp_get
from gip_testing import runTest, streamHandler

log = getLogger("GIP.Test.Wrapper")

#Add the path with the osg_info_wrapper script:
sys.path.append(os.path.expandvars("$GIP_LOCATION/libexec"))
import osg_info_wrapper

class TestOsgInfoWrapper(unittest.TestCase):

    def test_simple(self):
        """
        Simple test of the OSG Info Wrapper.  Make sure that both the provider
        and plugin functionality work.
        """
        cp = config("test_modules/simple/config")
        entries = osg_info_wrapper.main(cp, return_entries=True)
        has_timestamp = False
        has_ce = False
        for entry in entries:
            if entry.glue.get('LocationName', (0,))[0] == 'TIMESTAMP':
                has_timestamp = True
Example #52
File: sge.py Project: holzman/gip
import os

if 'GIP_LOCATION' in os.environ:
    sys.path.append(os.path.expandvars("$GIP_LOCATION/lib/python"))
    
import gip_cluster
from gip_common import config, VoMapper, getLogger, addToPath, getTemplate, printTemplate, cp_get, cp_getBoolean, cp_getInt
from gip_cluster import getClusterID
from gip_sections import ce
from gip_storage import getDefaultSE
from gip_batch import buildCEUniqueID, getGramVersion, getCEImpl, getPort, \
     buildContactString, getHTPCInfo
from sge_common import getQueueInfo, getJobsInfo, getLrmsInfo, getVoQueues, \
    getQueueList

log = getLogger("GIP.SGE")

def print_CE(cp):
    SGEVersion = getLrmsInfo(cp)
    queueInfo, _ = getQueueInfo(cp)
    ce_name = cp_get(cp, ce, "name", "UNKNOWN_CE")
    ce_template = getTemplate("GlueCE", "GlueCEUniqueID")
    queueList = getQueueList(cp)

    vo_queues = getVoQueues(cp)

    default_max_waiting = 999999
    for queue in queueInfo.values():
        if 'name' not in queue or queue['name'] not in queueList:
            continue
        if queue['name'] == 'waiting':
Example #53
import re
import gip_sets as sets
import urllib2
import socket

from gip_common import getLogger, cp_get, normalizeFQAN
from gip_storage import StorageElement, voListStorage
from DCacheInfoProviderParser import parse_fp
from space_calculator import getAllowedVOs, getLGAllowedVOs

log = getLogger("GIP.Storage.dCache")

class DCacheInfo19(StorageElement):

    def __init__(self, cp, **kw):
        super(DCacheInfo19, self).__init__(cp, **kw)
        self.status = 'Production'
        self.dom = None
        self.sas = []
        self.vos = []
        self.seen_pools = sets.Set()

    def run(self):
        endpoint = cp_get(self._cp, self._section, "infoProviderEndpoint", "")
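        # e.g. "http://dcache-head.example.edu:2288/info" (illustrative URL
        # for the dCache info provider's XML output)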
        try:
            self.handler = parse_fp(urllib2.urlopen(endpoint))
        except Exception, e:
            log.exception(e)
            self.handler = None
        self.parse()
Example #54
"""
Ping the BeStMan SRM server for information.
"""

import os
import re
import tempfile

import gip_testing
from gip_common import cp_get, getLogger
from gip_testing import runCommand

log = getLogger('GIP.Storage.Bestman.srm_ping')

def which(executable):
    """
    Helper function to determine the location of an executable.

    @param executable: Name of the program.
    @returns: Full path to executable, or None if it can't be found.
    """
    for dirname in os.environ.get('PATH', '/bin:/usr/bin').split(':'):
        fullname = os.path.join(dirname, executable)
        if os.path.exists(fullname):
            return fullname
    return None
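
# Usage note (illustrative): which('grid-proxy-init') returns the full path
# when the executable is found on $PATH, otherwise None.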

class ProxyCreateException(Exception):
    pass
Example #55
#!/usr/bin/env python

import os
import sys

if 'GIP_LOCATION' in os.environ:
    sys.path.insert(0, os.path.expandvars("$GIP_LOCATION/lib/python"))
from gip_common import config, getLogger, cp_get, cp_getBoolean
#from gip.providers.dcache import main as dcache_main
from gip.providers.generic_storage import main as generic_main

log = getLogger("GIP.SE")

def main():
    log.info("Using generic storage element.")
    generic_main()

if __name__ == '__main__':
    main()