Example #1
 def getEmail(self, dn):
     # get DN
     _logger.debug("getDN for %s" % dn)
     dbProxy = DBProxy()
     distinguishedName = dbProxy.cleanUserID(dn)
     _logger.debug("DN = %s" % distinguishedName)
     if distinguishedName == "":
         _logger.error("cannot get DN for %s" % dn)
         return ""
     # get email from MetaDB
     mailAddrInDB, dbUptime = self.taskBuffer.getEmailAddr(
         distinguishedName, withUpTime=True)
     _logger.debug("email in MetaDB : '%s'" % mailAddrInDB)
     notSendMail = False
     if mailAddrInDB not in [None, '']:
         # email notification is suppressed
         if mailAddrInDB.split(':')[0] == 'notsend':
             notSendMail = True
     # avoid looking up the email address too frequently
     if dbUptime is not None and \
             datetime.datetime.utcnow() - dbUptime < datetime.timedelta(hours=1):
         _logger.debug("no lookup")
         if notSendMail or mailAddrInDB in [None, '']:
             return 'notsend'
         else:
             return mailAddrInDB.split(':')[-1]
     # get email from DDM (Rucio)
     try:
         tmpStatus, userInfo = rucioAPI.finger(dn)
         if tmpStatus:
             mailAddr = userInfo['email']
             _logger.debug("email from DDM : '%s'" % mailAddr)
         else:
             mailAddr = None
             _logger.error(
                 "failed to get email from DDM : {}".format(userInfo))
         if mailAddr is None:
             mailAddr = ''
         # build the email field to store in the DB
         mailAddrToDB = ''
         if notSendMail:
             mailAddrToDB += 'notsend:'
         mailAddrToDB += mailAddr
         # update database
         _logger.debug("update email for %s to %s" %
                       (distinguishedName, mailAddrToDB))
         self.taskBuffer.setEmailAddr(distinguishedName, mailAddrToDB)
         if notSendMail:
             return 'notsend'
         return mailAddr
     except Exception as e:
         _logger.error("getEmail failed with {} {}".format(
             str(e), traceback.format_exc()))
     return ""
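Both the cached lookup and the database update in Example #1 rely on a small string convention for the email field stored in MetaDB: an optional 'notsend:' prefix marks a user whose email notification is suppressed, followed by the address itself. The standalone sketch below only illustrates that convention; the helper names are made up for this illustration.

# Illustration of the email-field convention used by getEmail above.
# The helper names are hypothetical; only the 'notsend:' prefix handling
# mirrors the code in the example.
def encode_email_field(mail_addr, not_send_mail):
    """Build the value stored in the DB, e.g. 'notsend:user@example.org'."""
    prefix = 'notsend:' if not_send_mail else ''
    return prefix + (mail_addr or '')


def decode_email_field(field):
    """Return (not_send_mail, mail_addr) from a stored value."""
    if field in (None, ''):
        return False, ''
    not_send_mail = field.split(':')[0] == 'notsend'
    return not_send_mail, field.split(':')[-1]


print(encode_email_field('user@example.org', True))    # notsend:user@example.org
print(decode_email_field('notsend:user@example.org'))  # (True, 'user@example.org')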
Example #2
 def getEmail(self, dn):
     # get DN
     _logger.debug("getDN for %s" % dn)
     dbProxy = DBProxy()
     distinguishedName = dbProxy.cleanUserID(dn)
     _logger.debug("DN = %s" % distinguishedName)
     if distinguishedName == "":
         _logger.error("cannot get DN for %s" % dn)
         return ""
     # get email from MetaDB
     mailAddrInDB, dbUptime = self.taskBuffer.getEmailAddr(
         distinguishedName, withUpTime=True)
     _logger.debug("email in MetaDB : '%s'" % mailAddrInDB)
     notSendMail = False
     if mailAddrInDB not in [None, '']:
         # email notification is suppressed
         if mailAddrInDB.split(':')[0] == 'notsend':
             notSendMail = True
     # avoid looking up the email address too frequently
     if dbUptime is not None and \
             datetime.datetime.utcnow() - dbUptime < datetime.timedelta(hours=1):
         _logger.debug("no lookup")
         if notSendMail or mailAddrInDB in [None, '']:
             return 'notsend'
         else:
             return mailAddrInDB.split(':')[-1]
     # get email from DDM (Rucio) using the real DN
     # strip proxy CN components to recover the real user DN
     realDN = re.sub('/CN=limited proxy', '', dn)
     realDN = re.sub('(/CN=proxy)+', '', realDN)
     try:
         tmpStatus, userInfo = rucioAPI.finger(realDN)
         mailAddr = userInfo['email']
         _logger.debug("email from DDM : '%s'" % mailAddr)
         if mailAddr is None:
             mailAddr = ''
         # build the email field to store in the DB
         mailAddrToDB = ''
         if notSendMail:
             mailAddrToDB += 'notsend:'
         mailAddrToDB += mailAddr
         # update database
         _logger.debug("update email for %s to %s" %
                       (distinguishedName, mailAddrToDB))
         self.taskBuffer.setEmailAddr(distinguishedName, mailAddrToDB)
         if notSendMail:
             return 'notsend'
         return mailAddr
     except Exception:
         errType, errValue = sys.exc_info()[:2]
         _logger.error("%s %s" % (errType, errValue))
     return ""
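Compared with Example #1, this variant strips the proxy components from the DN before calling rucioAPI.finger. The standalone sketch below illustrates just that cleanup step; the DN value is made up for the illustration.

import re

# Illustration of the proxy-DN cleanup used above, with a made-up DN.
dn = '/DC=ch/DC=cern/OU=Users/CN=Some User/CN=proxy/CN=limited proxy'
realDN = re.sub('/CN=limited proxy', '', dn)
realDN = re.sub('(/CN=proxy)+', '', realDN)
print(realDN)  # /DC=ch/DC=cern/OU=Users/CN=Some User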
Example #3
from pandaserver.taskbuffer.OraDBProxy import DBProxy
from config import panda_config
import sys

if __name__ == "__main__":
    """
    Functional testing of the shares tree
    """
    proxyS = DBProxy()
    proxyS.connect(panda_config.dbhost, panda_config.dbpasswd,
                   panda_config.dbuser, panda_config.dbname)

    # print the leave shares sorted by under-pledging
    print('--------------LEAVE SHARES SORTED BY UNDER-PLEDGING---------------')
    print(proxyS.get_sorted_leaves())

    # print the global share structure
    print('--------------GLOBAL SHARES TREE---------------')
    print(proxyS.tree)

    # print the normalized leaves, which will be the actual applied shares
    print('--------------LEAVE SHARES---------------')
    print(proxyS.leave_shares)

    # print the current grid status
    print('--------------CURRENT GRID STATUS---------------')
    print(
        proxyS.tree.pretty_print_hs_distribution(
            proxyS._DBProxy__hs_distribution))

    # check whether a couple of shares are valid leave names
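    # A minimal sketch, added for illustration, of the check hinted at above.
    # The share names are hypothetical and it assumes the items in
    # proxyS.leave_shares are either plain names or objects with a 'name'
    # attribute; adjust to the actual structure.
    candidate_shares = ['MC Production', 'Analysis']
    leave_names = {getattr(share, 'name', share) for share in proxyS.leave_shares}
    for name in candidate_shares:
        print('%s is a valid leave share: %s' % (name, name in leave_names))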
Example #4
optP.add_option('--cloud',
                action='store',
                dest='cloud',
                default=None,
                help='cloud')
optP.add_option('--maxJobs',
                action='store',
                dest='maxJobs',
                default=None,
                help='max number of jobs to be killed')
options, args = optP.parse_args()

if options.cloud is None and options.site is None:
    optP.error("--site=<computingSite> and/or --cloud=<cloud> is required")

proxyS = DBProxy()
proxyS.connect(panda_config.dbhost, panda_config.dbpasswd, panda_config.dbuser,
               panda_config.dbname)

jobsMap = {}

if len(args) == 0:
    optP.error('priority is required')

varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':currentPriority'] = args[0]
sql = "SELECT PandaID,currentPriority FROM %s WHERE prodSourceLabel=:prodSourceLabel AND currentPriority<:currentPriority "
if options.killRunning:
    sql += "AND jobStatus=:jobStatus "
    varMap[':jobStatus'] = 'running'
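
# A minimal sketch, added for illustration, of how the sql string and varMap
# above could be combined: the table names and the querySQLS(sql, varMap)
# call are assumptions about the DBProxy API, not part of the original script.
for table in ['ATLAS_PANDA.jobsActive4', 'ATLAS_PANDA.jobsDefined4']:
    status, res = proxyS.querySQLS(sql % table, varMap)
    if res is None:
        continue
    for pandaID, currentPriority in res:
        # group the selected PandaIDs by priority in jobsMap
        jobsMap.setdefault(currentPriority, []).append(pandaID)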
Example #5
import socket
import time
from urllib.parse import parse_qs

from testutils import sendCommand

# NOTE: _logger, DBProxy and panda_config are used below but their imports
# are not part of this excerpt.

def retrieveJob(site):
    function = "getJob"
    node = {}
    node['siteName'] = site
    node['mem'] = 1000
    node['node'] = socket.getfqdn()

    data = sendCommand(function, node, _logger)
    jobD = parse_qs(data)  # jobD indicates it's a job in dictionary format, not a JobSpec object
    return jobD
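

# parse_qs maps every form field to a list of values, so callers usually
# unwrap the single-element lists before reading individual job fields.
# The helper below is a sketch added for illustration; the field names in
# the usage comment are guesses, not taken from this snippet.
def unwrap_job(jobD):
    # keep only the first value of each field returned by parse_qs
    return {key: values[0] for key, values in jobD.items()}

# Hypothetical usage (field names depend on the actual server response):
#   job = unwrap_job(retrieveJob('CERN-PROD'))
#   print(job.get('StatusCode'), job.get('PandaID'))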


if __name__ == "__main__":
    proxyS = DBProxy()
    proxyS.connect(panda_config.dbhost,panda_config.dbpasswd,panda_config.dbuser,panda_config.dbname)

    #proxyS.getCriteriaForGlobalShares('BNL-OSG')

    site = 'CERN-PROD'

    DIRECT = 'direct'
    WEB = 'web'

    mode = WEB

    if mode == DIRECT:
        for i in range(3):
            t_before = time.time()
            _logger.info(proxyS.getJobs(1, site, 'managed', None, 1000,