def lockJob(self,dn,jobID):
    """Take the re-brokerage lock for one of the user's jobs.

    :param dn:    user distinguished name owning the job
    :param jobID: JobID to lock
    :return: (True,'') on success, (False,<error message>) on failure
    """
    # token identifying this user/job pair; used as a prefix on log lines
    proxyForDN = DBProxy()
    self.token = "%s:%s:" % (proxyForDN.cleanUserID(dn),jobID)
    _logger.debug("%s lockJob" % self.token)
    # attempt to take the lock via the task buffer
    lockedOK,lockInfo = self.taskBuffer.lockJobForReBrokerage(dn,jobID,self.simulation,self.forceOpt,
                                                              forFailed=self.forFailed)
    if not lockedOK:
        # lock refused; report the reason back to the caller
        _logger.debug("%s lockJob failed since %s" % (self.token,lockInfo['err']))
        return False,lockInfo['err']
    # remember which JobID this instance now owns
    self.jobID = jobID
    # copy job/build metadata returned with the lock onto the instance
    self.rPandaID        = lockInfo['rPandaID']
    self.bPandaID        = lockInfo['bPandaID']
    self.userName        = lockInfo['userName']
    self.buildStatus     = lockInfo['bStatus']
    self.buildJobID      = lockInfo['bJobID']
    self.minPandaIDlibDS = lockInfo['minPandaIDlibDS']
    self.maxPandaIDlibDS = lockInfo['maxPandaIDlibDS']
    # the user's current JobID counter doubles as a revision number
    self.revNum = self.taskBuffer.getJobIdUser(dn)
    _logger.debug("%s run PandaID=%s / build PandaID=%s Status=%s JobID=%s rev=%s" % \
                  (self.token,self.rPandaID,self.bPandaID,self.buildStatus,
                   self.buildJobID,self.revNum))
    return True,''
def getEmail(self,dn): # get DN _logger.debug("getDN for %s" % dn) dbProxy = DBProxy() distinguishedName = dbProxy.cleanUserID(dn) _logger.debug("DN = %s" % distinguishedName) if distinguishedName == "": _logger.error("cannot get DN for %s" % dn) return "" # get email from MetaDB mailAddrInDB,dbUptime = self.taskBuffer.getEmailAddr(distinguishedName,withUpTime=True) _logger.debug("email in MetaDB : '%s'" % mailAddrInDB) notSendMail = False if not mailAddrInDB in [None,'']: # email mortification is suppressed if mailAddrInDB.split(':')[0] == 'notsend': notSendMail = True # avoid too frequently lookup if dbUptime != None and datetime.datetime.utcnow()-dbUptime < datetime.timedelta(hours=1): _logger.debug("no lookup") if notSendMail or mailAddrInDB in [None,'']: return 'notsend' else: return mailAddrInDB.split(':')[-1] # get email from DQ2 realDN = re.sub('/CN=limited proxy','',dn) realDN = re.sub('(/CN=proxy)+','',realDN) try: _logger.debug("dq2Info.finger(%s)" % realDN) for iDDMTry in range(3): status,out = dq2Info.finger(realDN) if status != 0 or out.find("DQ2 internal server exception") != -1 \ or out.find("An error occurred on the central catalogs") != -1 \ or out.find("MySQL server has gone away") != -1: time.sleep(10) else: break _logger.debug(out) exec "userInfo=%s" % out mailAddr = userInfo['email'] _logger.debug("email from DQ2 : '%s'" % mailAddr) if mailAddr == None: mailAddr = '' # make email field to update DB mailAddrToDB = '' if notSendMail: mailAddrToDB += 'notsend:' mailAddrToDB += mailAddr # update database _logger.debug("update email for %s to %s" % (distinguishedName,mailAddrToDB)) self.taskBuffer.setEmailAddr(distinguishedName,mailAddrToDB) if notSendMail: return 'notsend' return mailAddr except: errType,errValue = sys.exc_info()[:2] _logger.error("%s %s" % (errType,errValue)) return "" return ""
def getEmail(self,dn):
    """Resolve the email address for a user DN.

    Looks the address up in MetaDB first; if the cached entry is fresh
    (updated within the last hour) it is returned directly.  Otherwise
    the address is fetched from DDM (rucio) and written back to MetaDB.

    :param dn: user distinguished name
    :return: the email address, 'notsend' when notification is
             suppressed, or '' on failure
    """
    # get DN
    _logger.debug("getDN for %s" % dn)
    dbProxy = DBProxy()
    distinguishedName = dbProxy.cleanUserID(dn)
    _logger.debug("DN = %s" % distinguishedName)
    if distinguishedName == "":
        _logger.error("cannot get DN for %s" % dn)
        return ""
    # get email from MetaDB
    mailAddrInDB,dbUptime = self.taskBuffer.getEmailAddr(distinguishedName,withUpTime=True)
    _logger.debug("email in MetaDB : '%s'" % mailAddrInDB)
    notSendMail = False
    if not mailAddrInDB in [None,'']:
        # a 'notsend:' prefix on the stored address suppresses notification
        if mailAddrInDB.split(':')[0] == 'notsend':
            notSendMail = True
    # avoid too frequent lookups: trust the DB entry if it is under an hour old
    if dbUptime != None and datetime.datetime.utcnow()-dbUptime < datetime.timedelta(hours=1):
        _logger.debug("no lookup")
        if notSendMail or mailAddrInDB in [None,'']:
            return 'notsend'
        else:
            return mailAddrInDB.split(':')[-1]
    # strip proxy components from the DN before querying DDM
    realDN = re.sub('/CN=limited proxy','',dn)
    realDN = re.sub('(/CN=proxy)+','',realDN)
    try:
        # NOTE(review): tmpStatus is ignored here; on a failed lookup the
        # subsequent userInfo['email'] access raises and is handled below
        tmpStatus,userInfo = rucioAPI.finger(realDN)
        mailAddr = userInfo['email']
        _logger.debug("email from DDM : '%s'" % mailAddr)
        if mailAddr == None:
            mailAddr = ''
        # make email field to update DB, preserving the notsend flag
        mailAddrToDB = ''
        if notSendMail:
            mailAddrToDB += 'notsend:'
        mailAddrToDB += mailAddr
        # update database
        _logger.debug("update email for %s to %s" % (distinguishedName,mailAddrToDB))
        self.taskBuffer.setEmailAddr(distinguishedName,mailAddrToDB)
        if notSendMail:
            return 'notsend'
        return mailAddr
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
        errType,errValue = sys.exc_info()[:2]
        _logger.error("%s %s" % (errType,errValue))
        return ""
def cleanName(dn):
    """Extract a normalized lowercase 'first last' name from a grid DN.

    :param dn: user distinguished name
    :return: cleaned name string (lowercased, punctuation/numbers/ATLAS
             markers/'Jr' removed, whitespace collapsed)
    """
    # extract First Last from DN
    dbProxy = DBProxy()
    extractedDN = dbProxy.cleanUserID(dn)
    # raw strings for all regex patterns: escapes like '\.' and '\d' in
    # plain strings are invalid escape sequences (deprecated since 3.6)
    # replace - and . with spaces
    extractedDN = re.sub(r'-|\.',' ',extractedDN)
    # change to lower
    extractedDN = extractedDN.lower()
    # remove ATLAS marker, with or without surrounding parentheses
    extractedDN = re.sub(r'\(*atlas\)*','',extractedDN)
    # remove numbers
    extractedDN = re.sub(r'\d*','',extractedDN)
    # remove Jr suffix
    extractedDN = re.sub(r' jr( |$)',' ',extractedDN)
    # collapse whitespace
    extractedDN = re.sub(r' +',' ',extractedDN)
    extractedDN = extractedDN.strip()
    return extractedDN
from taskbuffer.OraDBProxy import DBProxy # password from config import panda_config optP = optparse.OptionParser(conflict_handler="resolve") optP.add_option('--user', action='store',dest='user', default=None,help='prodUserName') optP.add_option('--unban',action='store_const',const=True,dest='unban',default=False,help='unban the user') options,args = optP.parse_args() if options.user == None: print "--user=<prodUserName> is required" sys.exit(1) proxyS = DBProxy() proxyS.connect(panda_config.dbhost,panda_config.dbpasswd,panda_config.dbuser,panda_config.dbname) prodUserName = sys.argv[1] import userinterface.Client as Client varMap = {} varMap[':name'] = options.user if options.unban: varMap[':status'] = None else: varMap[':status'] = 'disabled' sql = "UPDATE ATLAS_PANDAMETA.users SET status=:status WHERE name=:name" status,res = proxyS.querySQLS(sql,varMap)
# NOTE(review): this fragment continues a script whose earlier part is not
# visible here — optP, options.site and options.killRunning are presumably
# defined above; confirm against the full file.
optP.add_option('--cloud', action='store', dest='cloud', default=None, help='cloud')
optP.add_option('--maxJobs', action='store', dest='maxJobs', default=None, help='max number of jobs to be killed')
options, args = optP.parse_args()

# at least one of --site / --cloud must be given
if options.cloud == None and options.site == None:
    optP.error("--site=<computingSite> and/or --cloud=<cloud> is required")

# connect directly to the PanDA DB
proxyS = DBProxy()
proxyS.connect(panda_config.dbhost, panda_config.dbpasswd, panda_config.dbuser, panda_config.dbname)

jobsMap = {}

# the first positional argument is the priority threshold
if len(args) == 0:
    optP.error('priority is required')

# select managed jobs below the given priority
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':currentPriority'] = args[0]
# NOTE(review): the '%s' table-name placeholder in this query is presumably
# substituted further down the script (not visible in this chunk)
sql = "SELECT PandaID,currentPriority FROM %s WHERE prodSourceLabel=:prodSourceLabel AND currentPriority<:currentPriority "
# optionally restrict to running jobs
if options.killRunning:
    sql += "AND jobStatus=:jobStatus "
    varMap[':jobStatus'] = 'running'
import sys
from taskbuffer.OraDBProxy import DBProxy
from dataservice import AddressFinder
# password
from config import panda_config

passwd = panda_config.dbpasswd

# instantiate DB proxies
proxyS = DBProxy(True)
proxyS.connect(panda_config.dbhost,panda_config.dbpasswd,panda_config.dbuser,panda_config.dbname)

# get DN and address for every user in MetaDB
status,res = proxyS.querySQLS("SELECT dn,email,name FROM ATLAS_PANDAMETA.users",{},arraySize=1000000)
if res == None:
    print "SQL error"
    sys.exit(0)

# to upper chrs
def toUpper(emails):
    """Return a new list with every email in *emails* upper-cased."""
    retA = []
    for email in emails:
        retA.append(email.upper())
    return retA

# SQL statements are written out for later review/execution
outF = open('newemail.sql','w')
# NOTE(review): the body of this loop appears to continue beyond this chunk;
# the visible part only falls back to 'name' when the DN column is empty
for dn,origEmail,name in res:
    if dn == None:
        dn = name