Пример #1
0
 def bulkSendMsg(self, msgType, msgLevel='info'):
     """Send all buffered messages to the HTTP logger.

     Each message in self.msgBuffer is emitted at the requested level,
     prefixed with self.monToken; a one-second pause is inserted after
     every nChunk messages to avoid flooding the logging endpoint.

     :param msgType: value stored in the logger's 'Type' parameter
     :param msgLevel: 'error', 'warning' or 'info'; anything else logs at debug
     """
     try:
         nChunk = 20  # throttle: pause after every nChunk messages
         for iMsg, message in enumerate(self.msgBuffer):
             # get logger
             tmpPandaLogger = PandaLogger()
             # lock HTTP handler
             tmpPandaLogger.lock()
             tmpPandaLogger.setParams({'Type': msgType})
             # get logger
             tmpLogger = tmpPandaLogger.getHttpLogger(
                 jedi_config.master.loggername)
             # add message
             message = self.monToken + ' ' + message
             if msgLevel == 'error':
                 tmpLogger.error(message)
             elif msgLevel == 'warning':
                 tmpLogger.warning(message)
             elif msgLevel == 'info':
                 tmpLogger.info(message)
             else:
                 tmpLogger.debug(message)
             # release HTTP handler
             tmpPandaLogger.release()
             if (iMsg + 1) % nChunk == 0:
                 time.sleep(1)
     except Exception:
         # best-effort: logging failures must never propagate to the caller,
         # but do not swallow SystemExit/KeyboardInterrupt (was a bare except)
         pass
Пример #2
0
 def sendMsg(self, message, msgType, msgLevel='info', escapeChar=False):
     """Send a single message to the HTTP logger.

     :param message: message text; prefixed with self.monToken before sending
     :param msgType: value stored in the logger's 'Type' parameter
     :param msgLevel: 'error', 'warning' or 'info'; anything else logs at debug
     :param escapeChar: if True, HTML-escape '<' and '>' in the message
     """
     try:
         # get logger
         tmpPandaLogger = PandaLogger()
         # lock HTTP handler
         tmpPandaLogger.lock()
         tmpPandaLogger.setParams({'Type': msgType})
         # get logger
         tmpLogger = tmpPandaLogger.getHttpLogger(
             jedi_config.master.loggername)
         # escape special characters
         if escapeChar:
             message = message.replace('<', '&lt;')
             message = message.replace('>', '&gt;')
         # add message
         message = self.monToken + ' ' + message
         if msgLevel == 'error':
             tmpLogger.error(message)
         elif msgLevel == 'warning':
             tmpLogger.warning(message)
         elif msgLevel == 'info':
             tmpLogger.info(message)
         else:
             tmpLogger.debug(message)
         # release HTTP handler
         tmpPandaLogger.release()
     except Exception:
         # best-effort: logging failures must never propagate to the caller,
         # but do not swallow SystemExit/KeyboardInterrupt (was a bare except)
         pass
Пример #3
0
 def bulkSendMsg(self, msgType, msgLevel='info', loggerName=None):
     """Send all buffered messages to the HTTP logger.

     :param msgType: value stored in the logger's 'Type' parameter
     :param msgLevel: 'error', 'warning' or 'info'; anything else logs at debug
     :param loggerName: logger name; defaults to jedi_config.master.loggername
     """
     try:
         nChunk = 20  # throttle: pause after every nChunk messages
         if loggerName is None:
             loggerName = jedi_config.master.loggername
         for iMsg, message in enumerate(self.msgBuffer):
             # get logger
             tmpPandaLogger = PandaLogger()
             # lock HTTP handler
             tmpPandaLogger.lock()
             tmpPandaLogger.setParams({'Type': msgType})
             # get logger
             tmpLogger = tmpPandaLogger.getHttpLogger(loggerName)
             # add message
             message = self.monToken + ' ' + message
             if msgLevel == 'error':
                 tmpLogger.error(message)
             elif msgLevel == 'warning':
                 tmpLogger.warning(message)
             elif msgLevel == 'info':
                 tmpLogger.info(message)
             else:
                 tmpLogger.debug(message)
             # release HTTP handler
             tmpPandaLogger.release()
             if (iMsg + 1) % nChunk == 0:
                 time.sleep(1)
     except Exception:
         # best-effort: logging failures must never propagate to the caller,
         # but do not swallow SystemExit/KeyboardInterrupt (was a bare except)
         pass
Пример #4
0
 def sendMsg(self, message, msgType, msgLevel='info', escapeChar=False):
     """Send a single message to the HTTP logger.

     :param message: message text; prefixed with self.monToken before sending
     :param msgType: value stored in the logger's 'Type' parameter
     :param msgLevel: 'error', 'warning' or 'info'; anything else logs at debug
     :param escapeChar: if True, HTML-escape '<' and '>' in the message
     """
     try:
         # get logger
         tmpPandaLogger = PandaLogger()
         # lock HTTP handler
         tmpPandaLogger.lock()
         tmpPandaLogger.setParams({'Type': msgType})
         # get logger
         tmpLogger = tmpPandaLogger.getHttpLogger(jedi_config.master.loggername)
         # escape special characters
         if escapeChar:
             message = message.replace('<', '&lt;')
             message = message.replace('>', '&gt;')
         # add message
         message = self.monToken + ' ' + message
         if msgLevel == 'error':
             tmpLogger.error(message)
         elif msgLevel == 'warning':
             tmpLogger.warning(message)
         elif msgLevel == 'info':
             tmpLogger.info(message)
         else:
             tmpLogger.debug(message)
         # release HTTP handler
         tmpPandaLogger.release()
     except Exception:
         # best-effort: logging failures must never propagate to the caller,
         # but do not swallow SystemExit/KeyboardInterrupt (was a bare except)
         pass
Пример #5
0
 def sendMsg(self, message, msgType, msgLevel="info"):
     """Send a single message to the HTTP logger.

     :param message: message text; prefixed with self.monToken before sending
     :param msgType: value stored in the logger's 'Type' parameter
     :param msgLevel: "error", "warning" or "info"; anything else logs at debug
     """
     try:
         # get logger
         tmpPandaLogger = PandaLogger()
         # lock HTTP handler
         tmpPandaLogger.lock()
         tmpPandaLogger.setParams({"Type": msgType})
         # get logger
         tmpLogger = tmpPandaLogger.getHttpLogger(jedi_config.master.loggername)
         # add message
         message = self.monToken + " " + message
         if msgLevel == "error":
             tmpLogger.error(message)
         elif msgLevel == "warning":
             tmpLogger.warning(message)
         elif msgLevel == "info":
             tmpLogger.info(message)
         else:
             tmpLogger.debug(message)
         # release HTTP handler
         tmpPandaLogger.release()
     except Exception:
         # best-effort: logging failures must never propagate to the caller,
         # but do not swallow SystemExit/KeyboardInterrupt (was a bare except)
         pass
Пример #6
0
    import pickle

import stomp

from pandaserver.config import panda_config
from pandaserver.brokerage.SiteMapper import SiteMapper
from pandaserver.dataservice.Finisher import Finisher
from pandaserver.dataservice import DataServiceUtils
from pandaserver.srvcore.CoreUtils import commands_get_status_output

import logging
logging.basicConfig(level=logging.DEBUG)

# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('fileCallbackListener')

# keep PID
pidFile = '%s/file_callback_listener.pid' % panda_config.logdir

# overall timeout value
overallTimeout = 60 * 59

# expiration time
expirationTime = datetime.datetime.utcnow() + datetime.timedelta(
    minutes=overallTimeout)


# kill whole process
def catch_sig(sig, frame):
    try:
Пример #7
0
import pandaserver.brokerage.broker
from pandaserver.dataservice import DynDataDistributer
from pandaserver.dataservice.MailUtils import MailUtils
from pandaserver.dataservice.Notifier import Notifier
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer import JobUtils
from pandaserver.userinterface import Client

from pandaserver.dataservice.DDM import rucioAPI
from pandaserver.dataservice.DataServiceUtils import select_scope

from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper

# logger
_logger = PandaLogger().getLogger('EventPicker')


class EventPicker:
    # constructor
    def __init__(self, taskBuffer, siteMapper, evpFileName, ignoreError):
        self.taskBuffer = taskBuffer
        self.siteMapper = siteMapper
        self.ignoreError = ignoreError
        self.evpFileName = evpFileName
        self.token = datetime.datetime.utcnow().isoformat(' ')
        # logger
        self.logger = LogWrapper(_logger, self.token)
        self.pd2p = DynDataDistributer.DynDataDistributer([],
                                                          self.taskBuffer,
                                                          self.siteMapper,
Пример #8
0
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.taskbuffer import EventServiceUtils
from pandaserver.taskbuffer import retryModule
from pandaserver.taskbuffer import JobUtils
import pandaserver.taskbuffer.ErrorCode
import pandaserver.dataservice.ErrorCode
from pandaserver.dataservice import Closer

try:
    long
except NameError:
    long = int

# logger
_logger = PandaLogger().getLogger('Adder')

panda_config.setupPlugin()


class AdderGen(object):
    # constructor
    def __init__(self,
                 taskBuffer,
                 jobID,
                 jobStatus,
                 attemptNr,
                 ignoreTmpError=True,
                 siteMapper=None,
                 pid=None,
                 prelock_pid=None,
Пример #9
0
import sys
import threading
import traceback
from pandaserver.configurator import aux
from pandaserver.configurator.aux import *
from datetime import datetime, timedelta

from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.configurator import db_interface as dbif
from pandaserver.taskbuffer.TaskBuffer import taskBuffer

_logger = PandaLogger().getLogger('configurator')
_session = dbif.get_session()

# Definitions of roles
WRITE_LAN = 'write_lan'
READ_LAN = 'read_lan'
DEFAULT = 'default'


class Configurator(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

        if hasattr(panda_config, 'AGIS_URL_SITES'):
            self.AGIS_URL_SITES = panda_config.AGIS_URL_SITES
        else:
            self.AGIS_URL_SITES = 'http://atlas-agis-api.cern.ch/request/site/query/?json&vo_name=atlas&state=ACTIVE'
        _logger.debug('Getting site dump...')
Пример #10
0
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandajedi.jedicore.MsgWrapper import MsgWrapper

from pandajedi.jedicore.JediTaskBufferInterface import JediTaskBufferInterface

from pandajedi.jediddm.DDMInterface import DDMInterface

from pandajedi.jediorder.JobBroker import JobBroker
from pandajedi.jediorder.JobSplitter import JobSplitter
from pandajedi.jediorder.JobGenerator import JobGeneratorThread
from pandajedi.jedicore.ThreadUtils import ThreadPool
from pandajedi.jediorder.TaskSetupper import TaskSetupper

import sys

logger = PandaLogger().getLogger('JobGenerator')
tmpLog = MsgWrapper(logger)

tbIF = JediTaskBufferInterface()
tbIF.setupInterface()

siteMapper = tbIF.getSiteMapper()

ddmIF = DDMInterface()
ddmIF.setupInterface()

jediTaskID = int(sys.argv[1])

datasetIDs = None
if len(sys.argv) > 2:
    datasetIDs = [int(sys.argv[2])]
Пример #11
0
import sys
from threading import Lock

from pandaserver.config import panda_config

# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('Initializer')


# initialize cx_Oracle using dummy connection to avoid "Unable to acquire Oracle environment handle"
class Initializer:
    def __init__(self):
        self.lock = Lock()
        self.first = True

    def init(self):
        _logger.debug("init new=%s" % self.first)
        # do nothing when nDBConnection is 0
        if panda_config.nDBConnection == 0:
            return True
        # lock
        self.lock.acquire()
        if self.first:
            self.first = False
            try:
                _logger.debug("connect")
                # connect
                if panda_config.backend == 'oracle':
                    import cx_Oracle
                    conn = cx_Oracle.connect(dsn=panda_config.dbhost,
Пример #12
0
import hashlib

try:
    long
except NameError:
    long = int

import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandaserver.config import panda_config
from pandaserver.test.testutils import sendCommand

from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('testJobFlowATLAS')


class JobFlowATLAS(object):
    """
    Class to test the different states of a job. It has functions to submit, get, 
    finish and get the status a job. Some of the functions include assert statements
    for nostests that check the output is as expected.
    """

    __datasetName = 'panda.destDB.%s' % uuid.uuid1()
    __destName = None
    __jobList = []

    __XMLTEMPLATE_BASE = """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<!-- ATLAS file meta-data catalog -->
Пример #13
0
import datetime
import pandaserver.jobdispatcher.Protocol as Protocol
from pandaserver.taskbuffer import ErrorCode
from pandaserver.userinterface import Client
from pandaserver.config import panda_config

from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper

try:
    long
except NameError:
    long = int

# logger
_logger = PandaLogger().getLogger('Utils')


# check if server is alive
def isAlive(req):
    """Health-check endpoint: report that the server is up.

    :param req: incoming request object (unused)
    :return: the literal string "alive=yes"
    """
    return "alive=yes"


# extract name from DN
def cleanUserID(id):
    try:
        up = re.compile('/(DC|O|OU|C|L)=[^\/]+')
        username = up.sub('', id)
        up2 = re.compile('/CN=[0-9]+')
        username = up2.sub('', username)
        up3 = re.compile(' [0-9]+')
Пример #14
0
from pandacommon.pandautils import PandaUtils
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.srvcore.CoreUtils import commands_get_status_output

from pandaserver.taskbuffer.TaskBufferInterface import TaskBufferInterface

try:
    long
except NameError:
    long = int

# password
from pandaserver.config import panda_config

# logger
_logger = PandaLogger().getLogger('add')

tmpLog = LogWrapper(_logger, None)

tmpLog.debug("===================== start =====================")

# overall timeout value
overallTimeout = 20

# grace period
try:
    gracePeriod = int(sys.argv[1])
except Exception:
    gracePeriod = 3

# current minute
Пример #15
0
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandajedi.jedicore.MsgWrapper import MsgWrapper

from pandajedi.jedicore.JediTaskBufferInterface import JediTaskBufferInterface
from pandajedi.jedirefine import RefinerUtils
from pandajedi.jediorder.TaskBroker import TaskBroker

from pandajedi.jediddm.DDMInterface import DDMInterface

import sys

logger = PandaLogger().getLogger('TaskBroker')
tmpLog = MsgWrapper(logger)

tbIF = JediTaskBufferInterface()
tbIF.setupInterface()

siteMapper = tbIF.getSiteMapper()

ddmIF = DDMInterface()
ddmIF.setupInterface()

jediTaskID = int(sys.argv[1])

s, taskSpec = tbIF.getTaskWithID_JEDI(jediTaskID, False)
body = TaskBroker(None, tbIF, ddmIF, taskSpec.vo, taskSpec.prodSourceLabel)
body.initializeMods(tbIF, ddmIF)

taskParam = tbIF.getTaskParamsWithID_JEDI(jediTaskID)
taskParamMap = RefinerUtils.decodeJSON(taskParam)
Пример #16
0
import time
import datetime
import multiprocessing
import traceback

from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandacommon.pandautils.thread_utils import GenericThread, WeightedLists
from pandaserver.config import panda_config
from pandaserver.brokerage.SiteMapper import SiteMapper
from pandaserver.taskbuffer.TaskBuffer import TaskBuffer
from pandaserver.taskbuffer.TaskBufferInterface import TaskBufferInterface
from pandaserver.dataservice.AdderGen import AdderGen

# logger
_logger = PandaLogger().getLogger('add_main')


# main
def main(argv=tuple(), tbuf=None, **kwargs):

    try:
        long
    except NameError:
        long = int

    prelock_pid = GenericThread().get_pid()
    tmpLog = LogWrapper(_logger, "<pid={}>".format(prelock_pid))

    tmpLog.debug("===================== start =====================")
Пример #17
0
from pandaserver.taskbuffer import EventServiceUtils
import pandaserver.taskbuffer.ErrorCode
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.brokerage.SiteMapper import SiteMapper
from pandaserver.dataservice.Finisher import Finisher
from pandaserver.dataservice import DataServiceUtils
from pandaserver.dataservice.DataServiceUtils import select_scope
from pandaserver.dataservice.Closer import Closer
import pandaserver.taskbuffer.ErrorCode
from pandaserver.srvcore.CoreUtils import commands_get_status_output

# password
from pandaserver.config import panda_config

# logger
_logger = PandaLogger().getLogger('datasetManager')

_logger.debug("===================== start =====================")

# memory checker
def _memoryCheck(str):
    try:
        proc_status = '/proc/%d/status' % os.getpid()
        procfile = open(proc_status)
        name   = ""
        vmSize = ""
        vmRSS  = ""
        # extract Name,VmSize,VmRSS
        for line in procfile:
            if line.startswith("Name:"):
                name = line.split()[-1]
Пример #18
0
from pandaserver.taskbuffer import OraDBProxy as DBProxy

from pandaserver.config import panda_config
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
from pandaserver.taskbuffer.DatasetSpec import DatasetSpec
from pandacommon.pandalogger.PandaLogger import PandaLogger

try:
    long
except NameError:
    long = int

# logger
_logger = PandaLogger().getLogger('ConBridge')


# exception for normal termination
class HarmlessEx(Exception):
    pass


# terminate child process by itself when master has gone
class Terminator(threading.Thread):

    # constructor
    def __init__(self, consock):
        threading.Thread.__init__(self)
        self.consock = consock
Пример #19
0
"""
proxy for database connection

"""
import re

import warnings
from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.taskbuffer import OraDBProxy

warnings.filterwarnings('ignore')

# logger
_logger = PandaLogger().getLogger('EiDBProxy')


# proxy
class EiDBProxy(OraDBProxy.DBProxy):

    # constructor
    def __init__(self, useOtherError=False):
        OraDBProxy.DBProxy.__init__(self, useOtherError)

    # connect to DB (just for INTR)
    def connect(self,
                dbhost=panda_config.ei_dbhost,
                dbpasswd=panda_config.ei_dbpasswd,
                dbuser=panda_config.ei_dbuser,
                dbname=panda_config.ei_dbname,
Пример #20
0
from pandaserver.taskbuffer.OraDBProxy import DBProxy
import socket
from pandaserver.config import panda_config
import time
try:
    from urlparse import parse_qs
except ImportError:
    from urllib.parse import parse_qs

from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('testGetCriteriaGlobalShares')
from testutils import sendCommand

def retrieveJob(site):
    """Ask the job dispatcher for a job at the given site.

    :param site: PanDA site name
    :return: the job parsed into a dictionary (not a JobSpec object)
    """
    request = {
        'siteName': site,
        'mem': 1000,
        'node': socket.getfqdn(),
    }
    response = sendCommand("getJob", request, _logger)
    # decode the URL-encoded payload into a dict
    return parse_qs(response)


if __name__ == "__main__":
    proxyS = DBProxy()
    proxyS.connect(panda_config.dbhost,panda_config.dbpasswd,panda_config.dbuser,panda_config.dbname)

    #proxyS.getCriteriaForGlobalShares('BNL-OSG')
Пример #21
0
'''
email utilities
'''

import sys
import smtplib

from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger

# logger
_logger = PandaLogger().getLogger('MailUtils')


# wrapper to patch smtplib.stderr to send debug info to logger
class StderrLogger(object):
    """File-like object that forwards writes to a logger's debug channel.

    Used to patch smtplib's stderr so SMTP debug output ends up in the log.
    """

    def __init__(self, tmpLog):
        # logger that receives the redirected output
        self.tmpLog = tmpLog

    def write(self, message):
        """Strip surrounding whitespace and log the message; ignore blank writes."""
        stripped = message.strip()
        if stripped:
            self.tmpLog.debug(stripped)


# wrapper of SMTP to redirect messages
class MySMTP(smtplib.SMTP):
    def set_log(self, tmp_log):
        self.tmpLog = tmp_log
        try:
            self.org_stderr = getattr(smtplib, 'stderr')
Пример #22
0
import re
import sys
import datetime
import traceback
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.brokerage.SiteMapper import SiteMapper

# password
from pandaserver.config import panda_config
passwd = panda_config.dbpasswd

# logger
_logger = PandaLogger().getLogger('prioryMassage')
tmpLog = LogWrapper(_logger)

tmpLog.debug("================= start ==================")

# instantiate TB
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)

# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)

# get usage breakdown
usageBreakDownPerUser = {}
usageBreakDownPerSite = {}
workingGroupList = []
for table in ['ATLAS_PANDA.jobsActive4', 'ATLAS_PANDA.jobsArchived4']:
    varMap = {}
Пример #23
0
from pandaserver.test.testutils import sendCommand

from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('testUpdateWorkerPilotStatus')

# request payload for the worker/pilot status update
node = {
    'workerID': 9139456,
    'harvesterID': 'CERN_central_k8s',
    'status': 'started',
    'site': 'CERN',
}

function = "updateWorkerPilotStatus"
# send the request to the server and log the raw response
data = sendCommand(function, node, _logger)
_logger.debug(data)


Пример #24
0
 def __init__(self):
     """Create the component and attach its dedicated logger."""
     self._logger = PandaLogger().getLogger('PilotStreaming')
     return
Пример #25
0
setup dataset

'''

import sys
import uuid
import traceback
import threading

from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.taskbuffer import EventServiceUtils

# logger
_logger = PandaLogger().getLogger('Setupper')

panda_config.setupPlugin()


# main class
class Setupper(threading.Thread):
    # constructor
    def __init__(self,
                 taskBuffer,
                 jobs,
                 resubmit=False,
                 pandaDDM=False,
                 ddmAttempt=0,
                 forkRun=False,
                 onlyTA=False,
Пример #26
0
'''
activate job

'''

from pandacommon.pandalogger.PandaLogger import PandaLogger

# logger
_logger = PandaLogger().getLogger('Activator')


class Activator:
    # constructor
    def __init__(self, taskBuffer, dataset, enforce=False):
        self.dataset = dataset
        self.taskBuffer = taskBuffer
        self.enforce = enforce

    # to keep backward compatibility
    def start(self):
        """Backward-compatible entry point; executes run() synchronously."""
        self.run()

    def join(self):
        pass

    # main
    def run(self):
        _logger.debug("start: %s" % self.dataset.name)
        if self.dataset.status in ['completed', 'deleting', 'deleted'
                                   ] and not self.enforce:
            _logger.debug("   skip: %s" % self.dataset.name)
Пример #27
0
class PilotStreaming:
    """Drive unified pilot streaming (UPS): compute the desired worker counts
    per queue and publish them to Harvester via the command interface."""

    def __init__(self):
        # dedicated logger for this component
        self._logger = PandaLogger().getLogger('PilotStreaming')
        return

    def run(self):
        """Iterate over the UPS queues, decide the worker requirements for each
        and send a SET_N_WORKERS command per Harvester instance.

        :return: None
        """
        # timing
        start_time = time.time()
        self._logger.debug('Start.')

        # get unified pilot streaming (ups) queues
        ups_queues = taskBuffer.ups_get_queues()
        self._logger.debug('UPS queues: {0}'.format(ups_queues))

        # get worker stats
        worker_stats = taskBuffer.ups_load_worker_stats_legacy()

        for ups_queue in ups_queues:
            # worker stats may be missing for a queue -> skip it
            try:
                queue_worker_stats = worker_stats[ups_queue]
            except KeyError:
                self._logger.debug(
                    'No worker stats for queue {0}'.format(ups_queue))
                continue
            self._logger.debug('worker_stats for queue {0}: {1}'.format(
                ups_queue, queue_worker_stats))
            # tmp_job_stats = job_stats[ups_queue]

            # compute the new worker distribution for this queue
            new_workers_per_harvester = taskBuffer.ups_new_worker_distribution_legacy(
                ups_queue, queue_worker_stats)
            self._logger.info('queue: {0}, results: {1}'.format(
                ups_queue, new_workers_per_harvester))

            # fields of the harvester command
            command = '{0}:{1}'.format('SET_N_WORKERS', ups_queue)
            status = 'new'
            ack_requested = False
            lock_interval = None
            com_interval = None

            # publish one command per harvester instance
            for harvester_id, params in new_workers_per_harvester.items():
                taskBuffer.commandToHarvester(harvester_id, command,
                                              ack_requested, status,
                                              lock_interval, com_interval,
                                              params)

        # timing
        end_time = time.time()
        self._logger.debug(
            'Done. Pilot streaming took: {0} s'.format(end_time - start_time))

        return
Пример #28
0
 def run(self, taskBuffer, aSiteMapper, holdingAna):
     """Process job-output record files in panda_config.logdir with AdderGen.

     Duplicated files for the same job ID are deleted, files whose job ID is
     in holdingAna are processed first (priority to buildJobs), and the rest
     of the list is shuffled to spread the load.  The loop stops when the
     overall timeout is exceeded or a log rotation is imminent.

     :param taskBuffer: task buffer handed to AdderGen
     :param aSiteMapper: site mapper handed to AdderGen
     :param holdingAna: job IDs whose files get priority (presumably analysis
                        jobs in holding — TODO confirm with the caller)
     """
     # import
     from pandaserver.dataservice.AdderGen import AdderGen
     # get logger
     _logger = PandaLogger().getLogger('add_process')
     # get file list
     timeNow = datetime.datetime.utcnow()
     timeInt = datetime.datetime.utcnow()
     dirName = panda_config.logdir
     fileList = os.listdir(dirName)
     fileList.sort()
     # remove duplicated files
     tmpList = []
     uMap = {}
     for file in fileList:
         # expected name: <jobID>_<jobStatus>_<36-char token>[_<attempt>]
         match = re.search('^(\d+)_([^_]+)_.{36}(_\d+)*$', file)
         if match is not None:
             fileName = '%s/%s' % (dirName, file)
             id = match.group(1)
             jobStatus = match.group(2)
             if id in uMap:
                 try:
                     os.remove(fileName)
                 except Exception:
                     pass
             else:
                 if jobStatus != EventServiceUtils.esRegStatus:
                     uMap[id] = fileName
                 if long(id) in holdingAna:
                     # give a priority to buildJobs
                     tmpList.insert(0, file)
                 else:
                     tmpList.append(file)
     # keep the first nFixed files in order, randomize the rest
     nFixed = 50
     randTmp = tmpList[nFixed:]
     random.shuffle(randTmp)
     fileList = tmpList[:nFixed] + randTmp
     # add
     while len(fileList) != 0:
         # time limit to avoid too many copyArchive running at the same time
         if (datetime.datetime.utcnow() -
                 timeNow) > datetime.timedelta(minutes=overallTimeout):
             tmpLog.debug("time over in Adder session")
             break
         # get fileList
         if (datetime.datetime.utcnow() -
                 timeInt) > datetime.timedelta(minutes=15):
             timeInt = datetime.datetime.utcnow()
             # get file
             fileList = os.listdir(dirName)
             fileList.sort()
             # remove duplicated files
             tmpList = []
             uMap = {}
             for file in fileList:
                 match = re.search('^(\d+)_([^_]+)_.{36}(_\d+)*$', file)
                 if match is not None:
                     fileName = '%s/%s' % (dirName, file)
                     id = match.group(1)
                     jobStatus = match.group(2)
                     if id in uMap:
                         try:
                             os.remove(fileName)
                         except Exception:
                             pass
                     else:
                         if jobStatus != EventServiceUtils.esRegStatus:
                             uMap[id] = fileName
                         if long(id) in holdingAna:
                             # give a priority to buildJob
                             tmpList.insert(0, file)
                         else:
                             tmpList.append(file)
             fileList = tmpList
         # check if log rotation is close; stop to avoid losing output
         if PandaUtils.isLogRotating(5, 5):
             tmpLog.debug("terminate since close to log-rotate time")
             break
         # choose a file
         file = fileList.pop(0)
         # check format
         match = re.search('^(\d+)_([^_]+)_.{36}(_\d+)*$', file)
         if match is not None:
             fileName = '%s/%s' % (dirName, file)
             if not os.path.exists(fileName):
                 continue
             try:
                 modTime = datetime.datetime(
                     *(time.gmtime(os.path.getmtime(fileName))[:7]))
                 thr = None
                 if (timeNow - modTime) > datetime.timedelta(hours=24):
                     # last chance
                     tmpLog.debug("Last Add File {0} : {1}".format(
                         os.getpid(), fileName))
                     thr = AdderGen(taskBuffer,
                                    match.group(1),
                                    match.group(2),
                                    fileName,
                                    ignoreTmpError=False,
                                    siteMapper=aSiteMapper)
                 elif (timeInt -
                       modTime) > datetime.timedelta(minutes=gracePeriod):
                     # add
                     tmpLog.debug("Add File {0} : {1}".format(
                         os.getpid(), fileName))
                     thr = AdderGen(taskBuffer,
                                    match.group(1),
                                    match.group(2),
                                    fileName,
                                    ignoreTmpError=True,
                                    siteMapper=aSiteMapper)
                 if thr is not None:
                     thr.run()
             except Exception:
                 type, value, traceBack = sys.exc_info()
                 tmpLog.error("%s %s" % (type, value))
Пример #29
0
try:
    from Queue import Queue
except ImportError:
    from queue import Queue
from pandaserver.taskbuffer import OraDBProxy as DBProxy
import os
import time
import random
from threading import Lock
from pandaserver.config import panda_config
from pandaserver.taskbuffer.ConBridge import ConBridge
from pandacommon.pandalogger.PandaLogger import PandaLogger

# logger
_logger = PandaLogger().getLogger('DBProxyPool')


class DBProxyPool:
    def __init__(self,
                 dbhost,
                 dbpasswd,
                 nConnection,
                 useTimeout=False,
                 dbProxyClass=None):
        # crate lock for callers
        self.lock = Lock()
        self.callers = []
        # create Proxies
        _logger.debug("init")
        self.proxyList = Queue(nConnection)
Пример #30
0
import re
import os
import warnings
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.config import panda_config

try:
    long
except NameError:
    long = int

warnings.filterwarnings('ignore')

# logger
_logger = PandaLogger().getLogger('WrappedCursor')


# convert SQL and parameters in_printf format
def convert_query_in_printf_format(sql, var_dict, sql_conv_map):
    if sql in sql_conv_map:
        sql = sql_conv_map[sql]
    else:
        old_sql = sql
        # %
        sql = re.sub(r'%', r'%%', sql)
        # current date except for being used for interval
        if re.search(r'CURRENT_DATE\s*[\+-]', sql,
                     flags=re.IGNORECASE) is None:
            sql = re.sub(r'CURRENT_DATE',
                         r'CURRENT_TIMESTAMP',
Пример #31
0
"""
provide web service for DDM

"""

import sys
from pandaserver.taskbuffer.WrappedPickle import WrappedPickle
from pandacommon.pandalogger.PandaLogger import PandaLogger

# logger
_logger = PandaLogger().getLogger('DataService')


class DataService:
    """Web-service facade for DDM; holds a reference to the task buffer."""

    def __init__(self):
        # the task buffer is injected later via init()
        self.taskBuffer = None

    def init(self, taskBuffer):
        """Store the task buffer used to serve requests."""
        self.taskBuffer = taskBuffer


# Singleton
dataService = DataService()
del DataService
'''
web interface

'''
Пример #32
0
import sys
import time
import datetime

from pandajedi.jedicore.ThreadUtils import ListWithLock, ThreadPool, WorkerThread
from pandajedi.jedicore import Interaction
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore.FactoryBase import FactoryBase
from .JediKnight import JediKnight

from pandajedi.jediconfig import jedi_config

# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger

logger = PandaLogger().getLogger(__name__.split('.')[-1])


# worker class to refine TASK_PARAM to fill JEDI tables
class TaskBroker(JediKnight, FactoryBase):

    # constructor
    def __init__(self, commuChannel, taskBufferIF, ddmIF, vos,
                 prodSourceLabels):
        """Initialize the broker for the given VOs and production source labels.

        :param commuChannel: communication channel forwarded to JediKnight
        :param taskBufferIF: JEDI task buffer interface
        :param ddmIF: DDM interface
        :param vos: VO name(s), passed through parseInit
        :param prodSourceLabels: production source label(s), passed through parseInit
        """
        self.vos = self.parseInit(vos)
        self.prodSourceLabels = self.parseInit(prodSourceLabels)
        JediKnight.__init__(self, commuChannel, taskBufferIF, ddmIF, logger)
        FactoryBase.__init__(self, self.vos, self.prodSourceLabels, logger,
                             jedi_config.taskbroker.modConfig)

    # main
Пример #33
0
import re
import sys
import glob
import time
import os.path
import datetime
import threading
from pandaserver.config import panda_config
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandaserver.brokerage import SiteMapper
from pandaserver.dataservice.EventPicker import EventPicker
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.srvcore.CoreUtils import commands_get_status_output

# logger
_logger = PandaLogger().getLogger('evpPD2P')

_logger.debug("===================== start =====================")

# overall timeout value
overallTimeout = 300
# prefix of evp files
prefixEVP = 'evp.'
# file pattern of evp files
evpFilePatt = panda_config.cache_dir + '/' + prefixEVP + '*'

# kill old process
try:
    # time limit
    timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=overallTimeout)
    # get process list