Example #1
 def renew_credential(self):
     # make logger
     mainLog = self.make_logger(_logger, method_name='renew_credential')
     # make communication channel to PanDA
     com = CommunicatorPool()
     proxy, msg = com.get_proxy(self.voms,
                                (self.inCertFile, self.inCertFile))
     if proxy is not None:
         # write the renewed proxy to the output cert file
         with open(self.outCertFile, 'w') as pFile:
             pFile.write(proxy)
     else:
         mainLog.error('failed to renew credential with a server message: {0}'.format(msg))
     return proxy is not None, msg
Example #2
 def __init__(self, single_mode=False, stop_event=None, daemon_mode=True):
     # keep run-mode flags and set up the core components
     self.singleMode = single_mode
     self.stopEvent = stop_event
     self.daemonMode = daemon_mode
     from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
     self.communicatorPool = CommunicatorPool()
     from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
     self.queueConfigMapper = QueueConfigMapper()
     from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
     dbProxy = DBProxy()
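     # create any DB tables that are still missing for the configured queues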
     dbProxy.make_tables(self.queueConfigMapper)
Example #3
            print('value for jobType not valid, defaulted to {0}'.format(
                jobType))

        # resourceType should be 'SCORE', 'SCORE_HIMEM', 'MCORE', 'MCORE_HIMEM'. If not specified defaults to single core
        if sys.argv[3] in ('SCORE', 'SCORE_HIMEM', 'MCORE', 'MCORE_HIMEM'):
            resourceType = sys.argv[3]
        else:
            print('value for resourceType not valid, defaulted to {0}'.format(
                resourceType))

    print('Running with queueName:{0}, jobType:{1}, resourceType:{2}'.format(
        queueName, jobType, resourceType))

    pluginFactory = PluginFactory()

    com = CommunicatorPool()

    # get job
    jobSpecList = []
    if queueConfig.mapType != WorkSpec.MT_NoJob:
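        # request a single job from PanDA; 'nodeName' and 'computingElement' are placeholder values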
        jobs, errStr = com.get_jobs(queueConfig.queueName, 'nodeName',
                                    queueConfig.prodSourceLabel,
                                    'computingElement', 1, None)
        if len(jobs) == 0:
            print("Failed to get jobs at {0} due to {1}".format(
                queueConfig.queueName, errStr))
            sys.exit(0)

        jobSpec = JobSpec()
        jobSpec.convert_job_json(jobs[0])
Example #4
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

msgStr = "plugin={0}".format(preparatorCore.__class__.__name__)
tmpLog.debug(msgStr)
msgStr = "Initial queueConfig.preparator = {}".format(
    initial_queueConfig_preparator)
tmpLog.debug(msgStr)
msgStr = "Modified queueConfig.preparator = {}".format(
    modified_queueConfig_preparator)
tmpLog.debug(msgStr)

scope = 'panda'

proxy = DBProxy()
communicator = CommunicatorPool()
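# run the cacher once to populate the local cache (single_mode executes a single cycle)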
cacher = Cacher(communicator, single_mode=True)
cacher.run()

tmpLog.debug("plugin={0}".format(preparatorCore.__class__.__name__))
tmpLog.debug("BasePath from preparator configuration: %s " %
             preparatorCore.basePath)

# get all jobs in table in a preparing substate
tmpLog.debug('try to get all jobs in a preparing substate')
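# fetch up to 2000 jobs; the remaining positional filters are left as None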
jobSpec_list = proxy.get_jobs_in_sub_status('preparing', 2000, None, None,
                                            None, None, None, None)
tmpLog.debug('got {0} jobs'.format(len(jobSpec_list)))
# loop over all found jobs
if len(jobSpec_list) > 0:
    for jobSpec in jobSpec_list:
Example #5
import os
import sys
import datetime
import logging
from future.utils import iteritems

from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

# start from a clean sqlite database
try:
    os.remove(harvester_config.db.database_filename)
except Exception:
    pass

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] in ['db_proxy']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

queueConfigMapper = QueueConfigMapper()

proxy = DBProxy()
proxy.make_tables(queueConfigMapper)

job = JobSpec()
job.PandaID = 1

job.modificationTime = datetime.datetime.now()
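# insert a minimal job record, then read it back by PandaID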
proxy.insert_jobs([job])

newJob = proxy.get_job(1)

a = CommunicatorPool()
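# all string arguments are placeholders; a real call needs an actual site, node and CE name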
a.get_jobs('siteName', 'nodeName', 'prodSourceLabel', 'computingElement', 1,
           {})
Example #6
def cacher_refresh(arguments):
    from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
    from pandaharvester.harvesterbody.cacher import Cacher
    communicatorPool = CommunicatorPool()
    cacher = Cacher(communicatorPool)
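    # force an immediate refresh of all cached entries with 4 threads, skipping the DB lock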
    cacher.execute(force_update=True, skip_lock=True, n_thread=4)
Example #7
import os
import sys

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool


pandaid = int(sys.argv[1])
jobsetid = int(sys.argv[2])
taskid = int(sys.argv[3])

try:
    n = int(sys.argv[4])
except Exception:
    n = 1

data = {pandaid: {'pandaID': pandaid,
                  'taskID': taskid,
                  'jobsetID': jobsetid,
                  'nRanges': n}
        }

a = CommunicatorPool()
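# assumed signature: get_event_ranges(data, scattered, base_path); False disables scattered mode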
o = a.get_event_ranges(data, False, os.getcwd())
Example #8
import os
import sys
import datetime
import logging
from future.utils import iteritems

from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

# start from a clean sqlite database
try:
    os.remove(harvester_config.db.database_filename)
except Exception:
    pass

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] in ['db_proxy']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

queueConfigMapper = QueueConfigMapper()

proxy = DBProxy()
proxy.make_tables(queueConfigMapper)

job = JobSpec()
job.PandaID = 1


job.modificationTime = datetime.datetime.now()
proxy.insert_jobs([job])

newJob = proxy.get_job(1)


a = CommunicatorPool()
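# placeholder arguments, as in Example #5 above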
a.get_jobs('siteName', 'nodeName', 'prodSourceLabel', 'computingElement', 1, {})
Example #9
import os
import sys

from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
from pandaharvester.harvestermessenger import shared_file_messenger

workerID = int(sys.argv[1])

proxy = DBProxy()
workSpec = proxy.get_worker_with_id(workerID)
jobSpec = proxy.get_jobs_with_worker_id(workerID, None)[0]

accessPoint = workSpec.get_access_point()

# make sure the worker's access point directory exists
try:
    os.makedirs(accessPoint)
except Exception:
    pass

node = {}
node['pandaID'] = jobSpec.PandaID
node['jobsetID'] = jobSpec.jobParams['jobsetID']
node['taskID'] = jobSpec.taskID


a = CommunicatorPool()
tmpStat, tmpVal = a.get_event_ranges(node)

mess = shared_file_messenger.SharedFileMessenger()
mess.feed_events(workSpec, tmpVal)
Example #10
import sys

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool


pandaid = int(sys.argv[1])
jobsetid = int(sys.argv[2])
taskid = int(sys.argv[3])

try:
    n = int(sys.argv[4])
except Exception:
    n = 1

data = {pandaid: {'pandaID': pandaid,
                  'taskID': taskid,
                  'jobsetID': jobsetid,
                  'nRanges': n}
        }

a = CommunicatorPool()
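# single-argument call; Example #7 passes the optional scattered/base-path arguments explicitly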
o = a.get_event_ranges(data)
Example #11
import sys

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

pandaid = int(sys.argv[1])
jobsetid = int(sys.argv[2])
taskid = int(sys.argv[3])

try:
    n = int(sys.argv[4])
except Exception:
    n = 1

data = {
    pandaid: {
        'pandaID': pandaid,
        'taskID': taskid,
        'jobsetID': jobsetid,
        'nRanges': n
    }
}

a = CommunicatorPool()
o = a.get_event_ranges(data)
Example #12
import os
import sys

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.work_spec import WorkSpec
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.plugin_factory import PluginFactory
from pandaharvester.harvestermisc import signal_utils

fork_child_pid = os.fork()
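# the parent installs the suicide handler and waits; the child runs the test below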
if fork_child_pid != 0:
    signal_utils.set_suicide_handler(None)
    os.wait()
else:
    queueName = sys.argv[1]
    queueConfigMapper = QueueConfigMapper()
    queueConfig = queueConfigMapper.get_queue(queueName)

    pluginFactory = PluginFactory()

    com = CommunicatorPool()

    # get job
    jobSpecList = []
    if queueConfig.mapType != WorkSpec.MT_NoJob:
        jobs, errStr = com.get_jobs(queueConfig.queueName, 'nodeName', queueConfig.prodSourceLabel,
                                    'computingElement', 1, None)
        if len(jobs) == 0:
            print ("Failed to get jobs at {0} due to {1}".format(queueConfig.queueName, errStr))
            sys.exit(0)

        jobSpec = JobSpec()
        jobSpec.convert_job_json(jobs[0])

        # set input file paths
        inFiles = jobSpec.get_input_file_attributes()
Example #13
import sys
import uuid
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.file_spec import FileSpec
from pandaharvester.harvestercore.event_spec import EventSpec

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

rID = sys.argv[1]
taskid = rID.split('-')[0]
pandaid = int(rID.split('-')[1])

job = JobSpec()
job.PandaID = pandaid
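# a finished event range plus its zipped output file, reported via update_jobs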
event = EventSpec()
file = FileSpec()
file.status = 'finished'
file.objstoreID = 9575
file.pathConvention = 1000
file.lfn = str(uuid.uuid4().hex) + '.zip'
file.fsize = 555
file.chksum = '0d2a9dc9'
event.eventRangeID = rID
event.eventStatus = 'finished'
job.zipEventMap = {1: {'events': [event],
                       'zip': file}}


a = CommunicatorPool()
a.update_jobs([job])
Example #14
import sys
import logging
from future.utils import iteritems

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] in ['db_proxy']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

a = CommunicatorPool()
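# lightweight connectivity check against the PanDA server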
return_object = a.check_panda()
print(return_object)