loggerObj.addHandler(stdoutHandler)

msgStr = "plugin={0}".format(preparatorCore.__class__.__name__)
tmpLog.debug(msgStr)
msgStr = "Initial queueConfig.preparator = {}".format(initial_queueConfig_preparator)
tmpLog.debug(msgStr)
msgStr = "Modified queueConfig.preparator = {}".format(modified_queueConfig_preparator)
tmpLog.debug(msgStr)

scope = 'panda'
proxy = DBProxy()
communicator = CommunicatorPool()
cacher = Cacher(communicator, single_mode=True)
cacher.run()

tmpLog.debug("plugin={0}".format(preparatorCore.__class__.__name__))
tmpLog.debug("BasePath from preparator configuration: %s" % preparatorCore.basePath)

# get all jobs in the table in a preparing substate
tmpLog.debug('try to get all jobs in a preparing substate')
jobSpec_list = proxy.get_jobs_in_sub_status('preparing', 2000, None, None, None, None, None, None)
tmpLog.debug('got {0} jobs'.format(len(jobSpec_list)))

# loop over all found jobs
if len(jobSpec_list) > 0:
    for jobSpec in jobSpec_list:
        tmpLog.debug(' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
                     (jobSpec.PandaID, jobSpec.status, jobSpec.subStatus, jobSpec.lockedBy))
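# A hypothetical sketch, not part of the original test script, showing how the loop above could
# skip jobs that are already locked by another harvester thread. It only relies on the lockedBy
# attribute printed in the debug message; the assumption that lockedBy is None for unlocked jobs
# is an illustration, not confirmed by this excerpt.
for jobSpec in jobSpec_list:
    if jobSpec.lockedBy is not None:
        # another thread is working on this job; leave it alone
        tmpLog.debug('skip PandaID = {0} locked by {1}'.format(jobSpec.PandaID, jobSpec.lockedBy))
        continue
    tmpLog.debug('PandaID = {0} is free to be prepared'.format(jobSpec.PandaID))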
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
loggerObj.addHandler(stdoutHandler)

msgStr = "plugin={0}".format(stagerCore.__class__.__name__)
tmpLog.debug(msgStr)
msgStr = "Initial queueConfig.stager = {}".format(initial_queueConfig_stager)
tmpLog.debug(msgStr)
msgStr = "Modified queueConfig.stager = {}".format(modified_queueConfig_stager)
tmpLog.debug(msgStr)

scope = 'panda'
proxy = DBProxy()
communicator = CommunicatorPool()
cacher = Cacher(communicator, single_mode=True)
cacher.run()

# check if a db lock exists
locked = stagerCore.dbInterface.get_object_lock('dummy_id_for_out_0', lock_interval=120)
if not locked:
    tmpLog.debug('DB already locked by another thread')
# now unlock db
unlocked = stagerCore.dbInterface.release_object_lock('dummy_id_for_out_0')
if unlocked:
    tmpLog.debug('unlocked db')
else:
    tmpLog.debug('could not unlock db')
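# A minimal sketch, not from the original script, of the same lock handling wrapped in try/finally
# so the object lock is always released even if the work done while holding it raises. It assumes
# only the get_object_lock / release_object_lock methods already used above.
locked = stagerCore.dbInterface.get_object_lock('dummy_id_for_out_0', lock_interval=120)
if not locked:
    tmpLog.debug('DB already locked by another thread')
else:
    try:
        # ... work that requires the lock would go here ...
        pass
    finally:
        # release the lock unconditionally once the work is done or has failed
        if stagerCore.dbInterface.release_object_lock('dummy_id_for_out_0'):
            tmpLog.debug('unlocked db')
        else:
            tmpLog.debug('could not unlock db')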
def start(self):
    # thread list
    thrList = []
    # Credential Manager
    from pandaharvester.harvesterbody.cred_manager import CredManager
    thr = CredManager(single_mode=self.singleMode)
    thr.set_stop_event(self.stopEvent)
    thr.execute()
    thr.start()
    thrList.append(thr)
    # Command manager
    from pandaharvester.harvesterbody.command_manager import CommandManager
    thr = CommandManager(self.communicatorPool, self.queueConfigMapper,
                         single_mode=self.singleMode)
    thr.set_stop_event(self.stopEvent)
    thr.start()
    thrList.append(thr)
    # Cacher
    from pandaharvester.harvesterbody.cacher import Cacher
    thr = Cacher(self.communicatorPool, single_mode=self.singleMode)
    thr.set_stop_event(self.stopEvent)
    thr.execute(force_update=True, skip_lock=True)
    thr.start()
    thrList.append(thr)
    # Watcher
    from pandaharvester.harvesterbody.watcher import Watcher
    thr = Watcher(single_mode=self.singleMode)
    thr.set_stop_event(self.stopEvent)
    thr.start()
    thrList.append(thr)
    # Job Fetcher
    from pandaharvester.harvesterbody.job_fetcher import JobFetcher
    nThr = harvester_config.jobfetcher.nThreads
    for iThr in range(nThr):
        thr = JobFetcher(self.communicatorPool, self.queueConfigMapper,
                         single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Propagator
    from pandaharvester.harvesterbody.propagator import Propagator
    nThr = harvester_config.propagator.nThreads
    for iThr in range(nThr):
        thr = Propagator(self.communicatorPool, self.queueConfigMapper,
                         single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Monitor
    from pandaharvester.harvesterbody.monitor import Monitor
    nThr = harvester_config.monitor.nThreads
    for iThr in range(nThr):
        thr = Monitor(self.queueConfigMapper, single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Preparator
    from pandaharvester.harvesterbody.preparator import Preparator
    nThr = harvester_config.preparator.nThreads
    for iThr in range(nThr):
        thr = Preparator(self.communicatorPool, self.queueConfigMapper,
                         single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Submitter
    from pandaharvester.harvesterbody.submitter import Submitter
    nThr = harvester_config.submitter.nThreads
    for iThr in range(nThr):
        thr = Submitter(self.queueConfigMapper, single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Stager
    from pandaharvester.harvesterbody.stager import Stager
    nThr = harvester_config.stager.nThreads
    for iThr in range(nThr):
        thr = Stager(self.queueConfigMapper, single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # EventFeeder
    from pandaharvester.harvesterbody.event_feeder import EventFeeder
    nThr = harvester_config.eventfeeder.nThreads
    for iThr in range(nThr):
        thr = EventFeeder(self.communicatorPool, self.queueConfigMapper,
                          single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    # Sweeper
    from pandaharvester.harvesterbody.sweeper import Sweeper
    nThr = harvester_config.sweeper.nThreads
    for iThr in range(nThr):
        thr = Sweeper(self.queueConfigMapper, single_mode=self.singleMode)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)
    ##################
    # loop on stop event to be interruptable, since thr.join blocks signal capture in python 2.7
    while True:
        if self.singleMode or not self.daemonMode:
            break
        self.stopEvent.wait(1)
        if self.stopEvent.is_set():
            break
    ##################
    # join
    if self.daemonMode:
        for thr in thrList:
            thr.join()
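# A hypothetical refactoring sketch, not part of the original master code: the repeated
# "instantiate agent, set stop event, start, append to thread list" pattern above could be
# collected into a small helper. The helper name _launch_agents and its signature are assumptions
# for illustration; it uses only the calls already made in start().
def _launch_agents(self, thrList, agent_class, n_threads=1, *args, **kwargs):
    # create n_threads instances of the given agent, wire the shared stop event, and start them
    for _ in range(n_threads):
        thr = agent_class(*args, single_mode=self.singleMode, **kwargs)
        thr.set_stop_event(self.stopEvent)
        thr.start()
        thrList.append(thr)

# usage sketch (inside start()):
#   self._launch_agents(thrList, JobFetcher, harvester_config.jobfetcher.nThreads,
#                       self.communicatorPool, self.queueConfigMapper)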
def cacher_refresh(arguments):
    from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
    from pandaharvester.harvesterbody.cacher import Cacher
    communicatorPool = CommunicatorPool()
    cacher = Cacher(communicatorPool)
    cacher.execute(force_update=True, skip_lock=True, n_thread=4)
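# A hedged usage sketch, not from the original tool: cacher_refresh takes an "arguments" object,
# which suggests an argparse namespace. The sub-command name "cacher_refresh" and the parser
# wiring below are assumptions added purely for illustration.
import argparse

parser = argparse.ArgumentParser(prog='harvester-admin-sketch')
subparsers = parser.add_subparsers()
refresh_parser = subparsers.add_parser('cacher_refresh', help='force-refresh the cacher')
refresh_parser.set_defaults(func=cacher_refresh)

if __name__ == '__main__':
    args = parser.parse_args()
    # dispatch to the selected sub-command, passing the parsed namespace
    args.func(args)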
import sys
import logging

from future.utils import iteritems

from pandaharvester.harvesterbody.cacher import Cacher
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] not in ['cacher']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

proxy = DBProxy()
communicator = CommunicatorPool()
cacher = Cacher(communicator, single_mode=True)
cacher.execute(force_update=True, skip_lock=True)
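# A small sketch, not part of the original test script, showing the stdout-handler attachment above
# written as a reusable helper; the function name attach_stdout_handlers is hypothetical. It uses
# only the standard logging calls already present in the loop above.
def attach_stdout_handlers(logger_suffixes, prefix='panda.log'):
    # mirror each matching panda.log.* logger to stdout, reusing its existing formatter
    for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
        if not loggerName.startswith(prefix):
            continue
        if not getattr(loggerObj, 'handlers', None):
            continue
        if loggerName.split('.')[-1] not in logger_suffixes:
            continue
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(handler)

# usage sketch: attach_stdout_handlers(['cacher'])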