# NOTE(review): this chunk starts mid-script -- `cacher`, `tmpLog`,
# `preparatorCore`, `proxy` and `job_id` are defined earlier in the file,
# outside this view; confirm before running.
cacher.run()  # single run of the cacher before inspecting jobs
# report which preparator plugin is in use and its configured base path
tmpLog.debug("plugin={0}".format(preparatorCore.__class__.__name__))
tmpLog.debug("BasePath from preparator configuration: %s " % preparatorCore.basePath)
# get all jobs in table in a preparing substate
#tmpLog.debug('try to get all jobs in a preparing substate')
#jobSpec_list = proxy.get_jobs_in_sub_status('preparing',2000,None,None,None,None,None,None)
# get all jobs
if job_id > 0:
    # a specific job ID was requested: fetch only that job
    tmpLog.debug('try to get job ID - {}'.format(job_id))
    jobSpec_list = [proxy.get_job(job_id)]
else:
    # no job ID given: fetch every job in the table
    tmpLog.debug('try to get all jobs')
    jobSpec_list = proxy.get_jobs()
tmpLog.debug('got {0} jobs'.format(len(jobSpec_list)))
# loop over all found jobs
if len(jobSpec_list) > 0:
    for jobSpec in jobSpec_list:
        # if user entered a job id check for it
        if job_id > 0:
            if jobSpec.PandaID != job_id:
                continue
        # log the job's identity and current harvester state
        tmpLog.debug(' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
                     (jobSpec.PandaID, jobSpec.status, jobSpec.subStatus, jobSpec.lockedBy))
        # get the transfer groups
        # NOTE(review): the loop body continues past this chunk boundary
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper

# Mirror every configured panda.log.* logger to stdout so debug output is
# visible on the console; the chatty db_proxy logger is deliberately skipped.
for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if not loggerName.startswith('panda.log'):
        continue
    # loggerDict can contain logging.PlaceHolder entries, which have no
    # `handlers` attribute -- skip anything that is not a real Logger
    if not isinstance(loggerObj, logging.Logger):
        continue
    if len(loggerObj.handlers) == 0:
        continue
    if loggerName.split('.')[-1] in ['db_proxy']:
        continue
    stdoutHandler = logging.StreamHandler(sys.stdout)
    # reuse the formatter of the logger's existing handler so console
    # output matches the file output
    stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
    loggerObj.addHandler(stdoutHandler)

# the panda queue name is a required command-line argument; fail with a
# usage message instead of an opaque IndexError
if len(sys.argv) < 2:
    sys.stderr.write('usage: {0} <queueName>\n'.format(sys.argv[0]))
    sys.exit(1)
queueName = sys.argv[1]
queueConfigMapper = QueueConfigMapper()
queueConfig = queueConfigMapper.get_queue(queueName)

proxy = DBProxy()
# get all jobs in table
print('try to get all jobs')
alljobs = proxy.get_jobs()
print('got {0} jobs'.format(len(alljobs)))
# loop over all found jobs and report their state
# (iterating an empty list is a no-op, so no explicit length guard is needed)
for jobSpec in alljobs:
    print(' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
          (jobSpec.PandaID, jobSpec.status, jobSpec.subStatus, jobSpec.lockedBy))
# NOTE(review): this chunk starts mid-script -- `Cacher`, `communicator`,
# `tmpLog`, `preparatorCore`, `proxy` and `job_id` are defined earlier in the
# file, outside this view; confirm before running.
# run the cacher one time (single_mode) to populate cached data
cacher = Cacher(communicator, single_mode=True)
cacher.run()
# report which preparator plugin is in use and its configured base path
tmpLog.debug("plugin={0}".format(preparatorCore.__class__.__name__))
tmpLog.debug("BasePath from preparator configuration: %s " % preparatorCore.basePath)
# get all jobs in table in a preparing substate
#tmpLog.debug('try to get all jobs in a preparing substate')
#jobSpec_list = proxy.get_jobs_in_sub_status('preparing',2000,None,None,None,None,None,None)
# get all jobs
if job_id > 0:
    # a specific job ID was requested: fetch only that job
    tmpLog.debug('try to get job ID - {}'.format(job_id))
    jobSpec_list = [proxy.get_job(job_id)]
else:
    # no job ID given: fetch every job in the table
    tmpLog.debug('try to get all jobs')
    jobSpec_list = proxy.get_jobs()
tmpLog.debug('got {0} jobs'.format(len(jobSpec_list)))
# loop over all found jobs
if len(jobSpec_list) > 0:
    for jobSpec in jobSpec_list:
        # if user entered a job id check for it
        if job_id > 0:
            if jobSpec.PandaID != job_id:
                continue
        # log the job's identity and current harvester state
        tmpLog.debug(' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
                     (jobSpec.PandaID, jobSpec.status, jobSpec.subStatus, jobSpec.lockedBy))
        # get the transfer groups
        # input files whose transfer group is not yet ready
        groups = jobSpec.get_groups_of_input_files(skip_ready=True)
        # NOTE(review): the loop body continues past this chunk boundary
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper

# Attach a stdout handler to every configured panda.log.* logger so output
# is visible on the console; the db_proxy logger is deliberately excluded.
for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if not loggerName.startswith('panda.log'):
        continue
    # loggerDict can contain logging.PlaceHolder entries, which lack a
    # `handlers` attribute -- only real Logger objects are usable here
    if not isinstance(loggerObj, logging.Logger):
        continue
    if len(loggerObj.handlers) == 0:
        continue
    if loggerName.split('.')[-1] in ['db_proxy']:
        continue
    stdoutHandler = logging.StreamHandler(sys.stdout)
    # copy the formatter from the logger's first handler so console output
    # keeps the same format as the log file
    stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
    loggerObj.addHandler(stdoutHandler)

# queue name must be supplied on the command line; exit with a usage
# message rather than crashing with IndexError
if len(sys.argv) < 2:
    sys.stderr.write('usage: {0} <queueName>\n'.format(sys.argv[0]))
    sys.exit(1)
queueName = sys.argv[1]
queueConfigMapper = QueueConfigMapper()
queueConfig = queueConfigMapper.get_queue(queueName)

proxy = DBProxy()
# get all jobs in table
print('try to get all jobs')
alljobs = proxy.get_jobs()
print('got {0} jobs'.format(len(alljobs)))
# loop over all found jobs and report their state
# (no length guard needed: iterating an empty list does nothing)
for jobSpec in alljobs:
    print(' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
          (jobSpec.PandaID, jobSpec.status, jobSpec.subStatus, jobSpec.lockedBy))