Example #1
# fill the associated file with assFileSpec.fsize random characters
# (the opening two lines are reconstructed; the listing is truncated here)
oFile = open(assFileSpec.path, 'w')
oFile.write(''.join(
    random.choice(string.ascii_uppercase + string.digits)
    for _ in range(assFileSpec.fsize)))
oFile.close()
fileSpec.add_associated_file(assFileSpec)

jobSpec = JobSpec()
jobSpec.jobParams = {
    'outFiles': fileSpec.lfn + ',log',
    'scopeOut': 'panda',
    'scopeLog': 'panda',
    'logFile': 'log',
    'realDatasets': 'panda.' + fileSpec.lfn,
    'ddmEndPointOut': 'BNL-OSG2_DATADISK',
}
jobSpec.computingSite = queueName
jobSpec.PandaID = job_id
jobSpec.add_out_file(fileSpec)
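Note that jobParams carries plain comma-separated strings rather than lists; a hedged sketch of the multi-file form (hypothetical file names, not part of the original test):

# with two outputs the fields are comma-joined in matching order, e.g.:
# jobSpec.jobParams['outFiles'] = 'out_a.gz,out_b.gz,log'
# jobSpec.jobParams['realDatasets'] = 'panda.out_a.gz,panda.out_b.gz'
# the loop further below builds such strings the same way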

print "file to transfer - {}".format(assFileSpec.path)
print "dump(jobSpec)"
#dump(jobSpec)

pluginFactory = PluginFactory()

# get stage-out plugin
stagerCore = pluginFactory.get_plugin(queueConfig.stager)
print "plugin={0}".format(stagerCore.__class__.__name__)

print "testing zip"
tmpStat, tmpOut = stagerCore.zip_output(jobSpec)
if tmpStat:
    unlocked = stagerCore.dbInterface.release_object_lock('dummy_id_for_out_0')
    if unlocked:
        tmpLog.debug('unlocked db')
    else:
        tmpLog.debug('could not unlock db')
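# hedged sketch (not in the original script): on failure, tmpOut carries
# the plugin's diagnostic, so report it instead of touching the lock
else:
    print("zip failed: {0}".format(tmpOut))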

# loop over the job ids, creating various JobSpecs
jobSpec_list = []
for job_id in range(begin_job_id, end_job_id + 1):
   jobSpec = JobSpec()
   jobSpec.jobParams = {
                        'scopeLog': 'panda',
                        'logFile': 'log',
                        }
   jobSpec.computingSite = queueName
   jobSpec.PandaID = job_id
   jobSpec.modificationTime = datetime.datetime.now()
   realDataset = 'panda.sgotest.' + uuid.uuid4().hex
   ddmEndPointOut = 'BNL-OSG2_DATADISK'
   outFiles_scope_str = ''
   outFiles_str = ''
   realDatasets_str = ''
   ddmEndPointOut_str = ''
   # create up to 5 files for output
   for index in range(random.randint(1, 5)):
      fileSpec = FileSpec()
      assFileSpec = FileSpec()
      fileSpec.fileType = 'es_output'
      assFileSpec.lfn = 'panda.sgotest.' + uuid.uuid4().hex
      fileSpec.lfn = assFileSpec.lfn + '.gz'
      fileSpec.scope = 'panda'
Example #3
# remove any existing local database so the test starts clean
try:
    os.remove(harvester_config.db.database_filename)
except Exception:
    pass

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] in ['db_proxy']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)
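# after this loop every 'panda.log.*' logger also echoes to stdout with
# the same format as its existing file handler; db_proxy is skipped,
# presumably to keep DB-layer chatter out of the test output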

queueConfigMapper = QueueConfigMapper()

proxy = DBProxy()
proxy.make_tables(queueConfigMapper)

job = JobSpec()
job.PandaID = 1

job.modificationTime = datetime.datetime.now()
proxy.insert_jobs([job])

newJob = proxy.get_job(1)

a = CommunicatorPool()
a.get_jobs('siteName', 'nodeName', 'prodSourceLabel', 'computingElement', 1,
           {})
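# the string arguments above are placeholders; a real call would pass the
# actual site, node, prodSourceLabel and CE names (hypothetical example):
# a.get_jobs('BNL_OSG2', 'node001.example.org', 'managed', 'gridce01', 1, {})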

jobSpec = JobSpec()
jobSpec.jobParams = {
    # 'inFiles' is reconstructed: the listing is truncated just above, and
    # this is the conventional key paired with scopeIn/ddmEndPointIn below
    'inFiles': 'TXT.19772875._044894.tar.gz.1',
    'scopeIn': 'mc15_13TeV',
    'fsize': '658906675',
    'GUID': '7e3776f9bb0af341b03e59d3de895a13',
    'checksum': 'ad:3734bdd9',
    'ddmEndPointIn': 'BNL-OSG2_DATADISK',
    'realDatasetsIn': 'mc15_13TeV.363638.MGPy8EG_N30NLO_Wmunu_Ht500_700_BFilter.merge.DAOD_STDM4.e4944_s2726_r7772_r7676_p2842_tid09596175_00',
}
jobSpec.computingSite = queueName
jobSpec.PandaID = '11111'

from pandaharvester.harvestercore.plugin_factory import PluginFactory

pluginFactory = PluginFactory()

# get plugin
preparatorCore = pluginFactory.get_plugin(queueConfig.preparator)
print("plugin={0}".format(preparatorCore.__class__.__name__))

print(jobSpec)

print("testing stagein:")
print("BasePath from preparator configuration: %s " % preparatorCore.basePath)
preparatorCore.basePath = preparatorCore.basePath + "/testdata/"
print("basePath redifuned for test data: %s " % preparatorCore.basePath)
Example #5
# remove any existing local database so the test starts clean (the try
# body is truncated in the listing; same pattern as Example #3)
try:
    os.remove(harvester_config.db.database_filename)
except Exception:
    pass

for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
    if loggerName.startswith('panda.log'):
        if len(loggerObj.handlers) == 0:
            continue
        if loggerName.split('.')[-1] in ['db_proxy']:
            continue
        stdoutHandler = logging.StreamHandler(sys.stdout)
        stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
        loggerObj.addHandler(stdoutHandler)

queueConfigMapper = QueueConfigMapper()

proxy = DBProxy()
proxy.make_tables(queueConfigMapper)

job = JobSpec()
job.PandaID = 1


job.modificationTime = datetime.datetime.now()
proxy.insert_jobs([job])

newJob = proxy.get_job(1)


a = CommunicatorPool()
a.get_jobs('siteName', 'nodeName', 'prodSourceLabel', 'computingElement', 1, {})
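# hedged sanity check (not in the original): the spec fetched back from
# the DB should carry the PandaID that was inserted above
assert newJob is not None and newJob.PandaID == 1
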
import sys
import uuid
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.file_spec import FileSpec
from pandaharvester.harvestercore.event_spec import EventSpec

from pandaharvester.harvestercore.communicator_pool import CommunicatorPool

rID = sys.argv[1]
taskid = rID.split('-')[0]
pandaid = int(rID.split('-')[1])

job = JobSpec()
job.PandaID = pandaid
event = EventSpec()
# FileSpec describing the zip that contains the finished event range
# (renamed from 'file' to avoid shadowing the builtin)
zipFileSpec = FileSpec()
zipFileSpec.status = 'finished'
zipFileSpec.objstoreID = 9575
zipFileSpec.pathConvention = 1000
zipFileSpec.lfn = str(uuid.uuid4().hex) + '.zip'
zipFileSpec.fsize = 555
zipFileSpec.chksum = '0d2a9dc9'
event.eventRangeID = rID
event.eventStatus = 'finished'
# zipEventMap groups finished events by the zip file that contains them:
# each entry holds the event specs plus the FileSpec of the zip
job.zipEventMap = {1: {'events': [event],
                       'zip': zipFileSpec}}


a = CommunicatorPool()
a.update_jobs([job])
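# update_jobs posts the zipped event-range status back to PanDA; its
# return value (one status entry per submitted job) can be captured and
# printed when debugging the communicator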