def lumi_calc(opts, workDir, jobList, splitter):
	"""Summarize the lumi sections processed by the given jobs.

	Collects per-sample lumi/run/event information via process_jobs, merges
	the lumi sections, prints event statistics and optionally writes the
	processed lumi sections as JSON and/or grid-control expressions.

	opts      - parsed option object (job_events / job_json / job_gc flags,
	            optional output_dir)
	workDir   - grid-control work directory (fallback output location)
	jobList   - jobs whose logs are evaluated
	splitter  - data splitter instance forwarded to process_jobs
	"""
	(lumiDict, readDict, writeDict) = process_jobs(opts, workDir, jobList, splitter)
	activity = utils.ActivityLog('Simplifying lumi sections')
	# Convert {sample: {run: set(lumi)}} into per-sample lists of
	# single-section ranges ([run, lumi], [run, lumi]) ...
	lumis = {}
	for sample in lumiDict:
		for run in lumiDict[sample]:
			for lumi in lumiDict[sample][run]:
				lumis.setdefault(sample, []).append(([run, lumi], [run, lumi]))
	# ... and collapse adjacent sections into ranges
	for sample in lumiDict:
		lumis[sample] = mergeLumi(lumis[sample])
	activity.finish()
	for (sample, lumi_list) in lumis.items():
		print('Sample: %s' % sample)
		if opts.job_events:
			print('=========================================')
			# %12s (not %12d): readDict.get(sample) may be None if nothing was read
			print('Number of events processed: %12s' % readDict.get(sample))
			print(' Number of events written: %12d' % sum(writeDict.get(sample, {}).values()))
			if writeDict.get(sample, None):
				sys.stdout.write('\n')
				head = [(0, ' Output filename'), (1, 'Events')]
				utils.printTabular(head, lmap(lambda pfn: {0: pfn, 1: writeDict[sample][pfn]}, writeDict[sample]))
		if opts.job_json:
			json_fn = os.path.join(opts.output_dir or workDir, 'processed_%s.json' % sample)
			# BUGFIX: close the output file deterministically instead of
			# leaking the handle returned by open()
			with open(json_fn, 'w') as json_fp:
				outputJSON(lumi_list, json_fp)
			print('Saved processed lumi sections in ' + json_fn)
		if opts.job_gc:
			sys.stdout.write('\n')
			print('List of processed lumisections:')
			print('-----------------------------------------')
			outputGC(lumi_list)
		sys.stdout.write('\n')
def lumi_calc(opts, workDir, jobList, splitter):
	"""Report and export the lumi sections seen by the given jobs.

	Gathers per-sample lumi and event bookkeeping via process_jobs, merges
	the individual lumi sections into ranges and then, per sample, prints
	event statistics and writes JSON / grid-control output as requested by
	the opts flags (job_events / job_json / job_gc).
	"""
	(lumiDict, readDict, writeDict) = process_jobs(opts, workDir, jobList, splitter)
	activity = utils.ActivityLog('Simplifying lumi sections')
	# Flatten {sample: {run: lumis}} into per-sample lists of 1-section ranges
	merged = {}
	for (sample, run_map) in lumiDict.items():
		for (run, lumi_set) in run_map.items():
			for section in lumi_set:
				merged.setdefault(sample, []).append(([run, section], [run, section]))
	# Collapse neighbouring sections into larger ranges
	for sample in lumiDict:
		merged[sample] = mergeLumi(merged[sample])
	activity.finish()
	for (sample, section_list) in merged.items():
		print('Sample: %s' % sample)
		if opts.job_events:
			print('=========================================')
			print('Number of events processed: %12s' % readDict.get(sample))
			print(' Number of events written: %12d' % sum(writeDict.get(sample, {}).values()))
			sample_files = writeDict.get(sample, None)
			if sample_files:
				sys.stdout.write('\n')
				header = [(0, ' Output filename'), (1, 'Events')]
				rows = lmap(lambda pfn: {0: pfn, 1: writeDict[sample][pfn]}, sample_files)
				utils.printTabular(header, rows)
		if opts.job_json:
			json_fn = os.path.join(opts.output_dir or workDir, 'processed_%s.json' % sample)
			outputJSON(section_list, open(json_fn, 'w'))
			print('Saved processed lumi sections in ' + json_fn)
		if opts.job_gc:
			sys.stdout.write('\n')
			print('List of processed lumisections:')
			print('-----------------------------------------')
			outputGC(section_list)
		sys.stdout.write('\n')
#!/usr/bin/env python # | Copyright 2010-2016 Karlsruhe Institute of Technology # | # | Licensed under the Apache License, Version 2.0 (the "License"); # | you may not use this file except in compliance with the License. # | You may obtain a copy of the License at # | # | http://www.apache.org/licenses/LICENSE-2.0 # | # | Unless required by applicable law or agreed to in writing, software # | distributed under the License is distributed on an "AS IS" BASIS, # | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # | See the License for the specific language governing permissions and # | limitations under the License. try: from xmlrpclib import ServerProxy except ImportError: from xmlrpc.client import ServerProxy import sys from gcSupport import utils from grid_control_cms.lumi_tools import formatLumi, mergeLumi, parseLumiFromJSON server = ServerProxy('http://pccmsdqm04.cern.ch/runregistry/xmlrpc') data = server.DataExporter.export('RUNLUMISECTION', 'GLOBAL', 'json', {'groupName': 'Collisions10'}) runs = parseLumiFromJSON(data) sys.stdout.write('lumi filter = %s\n' % utils.wrapList(formatLumi(mergeLumi(runs)), 60, ',\n\t'))
#!/usr/bin/env python #-# Copyright 2010 Karlsruhe Institute of Technology #-# #-# Licensed under the Apache License, Version 2.0 (the "License"); #-# you may not use this file except in compliance with the License. #-# You may obtain a copy of the License at #-# #-# http://www.apache.org/licenses/LICENSE-2.0 #-# #-# Unless required by applicable law or agreed to in writing, software #-# distributed under the License is distributed on an "AS IS" BASIS, #-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #-# See the License for the specific language governing permissions and #-# limitations under the License. from gcSupport import utils from grid_control_cms.lumi_tools import formatLumi, parseLumiFromJSON, mergeLumi import xmlrpclib server = xmlrpclib.ServerProxy('http://pccmsdqm04.cern.ch/runregistry/xmlrpc') data = server.DataExporter.export('RUNLUMISECTION', 'GLOBAL', 'json', {'groupName': 'Collisions10'}) runs = parseLumiFromJSON(data) utils.vprint("lumi filter = %s" % utils.wrapList(formatLumi(mergeLumi(runs)), 60, ',\n\t'), -1)
if "#" in opts.remove: eraseBlock(opts.remove) else: for block in api.listBlocks(opts.remove.split("#")[0]): eraseBlock(block["Name"]) api.deleteProcDS(opts.remove.split("#")[0]) elif opts.listlumis: allrl = [] for fileInfo in api.listFiles(opts.listlumis, retriveList=['retrive_lumi']): lfn = fileInfo['LogicalFileName'] rl = [] for lumi in fileInfo['LumiList']: rl.append(([int(lumi["RunNumber"]), int(lumi["LumiSectionNumber"])], [int(lumi["RunNumber"]), int(lumi["LumiSectionNumber"])])) print lfn print utils.wrapList(formatLumi(mergeLumi(rl)), 70, ',\n\t') allrl.extend(rl) print "\nComplete dataset:" print utils.wrapList(formatLumi(mergeLumi(allrl)), 70, ',\n\t') elif opts.list: for block in api.listBlocks(opts.list): print block["Name"] elif opts.files: for f in api.listFiles(opts.files, retriveList=['retrive_block', 'retrive_run', 'retrive_lumi']): print f print elif opts.dump: print api.listDatasetContents(opts.dump.split("#")[0], opts.dump)
def main():
    # Process job logs: collect lumi sections and event counts per sample,
    # then print statistics and optionally export JSON / grid-control output.
    if opts.save_jobjson or opts.save_jobgc or opts.get_events:
        (workDir, nJobs, jobList) = getWorkJobs(args)
        (log, incomplete, splitter, splitInfo) = (None, False, None, {})
        (lumiDict, readDict, writeDict) = ({}, {}, {})
        try:
            splitter = DataSplitter.loadState(os.path.join(workDir, 'datamap.tar'))
        except Exception:
            pass  # best-effort: no dataset splitting info available
        jobList = sorted(jobList)
        for jobNum in jobList:
            # Replace the previous activity log with one for the current job
            del log
            log = utils.ActivityLog('Reading job logs - [%d / %d]' % (jobNum, jobList[-1]))
            jobInfo = getJobInfo(workDir, jobNum, lambda retCode: retCode == 0)
            if not jobInfo:
                # Warn only once about unfinished jobs, then skip this job
                if not incomplete:
                    print 'WARNING: Not all jobs have finished - results will be incomplete!'
                    incomplete = True
                continue
            # NOTE(review): 'parameterized' is not defined in this view -
            # presumably set during option parsing elsewhere; verify.
            if not parameterized:
                if splitter:
                    splitInfo = splitter.getSplitInfo(jobNum)
                # Sample name from splitter metadata (nickname, then dataset id, then 0)
                outputName = splitInfo.get(DataSplitter.Nickname, splitInfo.get(DataSplitter.DatasetID, 0))
            else:
                # Derive sample name from the job output file name
                outputName = jobInfo['file'].split()[2].replace("_%d_" % jobNum, '_').replace('/', '_').replace('__', '_')
            # Read framework report files to get number of events
            try:
                outputDir = os.path.join(workDir, 'output', 'job_' + str(jobNum))
                for fwkXML in getCMSSWInfo(os.path.join(outputDir, 'cmssw.dbs.tar.gz')):
                    # Lumi sections seen by this job, grouped by run
                    for run in fwkXML.getElementsByTagName('Run'):
                        for lumi in run.getElementsByTagName('LumiSection'):
                            run_id = int(run.getAttribute('ID'))
                            lumi_id = int(lumi.getAttribute('ID'))
                            lumiDict.setdefault(outputName, {}).setdefault(run_id, set()).add(lumi_id)
                    # Events written per output file (PFN)
                    for outFile in fwkXML.getElementsByTagName('File'):
                        pfn = outFile.getElementsByTagName('PFN')[0].childNodes[0].data
                        if pfn not in writeDict.setdefault(outputName, {}):
                            writeDict[outputName][pfn] = 0
                        writeDict[outputName][pfn] += int(outFile.getElementsByTagName('TotalEvents')[0].childNodes[0].data)
                    # Events read per sample
                    for inFile in fwkXML.getElementsByTagName('InputFile'):
                        if outputName not in readDict:
                            readDict[outputName] = 0
                        readDict[outputName] += int(inFile.getElementsByTagName('EventsRead')[0].childNodes[0].data)
            except KeyboardInterrupt:
                sys.exit(os.EX_OK)
            except Exception:
                # NOTE(review): this bare re-raise makes the print/continue
                # below unreachable - looks like leftover debugging; confirm.
                raise
                print 'Error while parsing framework output of job %s!' % jobNum
                continue
        del log
        log = utils.ActivityLog('Simplifying lumi sections')
        # Convert {sample: {run: set(lumi)}} into per-sample lists of
        # single-section ranges, then merge adjacent sections
        lumis = {}
        for sample in lumiDict:
            for run in lumiDict[sample]:
                for lumi in lumiDict[sample][run]:
                    lumis.setdefault(sample, []).append(([run, lumi], [run, lumi]))
        for sample in lumiDict:
            lumis[sample] = mergeLumi(lumis[sample])
        del log
        # NOTE(review): the loop variable rebinds the 'lumis' dict name to the
        # per-sample list; works because items() is evaluated once, but fragile.
        for sample, lumis in lumis.items():
            print 'Sample:', sample
            print '========================================='
            print 'Number of events processed: %12d' % readDict[sample]
            print ' Number of events written: %12d' % sum(writeDict.get(sample, {}).values())
            if writeDict.get(sample, None):
                print
                head = [(0, ' Output filename'), (1, 'Events')]
                utils.printTabular(head, map(lambda pfn: {0: pfn, 1: writeDict[sample][pfn]}, writeDict[sample]))
            if opts.save_jobjson:
                # NOTE(review): file handle from open() is never closed explicitly
                outputJSON(lumis, open(os.path.join(workDir, 'processed_%s.json' % sample), 'w'))
                print 'Saved processed lumi sections in', os.path.join(workDir, 'processed_%s.json' % sample)
            if opts.save_jobgc:
                print
                print 'List of processed lumisections:'
                print '-----------------------------------------'
                outputGC(lumis)
            print

    ###########################
    # Lumi filter manipulation
    ###########################
    if opts.save_exprgc or opts.save_exprjson or opts.save_exprfull:
        if len(args) == 0:
            raise Exception('No arguments given!')
        try:
            lumis = parseLumiFilter(str.join(' ', args))
        except Exception:
            raise Exception('Could not parse: %s' % str.join(' ', args))
        if opts.save_exprgc:
            outputGC(lumis)
        if opts.save_exprjson:
            outputJSON(lumis)
        if opts.save_exprfull:
            # Expand each (start, end) range into {run: [lumi, ...]}
            result = {}
            for rlrange in lumis:
                start, end = rlrange
                # Ranges are expected to stay within a single run
                assert(start[0] == end[0])
                # NOTE(review): extend() returns None, so 'llist' is always
                # None and unused - only the setdefault side effect matters.
                llist = result.setdefault(start[0], []).extend(range(start[1], end[1] + 1))
            print result