Example #1
0
 def test_runReco_tf(self):
     """Run Reco_tf.py over the q222 AMI tag with athenaMP (4 worker
     processes) and validate the merged outputs via the job report.

     Checks: transform exit code 0; ESD/AOD/HIST entries present in the
     report; ESD has 4 subFiles (one per MP worker); AOD and HIST event
     counts equal --maxEvents.
     """
     cmd = ['Reco_tf.py']
     cmd.extend(['--AMI', 'q222'])
     cmd.extend(['--maxEvents', '24'])
     # Run athena with 4 MP worker processes
     cmd.append('--athenaopts=--nprocs=4')
     # Target size ESD:0 forces per-worker ESD outputs to be kept unmerged
     cmd.extend(['--athenaMPMergeTargetSize', 'ESD:0'])

     msg.info('Will run this transform: {0}'.format(cmd))
     p = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1)
     # Echo the transform's output while it runs
     while p.poll() is None:
         line = p.stdout.readline()
         sys.stdout.write(line)
     # Hoover up remaining buffered output lines
     for line in p.stdout:
         sys.stdout.write(line)
     self.assertEqual(p.returncode, 0)

     # Now load metadata and test a few important values
     with open('jobReport.json') as jr:
         md = json.load(jr)
         self.assertEqual(isinstance(md, dict), True)
         dataDict = pyJobReportToFileDict(md)
         self.assertTrue('ESD' in dataDict.keys())
         self.assertTrue('AOD' in dataDict.keys())
         self.assertTrue('HIST' in dataDict.keys())
         # BUG FIX: was assertTrue(len(...), 4) - the second argument of
         # assertTrue is a failure *message*, so the check passed for any
         # non-empty subFiles list. assertEqual compares the count properly.
         self.assertEqual(len(dataDict['ESD']['subFiles']), 4)
         self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 24)
         self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 24)
Example #2
0
 def test_runReco_tf(self):
     """Run the FullChain_tf.py simulation+reconstruction transform over
     two events and verify the AOD event count reported in jobReport.json.
     """
     cmd = ['FullChain_tf.py', '--inputEVNTFile', sourceFile,
            '--outputAODFile', 'test.AOD.pool.root',
            '--geometryVersion', 'ATLAS-GEO-20-00-01',
            '--conditionsTag', 'OFLCOND-MC12-SIM-00',
            '--randomSeed', '10',
            '--skipEvents', '0',
            '--maxEvents', '2']  # 2 events - this is a slow job in G4
     ## Event counting currently broken for multi-step transforms
     msg.info('Will run this transform: {0}'.format(cmd))
     p = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE,
                          stderr = subprocess.STDOUT, bufsize = 1)
     # Echo the transform's output while it runs
     while p.poll() is None:
         sys.stdout.write(p.stdout.readline())
     # Hoover up remaining buffered output lines
     for leftover in p.stdout:
         sys.stdout.write(leftover)
     self.assertEqual(p.returncode, 0)

     # Now load metadata and test a few important values
     with open('jobReport.json') as jr:
         report = json.load(jr)
         self.assertEqual(isinstance(report, dict), True)
         fileDict = pyJobReportToFileDict(report)
         self.assertEqual(fileDict["AOD"]['subFiles'][0]['nentries'], 2)
Example #3
0
    def test_runReco_tf(self):
        """Run Reco_tf.py over a single RDO input, producing ESD, AOD and
        HIST outputs, then validate names, event counts, geometry and
        conditions metadata in the generated job report.
        """
        inputs = glob.glob(sourceFiles)
        self.assertEqual(len(inputs), 1)
        cmd = ['Reco_tf.py', '--inputRDOFile']
        cmd.extend(inputs)
        cmd.extend([
            '--outputESDFile', 'my.ESD.pool.root', '--autoConfiguration',
            'everything'
        ])
        cmd.extend(['--outputAODFile', 'my.AOD.pool.root'])
        cmd.extend(['--outputHISTFile', 'my.HIST.root'])
        cmd.extend(['--maxEvents', '10'])
        #cmd.extend(['--preExec', 'rec.doTrigger=False'])  # This is temporary while trigger doesn't work in r19
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd,
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1)
        # Echo the transform's output while it runs
        while p.poll() is None:
            line = p.stdout.readline()
            sys.stdout.write(line.decode())
        # Hoover up remaining buffered output lines.
        # BUG FIX: the pipe yields bytes; writing them raw to the text-mode
        # sys.stdout raises TypeError on Python 3. Decode here too, matching
        # the readline loop above.
        for line in p.stdout:
            sys.stdout.write(line.decode())
        self.assertEqual(p.returncode, 0)

        # Now load metadata and test a few important values
        with open('jobReport.json') as jr:
            md = json.load(jr)
            self.assertEqual(isinstance(md, dict), True)
            dataDict = pyJobReportToFileDict(md)
            self.assertTrue('ESD' in dataDict)
            self.assertTrue('AOD' in dataDict)
            self.assertTrue('HIST' in dataDict)
            self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10)
            self.assertEqual(dataDict['ESD']['subFiles'][0]['geometry'],
                             'ATLAS-GEO-20-00-01')
            self.assertEqual(dataDict['ESD']['subFiles'][0]['conditions_tag'],
                             'COMCOND-BLKPA-006-01')
            self.assertEqual(dataDict['ESD']['subFiles'][0]['beam_type'],
                             ['collisions'])
            self.assertEqual(dataDict['ESD']['subFiles'][0]['name'],
                             'my.ESD.pool.root')
            self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 10)
            self.assertEqual(dataDict['AOD']['subFiles'][0]['geometry'],
                             'ATLAS-GEO-20-00-01')
            self.assertEqual(dataDict['AOD']['subFiles'][0]['conditions_tag'],
                             'COMCOND-BLKPA-006-01')
            self.assertEqual(dataDict['AOD']['subFiles'][0]['beam_type'],
                             ['collisions'])
            self.assertEqual(dataDict['AOD']['subFiles'][0]['name'],
                             'my.AOD.pool.root')
            self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 10)
Example #4
0
    def test_Reco_Tier0_tf(self):
        """Run Reco_tf.py Tier-0 style: first dump the job configuration to
        a pickle file (--dumpPickle), then re-run the transform configured
        from that pickle (--argdict) and validate the ESD metadata.
        """
        pFile = 'job.pkl'

        cmd = "Reco_tf.py --inputBSFile /afs/cern.ch/atlas/project/rig/referencefiles/dataStreams_high_mu/data12_8TeV/data12_8TeV.00201556.physics_JetTauEtmiss.merge.RAW._lb0423._SFO-1._0001.1 --maxEvents 5 --autoConfiguration everything --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'rec.doCalo=False' 'rec.doInDet=False' 'rec.doMuon=False' 'rec.doJetMissingETTag=False' 'rec.doEgamma=False' 'rec.doMuonCombined=False' 'rec.doTau=False' 'rec.doTrigger=False' --outputESDFile myESD.pool.root --dumpPickle {0}".format(
            pFile).split()
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd,
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1)
        # Echo the transform's output while it runs
        while p.poll() is None:
            line = p.stdout.readline()
            sys.stdout.write(line)
        # Hoover up remaining buffered output lines
        for line in p.stdout:
            sys.stdout.write(line)
        self.assertEqual(p.returncode, 0)

        # Check the pickle was produced
        self.assertEqual(os.access(pFile, os.R_OK), True)
        # BUG FIX: pickles are binary data - text mode ('r') fails under
        # Python 3. Open in 'rb' and use a context manager so the handle
        # is closed even if pickle.load() raises.
        with open(pFile, 'rb') as unpickleFile:
            contents = pickle.load(unpickleFile)
        self.assertEqual(isinstance(contents, dict), True)

        # Now configure and run the transform from the pickle file
        cmd = "Reco_tf.py --argdict {0} --outputESDFile newESD.pool.root".format(
            pFile).split()
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd,
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1)
        while p.poll() is None:
            line = p.stdout.readline()
            sys.stdout.write(line)
        # Hoover up remaining buffered output lines
        for line in p.stdout:
            sys.stdout.write(line)
        self.assertEqual(p.returncode, 0)

        # Now load metadata and test a few important values
        with open('jobReport.json') as jr:
            md = json.load(jr)
            self.assertEqual(isinstance(md, dict), True)
            dataDict = pyJobReportToFileDict(md)
            self.assertTrue('ESD' in dataDict.keys())
            self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 5)
            # The second (re-configured) run should have produced the
            # renamed output, proving the --argdict override took effect
            self.assertEqual(dataDict['ESD']['subFiles'][0]['name'],
                             'newESD.pool.root')
Example #5
0
    def test_runReco_tf(self):
        """Run Reco_tf.py from a single bytestream input, producing ESD,
        AOD, HIST and TAG outputs, then validate file names and event
        counts recorded in the job report.
        """
        inputFiles = glob.glob(sourceFiles)
        self.assertEqual(len(inputFiles), 1)
        cmd = ['Reco_tf.py', '--inputBSFile'] + inputFiles
        cmd += ['--outputESDFile', 'my.ESD.pool.root',
                '--autoConfiguration', 'everything',
                '--outputAODFile', 'my.AOD.pool.root',
                '--outputHISTFile', 'my.HIST.root',
                '--outputTAGFile', 'my.TAG.pool.root',
                '--maxEvents', '10']
        # This is temporary while trigger doesn't work in r19
        cmd += ['--preExec', 'rec.doTrigger=False']
        ## Event counting currently broken for multi-step transforms
        cmd += ['--checkEventCount', 'true']
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, bufsize=1)
        # Echo the transform's output while it runs
        while p.poll() is None:
            sys.stdout.write(p.stdout.readline())
        # Hoover up remaining buffered output lines
        for leftover in p.stdout:
            sys.stdout.write(leftover)
        self.assertEqual(p.returncode, 0)

        # Now load metadata and test a few important values
        with open('jobReport.json') as jr:
            report = json.load(jr)
            self.assertEqual(isinstance(report, dict), True)
            fileDict = pyJobReportToFileDict(report)
            for dataType in ('ESD', 'AOD', 'HIST', 'TAG'):
                self.assertTrue(dataType in fileDict.keys())
            # ESD and AOD: check both event count and output file name
            for dataType, outName in (('ESD', 'my.ESD.pool.root'),
                                      ('AOD', 'my.AOD.pool.root')):
                subFile = fileDict[dataType]['subFiles'][0]
                self.assertEqual(subFile['nentries'], 10)
                self.assertEqual(subFile['name'], outName)
            self.assertEqual(fileDict['HIST']['subFiles'][0]['nentries'], 10)
            self.assertEqual(fileDict['TAG']['subFiles'][0]['nentries'], 10)
Example #6
0
    def test_runReco_tf(self):
        """Run the AtlasG4_tf.py simulation transform over 2 events and
        check event count, geometry and conditions tag in the job report.
        """
        cmd = ['AtlasG4_tf.py', '--inputEvgenFile', sourceFile]
        cmd.extend(['--outputHITSFile', 'test.HITS.pool.root'])
        cmd.extend(['--geometryVersion', 'ATLAS-GEO-20-00-01'])
        cmd.extend(['--conditionsTag', 'OFLCOND-MC12-SIM-00'])
        cmd.extend(['--randomSeed', '10'])
        cmd.extend(['--skipEvents', '0'])
        cmd.extend(['--maxEvents', '2'])  # 2 events - this is a slow job
        ## Event counting currently broken for multi-step transforms
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd,
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1)
        # Echo the transform's output while it runs
        while p.poll() is None:
            line = p.stdout.readline()
            sys.stdout.write(line.decode())
        # Hoover up remaining buffered output lines.
        # BUG FIX: the pipe yields bytes; writing them raw to the text-mode
        # sys.stdout raises TypeError on Python 3. Decode here too, matching
        # the readline loop above.
        for line in p.stdout:
            sys.stdout.write(line.decode())
        self.assertEqual(p.returncode, 0)

        # Now load metadata and test a few important values
        with open('jobReport.json') as jr:
            md = json.load(jr)
            self.assertEqual(isinstance(md, dict), True)
            dataDict = pyJobReportToFileDict(md)
            # Change in SimuJobTransforms, but be compatible with type = hits and HITS
            dataKey = None
            if 'hits' in dataDict:
                dataKey = 'hits'
            elif 'HITS' in dataDict:
                dataKey = 'HITS'
            self.assertNotEqual(dataKey, None)
            self.assertEqual(dataDict[dataKey]['subFiles'][0]['nentries'], 2)
            self.assertEqual(dataDict[dataKey]['subFiles'][0]['geometry'],
                             'ATLAS-GEO-20-00-01')
            self.assertEqual(
                dataDict[dataKey]['subFiles'][0]['conditions_tag'],
                'OFLCOND-MC12-SIM-00')