def test_runReco_tf(self):
    """Run a FullChain (EVNT -> AOD) transform and check the AOD event count."""
    cmd = ['FullChain_tf.py', '--inputEVNTFile', sourceFile]
    cmd.extend(['--outputAODFile', 'test.AOD.pool.root'])
    cmd.extend(['--geometryVersion', 'ATLAS-GEO-20-00-01'])
    cmd.extend(['--conditionsTag', 'OFLCOND-MC12-SIM-00'])
    cmd.extend(['--randomSeed', '10'])
    cmd.extend(['--skipEvents', '0'])
    cmd.extend(['--maxEvents', '2'])  # 2 events - this is a slow job in G4
    ## Event counting currently broken for multi-step transforms
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    self.assertEqual(dataDict["AOD"]['subFiles'][0]['nentries'], 2)
def test_tarball(self):
    """Check that a transform accepts a DBRelease supplied as a local tarball."""
    try:
        os.symlink(
            '/afs/cern.ch/work/g/graemes/ddm/ddo.000001.Atlas.Ideal.DBRelease.v220701/DBRelease-22.7.1.tar.gz',
            'DBRelease-22.7.1.tar.gz')
    except (IOError, OSError) as e:
        # Ignore file exists - if that happens the link was already there
        # (was a 2to3 artefact unpacking e.args and shadowing the errno module)
        if e.errno == 17:
            pass
        else:
            raise
    cmd = ['Athena_tf.py', '--DBRelease', 'DBRelease-22.7.1.tar.gz']
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines (decode: bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
def test_runReco_tf(self):
    """Run an AthenaMP Reco_tf (q222, 4 workers) and check the merged outputs."""
    cmd = ['Reco_tf.py']
    cmd.extend(['--AMI', 'q222'])
    cmd.extend(['--maxEvents', '24'])
    cmd.append('--athenaopts=--nprocs=4')
    cmd.extend(['--athenaMPMergeTargetSize', 'ESD:0'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    self.assertTrue('ESD' in dataDict.keys())
    self.assertTrue('AOD' in dataDict.keys())
    self.assertTrue('HIST' in dataDict.keys())
    # assertTrue(x, msg) treats the 4 as a failure message and never checks
    # the worker count - assertEqual is what was intended
    self.assertEqual(len(dataDict['ESD']['subFiles']), 4)
    self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 24)
    self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 24)
def main():
    """Build, configure and run the athenaMT trigger transform."""
    msg.info('This is %s' % sys.argv[0])
    # NB: inData/outData must use the same text ('**') as the corresponding
    # input**File / output**File arguments
    mtExecutor = trigExecutor(name='athena', exe='athenaMT.py',
                              exeArgs=['athenaoptsMT'],
                              inData=['BS'], outData=['BS_MT'])
    trfMT = transform(trfName='Trig_trf', executor=mtExecutor)
    addAthenaArguments(trfMT.parser)
    addTriggerArgs(trfMT.parser)
    # --verbose / --loglevel DEBUG take effect from this point onwards
    trfMT.parseCmdLineArgs(sys.argv[1:])
    # Convert argument names and apply any requested pre-run operations
    trigPreRun(trfMT)
    # Run the transform and produce the final report
    trfMT.execute()
    trfMT.generateReport()
    # Apply any requested post-run operations
    trigPostRun(trfMT)
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trfMT.exitCode))
    sys.exit(trfMT.exitCode)
def test_runNTUPMerge(self):
    """Merge three NTUP_ZPRIMEEE files and verify the merged file metadata."""
    inputs = glob.glob(sourceFiles)
    self.assertEqual(len(inputs), 3)
    cmd = ['NTUPMerge_tf.py', '--inputNTUP_ZPRIMEEEFile']
    cmd.extend(inputs)
    cmd.extend(['--outputNTUP_ZPRIMEEE_MRGFile', 'merged.NTUP_ZPRIMEEE.pool.root'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    self.assertEqual(md['files']['output'][0]['subFiles'][0]['nentries'], 23254)
    self.assertEqual(md['files']['output'][0]['subFiles'][0]['name'],
                     'merged.NTUP_ZPRIMEEE.pool.root')
def main():
    """Run the EventIndex transform, defaulting eidsname from the output dataset."""
    msg.info('This is %s' % sys.argv[0])
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    # EventIndex: update datasetname if outputfile has the form "datasetname#filename"
    # and it has not been specified within the options.
    args = trf.argdict
    # dict.has_key() was removed in python3 - use the 'in' operator
    if "eidsname" not in args:
        dsname = None
        for filetype in ('AOD_MRG', 'EI'):
            if 'output' + filetype + 'File' in args:
                outputFile = args['output' + filetype + 'File']
                dsname = outputFile.dataset
                break
        if dsname is not None:
            obj = trfArgClasses.argString(dsname)
            args['eidsname'] = obj
    trf.execute()
    trf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def test_runAODMergeSlow(self):
    """Merge five AOD files with the slow (non-fastPool) merge and check counts."""
    inputs = glob.glob(sourceFiles)
    self.assertEqual(len(inputs), 5)
    cmd = ['AODMerge_tf.py', '--inputAODFile']
    cmd.extend(inputs)
    cmd.extend(['--outputAOD_MRGFile', 'slowmerged.AOD.pool.root'])
    cmd.extend(['--outputTAGFile', 'slowTAG.pool.root'])
    cmd.extend(['--fastPoolMerge', 'False'])
    cmd.extend(['--reportName', 'jobReportSlow'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReportSlow.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    self.assertEqual(md['files']['output']['common']['nentries'], 2500)
def main():
    """Run the transform, defaulting eidsname from the input file's dataset."""
    msg.info('This is {}'.format(sys.argv[0]))
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    # update datasetname if inputfile has the form "datasetname#filename"
    # and it has not been specified within the options.
    args = trf.argdict
    # dict.has_key() was removed in python3 - use the 'in' operator
    if "eidsname" not in args:
        dsname = None
        # No break here: the last matching input filetype wins (as before)
        for filetype in ('POOL', 'AOD', 'ESD', 'EVNT', 'HITS', 'RDO'):
            if 'input' + filetype + 'File' in args:
                inputFile = args['input' + filetype + 'File']
                dsname = inputFile.dataset
        if dsname is not None:
            obj = trfArgClasses.argString(dsname)
            args['eidsname'] = obj
    trf.execute()
    trf.generateReport()
    msg.info("{} stopped at {}, trf exit code {:d}".format(sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def test_runReco_q222_tf(self):
    """Run Reco_tf with AMI tag q222 and check per-substep event counts."""
    cmd = ['Reco_tf.py']
    cmd.extend(['--AMI', 'q222'])
    cmd.extend(['--maxEvents', '2'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    self.assertTrue('resource' in md)
    self.assertEqual(md['resource']['executor']['AODtoTAG']['nevents'], 2)
    self.assertEqual(md['resource']['executor']['ESDtoAOD']['nevents'], 2)
    self.assertEqual(md['resource']['executor']['RAWtoESD']['nevents'], 2)
def main():
    """Parse command-line arguments, run the transform and exit with its code."""
    msg.info("This is %s" % sys.argv[0])
    tf = getTransform()
    tf.parseCmdLineArgs(sys.argv[1:])
    tf.execute()
    tf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), tf.exitCode))
    sys.exit(tf.exitCode)
def test_runReco_tf(self):
    """Run Reco_tf from RDO and verify ESD/AOD/HIST metadata in the job report."""
    inputs = glob.glob(sourceFiles)
    self.assertEqual(len(inputs), 1)
    cmd = ['Reco_tf.py', '--inputRDOFile']
    cmd.extend(inputs)
    cmd.extend(['--outputESDFile', 'my.ESD.pool.root',
                '--autoConfiguration', 'everything'])
    cmd.extend(['--outputAODFile', 'my.AOD.pool.root'])
    cmd.extend(['--outputHISTFile', 'my.HIST.root'])
    cmd.extend(['--maxEvents', '10'])
    #cmd.extend(['--preExec', 'rec.doTrigger=False'])  # This is temporary while trigger doesn't work in r19
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    self.assertTrue('ESD' in dataDict)
    self.assertTrue('AOD' in dataDict)
    self.assertTrue('HIST' in dataDict)
    self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10)
    self.assertEqual(dataDict['ESD']['subFiles'][0]['geometry'], 'ATLAS-GEO-20-00-01')
    self.assertEqual(dataDict['ESD']['subFiles'][0]['conditions_tag'], 'COMCOND-BLKPA-006-01')
    self.assertEqual(dataDict['ESD']['subFiles'][0]['beam_type'], ['collisions'])
    self.assertEqual(dataDict['ESD']['subFiles'][0]['name'], 'my.ESD.pool.root')
    self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 10)
    self.assertEqual(dataDict['AOD']['subFiles'][0]['geometry'], 'ATLAS-GEO-20-00-01')
    self.assertEqual(dataDict['AOD']['subFiles'][0]['conditions_tag'], 'COMCOND-BLKPA-006-01')
    self.assertEqual(dataDict['AOD']['subFiles'][0]['beam_type'], ['collisions'])
    self.assertEqual(dataDict['AOD']['subFiles'][0]['name'], 'my.AOD.pool.root')
    self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 10)
def test_illegalName(self):
    """An invalid DBRelease name must fail with TRF_DBRELEASE_PROBLEM."""
    cmd = ['Athena_tf.py', '--DBRelease', 'FailMeHarder']
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, trfExit.nameToCode('TRF_DBRELEASE_PROBLEM'))
def test_athenaNormalScan(self):
    """A plain Athena_tf.py run must succeed (exit code 0)."""
    cmd = ['Athena_tf.py']
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
def test_athenaDisableFilter(self):
    """With error filtering disabled the transform must fail with code 68."""
    cmd = ['Athena_tf.py', '--ignoreFiles', 'None']
    cmd.extend(['--reportName', 'jobReportDisable'])
    # Log after the command is fully assembled (it was previously logged
    # before --reportName was appended, so the log was incomplete)
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 68)
def test_Reco_Tier0_tf(self):
    """Run Reco_tf with --dumpPickle, then re-run from the pickled argdict."""
    pFile = 'job.pkl'
    cmd = "Reco_tf.py --inputBSFile /afs/cern.ch/atlas/project/rig/referencefiles/dataStreams_high_mu/data12_8TeV/data12_8TeV.00201556.physics_JetTauEtmiss.merge.RAW._lb0423._SFO-1._0001.1 --maxEvents 5 --autoConfiguration everything --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'rec.doCalo=False' 'rec.doInDet=False' 'rec.doMuon=False' 'rec.doJetMissingETTag=False' 'rec.doEgamma=False' 'rec.doMuonCombined=False' 'rec.doTau=False' 'rec.doTrigger=False' --outputESDFile myESD.pool.root --dumpPickle {0}".format(
        pFile).split()
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Check the pickle was produced
    self.assertEqual(os.access(pFile, os.R_OK), True)
    # Pickles are binary data: open in 'rb' (text mode 'r' breaks in python3);
    # the context manager also guarantees the file is closed
    with open(pFile, 'rb') as unpickleFile:
        contents = pickle.load(unpickleFile)
    self.assertEqual(isinstance(contents, dict), True)
    # Now configure and run the transform from the pickle file
    cmd = "Reco_tf.py --argdict {0} --outputESDFile newESD.pool.root".format(
        pFile).split()
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    self.assertTrue('ESD' in dataDict.keys())
    self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 5)
    self.assertEqual(dataDict['ESD']['subFiles'][0]['name'], 'newESD.pool.root')
def test_athenaManualRegexp(self):
    """A manual --ignorePatterns regexp must let the transform succeed."""
    cmd = ['Athena_tf.py', '--ignoreFiles', 'None']
    cmd.extend(['--ignorePatterns', 'An? (ERROR|FATAL) .*message'])
    cmd.extend(['--reportName', 'jobReportRegexp'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
def main():
    """Detect the RAWtoALL workflow, then build and run the transform."""
    msg.info('This is %s' % sys.argv[0])
    args = sys.argv[1:]
    if "--help" in args:
        # just looking for help, skip r2a limited parser
        RAWtoALL = False
    else:
        RAWtoALL = detectRAWtoALL(args)
    msg.info("RAWtoALL workflow detected" if RAWtoALL else "no RAWtoALL")
    trf = getTransform(RAWtoALL)
    trf.parseCmdLineArgs(args)
    # This is the place to insert extra checks or manipulations after the
    # arguments are known, but before the transform traces the graph path
    # or actually executes (e.g. steering based on defined arguments)
    trf.execute()
    trf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def main():
    """Parse arguments, run the transform and exit with its status code."""
    # python2 print statement is a SyntaxError under python3 - use the function
    print('\ntesting if this is the correct transform...\n\n')
    msg.info('This is %s' % sys.argv[0])
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    trf.execute()
    trf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def test_Signatures(self):
    """Generating the transform JSON signatures file must succeed."""
    cmd = ['makeTrfJSONSignatures.py', '--output', 'test.json']
    msg.info('Will generate transform signatures: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
def test_runReco_tf(self):
    """Run Reco_tf from bytestream and check ESD/AOD/HIST/TAG outputs."""
    inputs = glob.glob(sourceFiles)
    self.assertEqual(len(inputs), 1)
    cmd = ['Reco_tf.py', '--inputBSFile']
    cmd.extend(inputs)
    cmd.extend(['--outputESDFile', 'my.ESD.pool.root',
                '--autoConfiguration', 'everything'])
    cmd.extend(['--outputAODFile', 'my.AOD.pool.root'])
    cmd.extend(['--outputHISTFile', 'my.HIST.root'])
    cmd.extend(['--outputTAGFile', 'my.TAG.pool.root'])
    cmd.extend(['--maxEvents', '10'])
    cmd.extend(['--preExec', 'rec.doTrigger=False'])  # This is temporary while trigger doesn't work in r19
    ## Event counting currently broken for multi-step transforms
    cmd.extend(['--checkEventCount', 'true'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    self.assertTrue('ESD' in dataDict.keys())
    self.assertTrue('AOD' in dataDict.keys())
    self.assertTrue('HIST' in dataDict.keys())
    self.assertTrue('TAG' in dataDict.keys())
    self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10)
    self.assertEqual(dataDict['ESD']['subFiles'][0]['name'], 'my.ESD.pool.root')
    self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 10)
    self.assertEqual(dataDict['AOD']['subFiles'][0]['name'], 'my.AOD.pool.root')
    self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 10)
    self.assertEqual(dataDict['TAG']['subFiles'][0]['nentries'], 10)
def test_suicideScript(self):
    """A wrapped script killed by SIGKILL must be reported with exit code 65."""
    cmd = ['ExeWrap_tf.py']
    cmd.extend(['--exe', 'Suicide.py'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    # NOTE(review): wait() with a PIPE can deadlock if the child fills the
    # pipe buffer - presumably the output here is small; confirm if flaky
    p.wait()
    for line in p.stdout:
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 65)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    self.assertTrue("SIGKILL signal" in md['exitMsg'])
    self.assertEqual(md['exitCode'], 65)
def main():
    """Run the executable-wrapper transform with the exe chosen on the command line."""
    msg.info('This is %s' % sys.argv[0])
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    # The executable is only known after parsing; the transform keeps its
    # single executor in a set (no indexing), so take the first element via
    # an iterator and update its exe attribute in place.
    executor = next(iter(trf._executors))
    executor.exe = trf.argdict['exe'].value
    trf.execute()
    trf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def main():
    """Locate the named test script along DATAPATH and exec it in place."""
    if len(sys.argv) < 2:
        msg.error('No test argument was given')
        sys.exit(1)
    if 'DATAPATH' not in os.environ:
        msg.error('There is no DATAPATH to search along - is the release setup?')
        sys.exit(1)
    testName = sys.argv[1]
    testScript = os.path.join('JobTransforms/test', testName)
    pathToTestScript = findFile(os.environ['DATAPATH'], testScript)
    if pathToTestScript is None:
        msg.error('Test {0} was not found along DATAPATH'.format(testScript))
        sys.exit(1)
    msg.info('Found test {0} here: {1}'.format(testName, pathToTestScript))
    # Replace this process with the test script; the test name becomes argv[0]
    os.execv(pathToTestScript, sys.argv[1:])
def test_runEcho(self):
    """Run Sleep_tf and check it exits cleanly with a dict job report."""
    cmd = ['Sleep_tf.py', '--dust', '10']
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        # p.stdout yields bytes under python3 - decode before echoing
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
def test_runReco_tf(self):
    """Run an AtlasG4 simulation and check the HITS file metadata."""
    cmd = ['AtlasG4_tf.py', '--inputEvgenFile', sourceFile]
    cmd.extend(['--outputHITSFile', 'test.HITS.pool.root'])
    cmd.extend(['--geometryVersion', 'ATLAS-GEO-20-00-01'])
    cmd.extend(['--conditionsTag', 'OFLCOND-MC12-SIM-00'])
    cmd.extend(['--randomSeed', '10'])
    cmd.extend(['--skipEvents', '0'])
    cmd.extend(['--maxEvents', '2'])  # 2 events - this is a slow job
    ## Event counting currently broken for multi-step transforms
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
    dataDict = pyJobReportToFileDict(md)
    # Change in SimuJobTransforms, but be compatible with type = hits and HITS
    dataKey = None
    if 'hits' in dataDict:
        dataKey = 'hits'
    elif 'HITS' in dataDict:
        dataKey = 'HITS'
    self.assertNotEqual(dataKey, None)
    self.assertEqual(dataDict[dataKey]['subFiles'][0]['nentries'], 2)
    self.assertEqual(dataDict[dataKey]['subFiles'][0]['geometry'], 'ATLAS-GEO-20-00-01')
    self.assertEqual(dataDict[dataKey]['subFiles'][0]['conditions_tag'], 'OFLCOND-MC12-SIM-00')
def test_runEcho(self):
    """Exercise Echo_tf argument parsing (ints, floats, lists, substep args)."""
    cmd = ['Echo_tf.py']
    cmd.extend(['--testInt', '1234'])
    cmd.extend(['--testFloat', '-1.212'])
    cmd.extend(['--testIntList', '1,2,3,4,5,6'])
    cmd.extend(['--testSubstepList', 'all:juice', 'jane:apple', 'bob:orange', 'alice:pear'])
    cmd.extend(['--testSubstepInt', 'all:34', 'jane:1', 'bob:2', 'alice:-3'])
    cmd.extend(['--testSubstepBool', 'all:True', 'jane:false', 'bob:tRuE', 'alice:FaLse'])
    msg.info('Will run this transform: {0}'.format(cmd))
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, bufsize=1)
    while p.poll() is None:
        line = p.stdout.readline()
        sys.stdout.write(line.decode())
    # Hoover up remaining buffered output lines
    # (decode here too - p.stdout yields bytes under python3)
    for line in p.stdout:
        sys.stdout.write(line.decode())
    self.assertEqual(p.returncode, 0)
    # Now load metadata and test a few important values
    with open('jobReport.json') as jr:
        md = json.load(jr)
    self.assertEqual(isinstance(md, dict), True)
class DBReleasetest(unittest.TestCase):
    """Tests of DBRelease handling in transforms."""

    def _runTransform(self, cmd):
        """Run a transform command, echoing its output, and assert exit code 0."""
        msg.info('Will run this transform: {0}'.format(cmd))
        p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, bufsize=1)
        while p.poll() is None:
            line = p.stdout.readline()
            # p.stdout yields bytes under python3 - decode before echoing
            sys.stdout.write(line.decode())
        # Hoover up remaining buffered output lines
        for line in p.stdout:
            sys.stdout.write(line.decode())
        self.assertEqual(p.returncode, 0)

    # Standard setup using CVMFS
    def test_cvmfsStandard(self):
        self._runTransform(['Athena_tf.py', '--DBRelease', '23.3.1'])

    # Setup using CVMFS 'current'
    def test_cvmfsCurrent(self):
        self._runTransform(['Athena_tf.py', '--DBRelease', 'current'])

    # Test using a DBRelease file which exists
    def test_tarball(self):
        try:
            os.symlink('/afs/cern.ch/work/g/graemes/ddm/ddo.000001.Atlas.Ideal.DBRelease.v220701/DBRelease-22.7.1.tar.gz',
                       'DBRelease-22.7.1.tar.gz')
        except (IOError, OSError) as e:
            # "except ... as (errno, errMsg)" is python2-only syntax and
            # shadowed the errno module - use the exception attribute instead.
            # Ignore file exists - if that happens the link was already there
            if e.errno == 17:
                pass
            else:
                raise
        self._runTransform(['Athena_tf.py', '--DBRelease', 'DBRelease-22.7.1.tar.gz'])
def main():
    """Run the transform, skipping report generation when output validation is off."""
    msg.info('This is %s' % sys.argv[0])
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    trf.execute()
    # Only produce the report when output file validation was not explicitly
    # switched off on the command line
    skipReport = ('outputFileValidation' in trf._argdict
                  and trf._argdict['outputFileValidation'].value is False)
    if skipReport:
        msg.info('Skipping report generation')
    else:
        trf.generateReport()
    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def main():
    """Parse arguments, run the transform and exit with its status code."""
    msg.info('This is %s' % sys.argv[0])
    if not sys.argv[1:]:
        # NOTE(review): this only logs - execution continues with no
        # parameters; confirm whether an early exit was intended here
        msg.info("%s stopped at %s, no input parameters given" % (sys.argv[0], time.asctime()))
    trf = getTransform()
    trf.parseCmdLineArgs(sys.argv[1:])
    trf.execute()
    trf.generateReport()
    msg.info("%s stopped at %s, tf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
    sys.exit(trf.exitCode)
def getTransform():
    """Assemble the event-generation transform and its executor set.

    The generation step is chosen from the requested output type
    (EVNT or TXT); afterburner and AOD->DPD executors are always added.
    """
    exeSet = set()
    msg.info("Transform arguments %s" % sys.argv[1:])
    argText = str(sys.argv[1:])
    if "--outputEVNTFile" in argText:
        exeSet.add(EvgenExecutor(name="generate",
                                 skeleton="EvgenJobTransforms/skel.GENtoEVGEN.py",
                                 inData=["inNULL"],
                                 outData=["EVNT", "EVNT_Pre", "TXT"]))
        msg.info("Output EVNT file")
    elif "--outputTXTFile" in argText:
        exeSet.add(EvgenExecutor(name="generate",
                                 skeleton="EvgenJobTransforms/skel.GENtoTXT.py",
                                 inData=["inNULL"],
                                 outData=["TXT"]))
        msg.info("Output TXT file")
    else:
        msg.error("Output cannot be recognised")
    exeSet.add(EvgenExecutor(name="afterburn",
                             skeleton="EvgenJobTransforms/skel.ABtoEVGEN.py",
                             inData=["EVNT_Pre"],
                             outData=["EVNT"]))
    exeSet.add(athenaExecutor(name="AODtoDPD",
                              skeletonFile="PATJobTransforms/skeleton.AODtoDPD_tf.py",
                              substep="a2d",
                              inData=["EVNT"],
                              outData=["NTUP_TRUTH"],
                              perfMonFile="ntuple_AODtoDPD.pmon.gz"))
    trf = transform(executor=exeSet)
    addAthenaArguments(trf.parser, maxEventsDefaultSubstep='all')
    addStdEvgenArgs(trf.parser)
    return trf