def split_data(files, robset, output_dir):
  events = eformat.istream(files)
  logging.info("Reading %d event(s) from file '%s' and dumping to %d files." % 
      (len(events), os.path.basename(files), len(robset)))

  # out maps each possible ROB id to its own output stream.
  out = {}
  for r in robset: 
    out[r] = eformat.ostream(directory=output_dir, core_name='0x%08X' % r)
    
  for ife in events:
    toc = {}
    for k in ife.toc(): toc[k.key()] = k.data()
    model = toc[toc.keys()[0]] # take the first ROB as a model for the others
    diff = robset.difference(toc.keys())
    for source_id in diff: toc[source_id] = None 
    for source_id, ostream in out.iteritems(): 
      ostream.write(surround_with_fullevent(toc[source_id], ife))
    sys.stdout.write('.')
    sys.stdout.flush()
      
  sys.stdout.write('\n')
  sys.stdout.flush()

  # return a map from ROB id to the name of the file generated for it.
  retval = {}
  for rob_id, ostr in out.iteritems(): retval[rob_id] = ostr.last_filename()
  return retval
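A minimal usage sketch, not part of the original listing: wiring split_data together with the calculate_robset helper that appears in a later example. The command-line driver and output directory are hypothetical placeholders.

import logging
import sys

if __name__ == '__main__':
  files = sys.argv[1:]                        # e.g. one or more raw .data files
  robset = calculate_robset(files)            # union of ROB ids over all input files
  generated = split_data(files, robset, '.')  # one output file per ROB id
  for rob_id, fname in generated.iteritems():
    logging.info('ROB 0x%08X written to %s', rob_id, fname)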
Example #2

  def test02_WriteReadMany(self):

    # For this test we use a trick from the library.
    # We write each time the same event with changed
    # L1 identifiers only.
    
    out = eformat.ostream()
    
    event = eformat.write.FullEventFragment()
    event.lvl1_id(0x53)

    out.write(event)
    event.lvl1_id(0x7)
    out.write(event)
    event.lvl1_id(0x22)
    out.write(event)
    outfile = out.last_filename()
    del out
    
    input = eformat.istream(outfile)
    self.assertEqual(3, len(input))
    read = []
    for e in input: read.append(e)
    self.assertEqual(read[0].lvl1_id(), 0x53)
    self.assertEqual(read[1].lvl1_id(), 0x7)
    self.assertEqual(read[2].lvl1_id(), 0x22)
    self.assertEqual(read[0].checksum(), True)
    self.assertEqual(read[1].checksum(), True)
    self.assertEqual(read[2].checksum(), True)
    
    os.unlink(outfile)
Example #3
def main():
    from optparse import OptionParser
    import os

    log.basicConfig(level=log.DEBUG, format='%(levelname)s %(message)s')

    parser = OptionParser(description=__doc__, usage='%prog infile outfile')

    (opt, args) = parser.parse_args()

    if len(args) != 2:
        parser.print_help()
        return 1

    bsfile = eformat.istream([args[0]])

    ostr = eformat.ostream()
    for event in bsfile:
        ro_event = modify(event)
        rw_event = eformat.write.FullEventFragment(ro_event)
        ostr.write(rw_event)

    if ostr:
        storage_filename = ostr.last_filename()
        del ostr  # makes sure we flush all buffers
        os.rename(storage_filename, args[1])

    return
Example #4
  def test02_header_access(self):
    cpu_time_used = 0
    validation = 0
    events_read = 0
    robs_read = 0
    for e in eformat.istream([INPUT]):
      tmp = 0
      start = time.time()
      for r in e:
        tmp += r.rod_lvl1_id()
        robs_read += 1
      cpu_time_used += time.time() - start
      start = time.time()
      e.check_tree()
      validation += time.time() - start
      events_read += 1

    cpu_time_used *= 1000
    print " Statistics for (python) ROB header access:" 
    print " ------------------------------------------"
    print "  - Total reading time: %.2e millisecs" % cpu_time_used
    print "  - Reading time per ROB (%d): %.2e microsecs" % \
        (robs_read, 1000*cpu_time_used/robs_read)
    print "  - Validation per event (after header access): %2.e millisecs" % \
        (1000*validation/events_read)
Example #5
def my_dump(bsfile):
    """Runs the dumping routines"""

    # open a file
    print "=" * 100
    print "Opening", bsfile

    events = {}

    input = eformat.istream(bsfile)
    subdet = (eformat.helper.SubDetector.TDAQ_EVENT_FILTER if opt.ef
              else eformat.helper.SubDetector.TDAQ_LVL2)

    for event in input:
        ev = (event.global_id(), event.lvl1_id())

        chains = {}
        for f in event.children():
            if f.source_id().subdetector_id() == subdet:
                res.load(f)
                chains_data = list(res.getChainResult())
                #nchains = chains_data[0]
                for c in chains_data[1:]:
                    chain = HLTChain(c)
                    chains[chain.getChainCounter()] = chain.isPrescaled()
                break

        events[ev] = chains

    output = file(opt.pickle, "w")
    cPickle.dump(events, output)
    output.close()
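A short companion sketch, assumed rather than taken from the original: reading back the pickle written above. The file name 'chains.pickle' stands in for whatever opt.pickle pointed at.

import cPickle

# events maps (global_id, lvl1_id) -> {chain_counter: is_prescaled}
events = cPickle.load(open('chains.pickle', 'rb'))
for (global_id, lvl1_id), chains in events.iteritems():
    print 'event %d (lvl1 %d): %d chains' % (global_id, lvl1_id, len(chains))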
Example #6
def form_rod():
  m = roses(0)
  print_roses(m)
  counter = 1
  #istr = eformat.istream('data10_7TeV.00167661.physics_Egamma.daq.RAW._lb0000._SFO-1._0001.data')
  #istr = eformat.istream('data11_cos.00178514.physics_CosmicCalo.daq.RAW._lb0026._SFO-5._0001.data')
  #istr = eformat.istream('data11_7TeV.00177682.physics_EnhancedBias.daq.RAW._lb0550._SFO-4._0001.data')
  istr = eformat.istream([
'data11_7TeV.00177682.physics_Egamma.daq.RAW._lb0566._SFO-11._0001.data',
])
  output = eformat.ostream()
  for e in istr:
    # take one ROB to help build virtual ROBs
    newevent = eformat.write.FullEventFragment(e)

    for ros_id, list_robs_ids in m.iteritems():
      payload = [0]
      DSPmode = 0
      count_rob = 0
      list_robs = e.children()
      arob = eformat.write.ROBFragment()
      found_any = False
      for rob in list_robs:
        if rob.source_id() in list_robs_ids:
          arob = rob
          found_any = True
          source_id = int(rob.source_id())
          [feb1_ff, ex1, ey1, ez1, feb2_ff, ex2, ey2, ez2, DSPfirmware] = extract_febinfo(rob)
          if DSPfirmware > 0: DSPmode = DSPfirmware
          sume1 = ex1 + ey1
          sume2 = ex2 + ey2
          payload.append(feb1_ff)
          payload.append(ex1)
          payload.append(ey1)
          payload.append(ez1)
          payload.append(sume1)
          payload.append(feb2_ff)
          payload.append(ex2)
          payload.append(ey2)
          payload.append(ez2)
          payload.append(sume2)
          count_rob += 1
      payload[0] = (count_rob | 0xa << 16)
      if found_any:
        newrob = eformat.write.ROBFragment(arob)
        newrob.source_id(eformat.helper.SourceIdentifier(ros_id))
        newrob.minor_version(12)
        newrob.rod_minor_version(12)

        newrob.rod_data(payload)
        newrob.status([])
        newevent.append(newrob)
    output.write(newevent)

    print 'show counter =', counter
    if counter == -1:
      break
    counter += 1
Example #7

  def test04_CanRetrieveSliceFields(self):
    input = eformat.istream(INPUT)
    for event in input:
      status = event.status()
      ef_info = event.event_filter_info()
      stream_tag = event.stream_tag()
      del event
      self.assertEqual(type(status), eformat.u32slice)
Example #8

  def test01_WriteCompressedFile(self):
    ins = eformat.istream(INPUT)
    out = eformat.compstream(compression=eformat.EventStorage.CompressionType.ZLIB)
    for e in ins:
      out.write(e)

    fname = out.last_filename()
    del out
    os.rename(fname,OUTPUT)
Example #9

  def test03_CanRetrieveSimpleFields(self):
    input = eformat.istream(INPUT)
    for event in input:
      lvl1_id = event.lvl1_id()
      checksum_type = event.checksum_type()
      header_size = event.header_size_word()
      self.assertEqual(type(event.source_id()), eformat.helper.SourceIdentifier)
      self.assertEqual(type(event.run_type()), eformat.helper.RunType)
      self.assertEqual(type(event.version()), eformat.helper.Version)
Example #10

  def test04_WriteReadBrokenNormally(self):

    rodData = [1,2,3,4,5,6]
    rodStatus = [7,8,9,10,11,12,13,14,15]
    
    sid_rob = eformat.helper.SourceIdentifier(eformat.helper.SubDetector.TDAQ_LVL2, 0x1)

    rob = eformat.write.ROBFragment()
    rob.source_id(sid_rob)
    rob.rod_status(rodStatus)
    rob.rod_data(rodData)

    # now we deliberately corrupt these data
    serial_rob = rob.__raw__()
    serial_rob[-1] = 0
    serial_rob[-2] = 0
    serial_rob[-3] = 0
    for k in range(len(serial_rob)):
      if serial_rob[k] == eformat.helper.HeaderMarker.ROD: serial_rob[k] = 0
    test_this = eformat.write.ROBFragment(eformat.ROBFragment(serial_rob))

    event = eformat.write.FullEventFragment()
    event.append(test_this) #appends unchecked ROB
    event.append(rob) #appends checked ROB
    event.checksum_type(eformat.helper.CheckSum.ADLER32)

    out = eformat.ostream()
    out.write(event)
    outfile = out.last_filename()
    del out #so we close the file

    input = eformat.istream(outfile)
    self.assertEqual(1, len(input))
    read = input[0] #there should be only one
    self.assertEqual(read.nchildren(), 2)
    self.assertEqual(len(read), len(event))
    self.assertEqual(read.checksum(), True)
    self.assertEqual(event, read)

    # truncated ROD shall not cause a failure
    self.assertEqual(read.check_tree_noex(), True)

    # make sure the ROB is still bad
    self.assertEqual(read[0].check_noex(), False) # bad ROB
    self.assertEqual(read[0].check_rod_noex(), False) # bad ROB
    self.assertEqual(read[0].check_rob_noex(), True) # bad ROB

    # retrieve ROB problems
    self.assertEqual(read[0].problems(),
        [eformat.helper.FragmentProblem.WRONG_ROD_MARKER, eformat.helper.FragmentProblem.WRONG_ROD_FRAGMENT_SIZE])

    # compare the damaged ROB and the one serialized word after word
    self.assertEqual(serial_rob, read[0].__raw__())
    
    os.unlink(outfile)
Example #11

  def test03_WriteReadBroken(self):

    rodData = [1,2,3,4,5,6]
    rodStatus = [7,8,9,10,11,12,13,14,15]
    
    sid_rob = eformat.helper.SourceIdentifier(eformat.helper.SubDetector.TDAQ_LVL2, 0x1)

    rob = eformat.write.ROBFragment()
    rob.source_id(sid_rob)
    rob.rod_status(rodStatus)
    rob.rod_data(rodData)

    # now we deliberately corrupt these data
    serial_rob = rob.__raw__()
    serial_rob[-1] = 0
    serial_rob[-2] = 0
    serial_rob[-3] = 0
    test_this = eformat.ROBFragment(serial_rob)

    # if it passes this point, we have a damaged ROB
    self.assertRaises(RuntimeError, test_this.check)
    self.assertRaises(RuntimeError, test_this.check_rod)
    self.assertEqual(test_this.check_rob(), True)

    event = eformat.write.FullEventFragment()
    event.append_unchecked(test_this) #appends unchecked ROB
    event.append(rob) #appends checked ROB
    event.checksum_type(eformat.helper.CheckSum.ADLER32)
    
    out = eformat.ostream()
    out.write(event)
    outfile = out.last_filename()
    del out #so we close the file

    input = eformat.istream(outfile)
    self.assertEqual(1, len(input))
    read = input[0] #there should be only one
    self.assertEqual(read.checksum(), True)
    self.assertEqual(event, read)

    #check_tree should not fail for a truncated ROD fragment
    self.assertEqual(read.check_tree(), True)
    self.assertEqual(read.check_tree_noex(), True)

    # make sure the ROB is still bad
    self.assertRaises(RuntimeError, read[1].check) # bad ROB
    self.assertEqual(read[1].check_noex(), False) # bad ROB
    self.assertRaises(RuntimeError, read[1].check_rod) # bad ROB
    self.assertEqual(read[1].check_rod_noex(), False) # bad ROB
    self.assertEqual(read[1].check_rob_noex(), True) # bad ROB
    
    # compare the damaged ROB and the one serialized word after word
    self.assertEqual(serial_rob, read[1].__raw__())
    
    os.unlink(outfile)
Example #12
def dbgPostRun(inputFileList, outputFileList):
    msg.info('Running debug_stream analysis PostRun operations on files :{0} '.
             format(inputFileList))
    msg.info(
        'Running debug_stream analysis PostRun, histogram output in :{0} '.
        format(outputFileList))

    total = 0
    # open the ROOT output file
    out_file = outputFileList[0]
    hfile = TFile(out_file, 'UPDATE')
    # initialize dbgEventInfo, the main event analysis class
    eventInfo = dbgEventInfo("_Pos", inputFileList.value[0])
    data = []
    l1Info = []
    hltInfo = []
    relInfo = str()
    for inputFile in inputFileList.value:

        if not os.path.isfile(inputFile):
            msg.error('No BS file created with file name :{0} '.format(
                inputFileList))
            continue
        bsfile = eformat.istream(inputFile)
        events = len(bsfile)
        total += events
        n = 0
        isFirstEvent = True

        for event in bsfile:
            # on the first event, get L1 and HLT counter and chain info from the DB or XML file
            if isFirstEvent:
                #e = bsfile[0]
                l1Info, hltInfo, relInfo = TriggerDBInfo(event.run_no())
                isFirstEvent = False
            n += 1
            if n < 5:
                data = [
                    event.run_no(),
                    event.lumi_block(),
                    event.global_id(),
                    event.lvl1_id(),
                    event.bc_time_seconds(),
                    event.bc_time_nanoseconds()
                ]
                msg.info('Event details :{0}'.format(data))
            #Run debug event analysis and fill output TTree
            eventInfo.event_count(event)
            eventInfo.event_info(event, l1Info, hltInfo)
            eventInfo.fillTree()
    #close output TFile
    hfile.Write()
    hfile.Close()
    msg.info('Finished running debug_stream analysis PostRun operations')
Example #13
def dbgPreRun(inputFileList, outputFileList):

    msg.info('Running debug_stream analysis PreRun operations on files :{0} '.format(inputFileList))
    msg.info('Running debug_stream analysis PreRun, histogram output in :{0} '.format(outputFileList))
     
    total = 0
    # open the ROOT output file
    out_file = outputFileList[0]
    hfile = TFile(out_file, 'RECREATE')
    # initialize dbgEventInfo, the main event analysis class
    eventInfo = dbgEventInfo("_Pre", inputFileList.value[0])
    data = []
    l1Info = []
    hltInfo = []
    relInfo = str()
    runInfo = 0
    for inputFile in inputFileList.value:
        
        bsfile = eformat.istream(inputFile)
        events = len(bsfile)
        total += events
        n = 0 
        isFirstEvent = True

        for event in bsfile:
            # on the first event, get L1 and HLT counter and chain info from the DB or XML file
            if isFirstEvent:
                if event.run_no() == 0: runInfo = int(inputFile.split(".")[1])
                else: runInfo = event.run_no()
                #e = bsfile[0] 
                l1Info, hltInfo, relInfo = TriggerDBInfo(runInfo)        
                isFirstEvent = False
                #runInfo.append(relInfo)  
            n += 1
            if n < 5:
                data = [event.run_no(),event.lumi_block(),event.global_id(),event.lvl1_id(),event.bc_time_seconds(),event.bc_time_nanoseconds()]
                msg.info('Event details :{0}'.format(data))
            #Run debug event analysis and fill output TTree
            eventInfo.event_count(event)
            eventInfo.event_info(event, l1Info, hltInfo)
            eventInfo.fillTree()
    #close output TFile
    hfile.Write()
    hfile.Close()
    # Release format should be good; if relInfo is 'unknown' then print this error
    if not re.match(r'(\d+\.{0,1})+$', relInfo):
        msg.error('Not able to find release from DB (or it was badly formatted), release : %s' % relInfo)
        msg.error('Problem with DB configuration in COOL DB, most likely during data-taking')

    msg.info('Finished running debug_stream analysis PreRun operations')
    # returns the local asetupString from runs in the input files, to be used by asetup
    return getAsetupString(relInfo)
Example #14
def ReplaceMUCTPI(input_file, output_file):
    input = eformat.istream([input_file])
    dr = EventStorage.pickDataReader(input_file)
    output = eformat.ostream(core_name="subset",
                             run_number=dr.runNumber(),
                             trigger_type=dr.triggerType(),
                             detector_mask=dr.detectorMask(),
                             beam_type=dr.beamType(),
                             beam_energy=dr.beamEnergy())
    for event in input:
        output.write(modify(event))
    tmp_file_name = output.last_filename()
    del output
    os.rename(tmp_file_name, output_file)
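A hedged dispatch sketch, mirroring the __main__ blocks used elsewhere in this listing; it assumes ReplaceMUCTPI and its modify() helper are defined in the same module.

import sys

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print 'usage: %s <infile> <outfile>' % sys.argv[0]
        sys.exit(1)
    # rewrites every event through modify() and renames the output atomically
    ReplaceMUCTPI(sys.argv[1], sys.argv[2])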
Example #15
def my_conf(argv):
  """Runs the merging routines"""

  import eformat, logging 
  import EventApps.myopt as myopt

  option = {}
  option['output'] = {'short': 'o', 'arg': True,
                      'default': '',
                      'description': 'Filename of the output file'}
  option['verbosity'] = {'short': 'V', 'arg': True,
                         'default': logging.INFO,
                         'description': 'From which level to print system messages [%d, %d]. For details please consult the documentation of python\'s "logging" module' % (logging.NOTSET, logging.CRITICAL)}
                                                       
  parser = myopt.Parser(extra_args=True)
  for (k,v) in option.items():
    parser.add_option(k, v['short'], v['description'], v['arg'], v['default'])
  
  if len(sys.argv) == 1:
    print parser.usage('global "%s" options:' % sys.argv[0])
    sys.exit(1)

  #process the global options
  (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])

  #now the things which require global defaults
  logging.getLogger('').setLevel(kwargs['verbosity'])

  stream = eformat.istream(extra)
  ostream = eformat.ostream()

  total = 0
  for e in stream: 
    ostream.write(e)
    sys.stdout.write('.')
    sys.stdout.flush()
    total += 1

  sys.stdout.write('\n')
  sys.stdout.flush()

  oname = ostream.last_filename()
  if len(kwargs['output']) != 0:
    del ostream
    os.rename(oname, kwargs['output'])
    oname = kwargs['output']

  logging.info('Wrote %d events in %s\n' % (total, oname))
  sys.exit(0)
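A sketch of how this merge routine might be dispatched, assuming it lives in an executable module; note that my_conf re-reads sys.argv internally, so the argv argument is effectively unused.

import sys

if __name__ == '__main__':
    my_conf(sys.argv[1:])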
Example #16

  def test01_WriteReadOne(self):

    rodData = [1,2,3,4,5,6]
    rodStatus = [7,8,9,10,11,12,13,14,15]
    lvl2Info = [16, 17]
    efInfo = [18]
    status = [19,20,21]
    stream = []
    
    for i in range(5):
      a = eformat.helper.StreamTag()
      a.name = 'Name-%d' % i
      a.type = 'calibration'
      a.obeys_lumiblock = bool(i % 3)
      if i==3:
        a.robs.append(0xff)
      if i==5: # note: unreachable for range(5); kept as in the original test
        a.dets.append(eformat.helper.SubDetector.TDAQ_BEAM_CRATE)
      
      stream.append(a)

    sid_rob = eformat.helper.SourceIdentifier(eformat.helper.SubDetector.TDAQ_LVL2, 0x1)

    rob = eformat.write.ROBFragment()
    rob.source_id(sid_rob)
    rob.rod_status(rodStatus)
    rob.rod_data(rodData)

    event = eformat.write.FullEventFragment()
    event.append(rob)
    event.lvl2_trigger_info(lvl2Info)
    event.event_filter_info(efInfo)
    event.status(status)
    event.stream_tag(stream)
    event.checksum_type(eformat.helper.CheckSum.ADLER32)
    
    out = eformat.ostream()
    out.write(event)
    outfile = out.last_filename()
    del out #so we close the file

    input = eformat.istream(outfile)
    self.assertEqual(1, len(input))
    read = input[0] #there should be only one
    self.assertEqual(event, read)
    self.assertEqual(read.checksum(), True)
    
    os.unlink(outfile)
Example #17

def my_dump(argv):
  """Runs the dumping routines"""

  import EventApps.myopt as myopt
  import eformat, logging
  import eformat.dump

  option = {}
  option['start-event'] = {'short': 'a', 'arg': True,
                           'default': 0,
                           'description': 'The number of events I should skip from the beginning'}
  option['number-of-events'] = {'short': 'n', 'arg': True,
                               'default': 0,
                               'description': 'The number of events to dump/analyze (0 means all)'}
  option['output'] = {'short': 'o', 'arg': True,
                      'default': '',
                      'description': 'The output file to use'}
  option['verbosity'] = {'short': 'v', 'arg': True,
                         'default': 2,
                         'description': 'Up to which level to dump (0, 1, 2)'}
  option['debug'] = {'short': 'd', 'arg': True,
                     'default': logging.INFO,
                     'description': 'Up to which level to print debug messages (0, 1, 2, 3)'}
  
  parser = myopt.Parser(extra_args=True)
  for (k,v) in option.items():
    parser.add_option(k, v['short'], v['description'], v['arg'], v['default'])
  
  if len(sys.argv) == 1:
    print parser.usage('global "%s" options:' % sys.argv[0])
    sys.exit(1)

  #process the global options
  (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])

  #now the things which require global defaults
  logging.getLogger('').setLevel(kwargs['debug'])
  #os.environ['TDAQ_ERS_DEBUG_LEVEL'] = str(kwargs['debug'])

  stream = eformat.istream(extra)
  
  if kwargs['verbosity'] > 0:
    eformat.dump.event_callback.append(('.+', eformat.dump.fullevent_handler))
  if kwargs['verbosity'] > 1:
    eformat.dump.rob_callback.append(('.+', eformat.dump.rob_handler))

  eformat.dump.dump(stream, kwargs['start-event'], kwargs['number-of-events'])
  sys.exit(0)
Example #18

def calculate_robset(files):
  """Given a list of input files, generate a python set of ROB identifiers.

  The generated list is like a logical union of all available ROB identifiers
  on all files.
  
  keyword parameters:
  files -- A python list of file names
  """
  
  retval = set() 
  for ife in eformat.istream(files):
    retval = retval.union([k.key() for k in ife.toc()])
    sys.stdout.write('.')
    sys.stdout.flush()
  sys.stdout.write('\n') 
  return retval
Example #19
def my_conf(argv):
  """Runs the checking routines"""

  import eformat, logging
  import EventApps.myopt as myopt

  option = {}
  option['verbosity'] = {'short': 'V', 'arg': True,
                         'default': logging.INFO,
                         'description': 'From which level to print system messages [%d, %d]. For details please consult the documentation of python\'s "logging" module' % (logging.NOTSET, logging.CRITICAL)}
  
  parser = myopt.Parser(extra_args=True)
  for (k,v) in option.items():
    parser.add_option(k, v['short'], v['description'], v['arg'], v['default'])
  
  if len(sys.argv) == 1:
    print parser.usage('global "%s" options:' % sys.argv[0])
    sys.exit(1)

  #process the global options
  (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])

  #now the things which require global defaults
  logging.getLogger('').setLevel(kwargs['verbosity'])

  stream = eformat.istream(extra)

  #reading will check the events in the stream...
  logging.info('Checking %d fragment(s)' % len(stream))
  for e in stream:
    if not e.check_noex():
      logging.warn("Skipping event %d because it is *invalid*: %s" % \
          (e.lvl1_id(), [str(k) for k in e.problems()]))
      continue
    sys.stdout.write('.')
    sys.stdout.flush()
    for rob in e:
      if not rob.check_noex():
        logging.warn("rob 0x%08x (%s) of event %d is *invalid*: %s" % \
          (rob.source_id().code(), rob.source_id(), e.lvl1_id(),
            [str(k) for k in rob.problems()]))
      if not rob.checksum(): 
        logging.warn("rob 0x%08x (%s) of event %d does not checksum" % \
          (rob.source_id().code(),rob.source_id(),e.lvl1_id()))
  sys.stdout.write('\n')
Example #20
def dump_events_with_long_roi_frag():
    import sys
    import eformat
    import libpyevent_storage as EventStorage
    fname='data15_cos.00262656.debug_DcmL1IdMismatchError.daq.RAW._lb0000._SFO-1._0001.data'
    streamT = eformat.istream(fname)
    sid = eformat.helper.SourceIdentifier(0x910081)
    events = [(eventT.header(), eventT.payload()) for eventT in streamT
              if len(eventT.find(sid).rod_data()) == 14]

    f = open('evdump.txt', 'w')
    f.write('header:\n')
    for w in events[0][0]:
        f.write('0x%08x\n' % w)
        
    f.write('payload:\n')
    for w in events[0][1]:
        f.write('0x%08x\n' % w)
            
    f.close()
Example #21
  def test01_data_read(self):
    cpu_time_used = 0
    elements_read = 0
    robs_read = 0
    for e in eformat.istream([INPUT]):
      tmp = 0
      start = time.time()
      for r in e:
        data = r.rod_data()
        tmp += sum(data)
        elements_read += len(data)
        robs_read += 1
      cpu_time_used += time.time() - start
    print " Statistics for (python) ROB data access:"
    print " ----------------------------------------"
    print "  - Total reading time: %.2e seconds" % cpu_time_used
    print "  - Reading time per ROB (%d): %.2e millisecs" % \
        (robs_read, 1000*cpu_time_used/robs_read)
    print "  - Reading time per data word in a ROB (%d): %.2e microsecs" % \
        (elements_read, 1000000*cpu_time_used/elements_read)
Example #22
def dump_info(bsfile, args):
    log.info('Opening %s', bsfile)
    input = eformat.istream(bsfile)
    offset = args.skip if args.skip else 0
    max_events = min(args.events, len(input)) if args.events else len(input)
    event_count = 0
    events = []

    # Loop over events
    for event in input:
        event_count += 1
        if event_count <= offset:
            continue
        if event_count > offset+max_events:
            break
        events.append(event)

        # Print header info
        print('{sep:s} Event: {:{width}d}, {:s} {sep:s}'.format(
              event_count, header_info(event),
              sep='='*20, width=len(str(max_events))))

        # Print L1/L2/HLT bits
        if args.l1:
            print(lvl1_bits(event))
        if args.l2:
            print(hlt_bits(event, l2=True))
        if args.ef:
            print(hlt_bits(event))

        # Print Stream Tags
        if args.stag:
            print(stream_tags(event))

        # HLT Result
        if args.efres or args.sizes:
            print(hlt_result(event, args.sizes))

    # Size summary (after the loop over events)
    if args.sizeSummary:
        print(size_summary(events))
Example #23
def main():
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('file', metavar='FILE', nargs=1, help='input file')
    parser.add_argument('-o',
                        '--output',
                        type=str,
                        help='output base file name')
    parser.add_argument('-n',
                        '--events',
                        type=int,
                        default=-1,
                        help='number of events to process')
    parser.add_argument('-r', '--runNumber', type=int, help='set run number')
    parser.add_argument(
        '-l',
        '--eventsPerLB',
        type=int,
        help='increment lumiblock number in steps [%(default)s]')
    parser.add_argument('--firstLB',
                        type=int,
                        default=1,
                        help='first lumiblock number [%(default)s]')
    parser.add_argument(
        '--incLB',
        type=int,
        default=1,
        help='increment steps for lumiblock number [%(default)s]')
    parser.add_argument('-t',
                        '--timestamp',
                        type=int,
                        help='set timestamp in seconds [%(default)s]')
    parser.add_argument('--removeRobs',
                        metavar='PATTERN',
                        type=str,
                        help='regex for removing specific ROB IDs')

    args = parser.parse_args()

    Config.firstLB = Store.currentLB = args.firstLB
    Config.incLB = args.incLB
    Config.eventsPerLB = args.eventsPerLB
    Config.runNumber = args.runNumber
    Config.bc_sec = args.timestamp

    log.info('Opening file %s', args.file[0])
    dr = eformat.EventStorage.pickDataReader(args.file[0])
    ostr = eformat.ostream(core_name=args.output or dr.fileNameCore(),
                           run_number=Config.runNumber or dr.runNumber(),
                           trigger_type=dr.triggerType(),
                           detector_mask=dr.detectorMask(),
                           beam_type=dr.beamType(),
                           beam_energy=dr.beamEnergy())

    bsfile = eformat.istream([args.file[0]])
    i = 0
    for event in bsfile:
        i += 1
        if args.events > 0 and i > args.events:
            break
        ro_event = modify(event)
        if args.removeRobs:
            rw_event = eformat.write.FullEventFragment(
                ro_event, re.compile(args.removeRobs))
        else:
            rw_event = eformat.write.FullEventFragment(ro_event)
        ostr.write(rw_event)

    return
Example #24
    def _process_bs_file (self, fname, evtmax=1, full_details=True):
        msg = self.msg()
        import eformat as ef

        data_reader = ef.EventStorage.pickDataReader(fname)
        assert data_reader, \
               'problem picking a data reader for file [%s]'%fname

        beam_type   = '<beam-type N/A>'
        try:
            beam_type = data_reader.beamType()
        except Exception:
            msg.warning ("problem while extracting beam-type information")
            pass

        beam_energy = '<beam-energy N/A>'
        try:
            beam_energy = data_reader.beamEnergy()
        except Exception:
            msg.warning ("problem while extracting beam-type information")
            pass

        bs = ef.istream(fname)

        file_infos = _create_file_infos()
        nentries = bs.total_events
        file_infos['nentries'] = nentries
        import uuid
        def _uuid():
            return str(uuid.uuid4()).upper()
        bs_metadata = {}
        for md in data_reader.freeMetaDataStrings():
            if md.startswith('Event type:'):
                k = 'evt_type'
                v = []
                if 'is sim' in md:   v.append('IS_SIMULATION')
                else:                v.append('IS_DATA')
                if 'is atlas' in md: v.append('IS_ATLAS')
                else:                v.append('IS_TESTBEAM')
                if 'is physics' in md: v.append('IS_PHYSICS')
                else:                  v.append('IS_CALIBRATION')
                bs_metadata[k] = tuple(v)
            elif md.startswith('GeoAtlas:'):
                k = 'geometry'
                v = md.split('GeoAtlas:')[1].strip()
                bs_metadata[k] = v
            elif md.startswith('IOVDbGlobalTag:'):
                k = 'conditions_tag'
                v = md.split('IOVDbGlobalTag:')[1].strip()
                bs_metadata[k] = v
            elif '=' in md:
                k,v = md.split('=')
                bs_metadata[k] = v

        # for bwd/fwd compat...
        # see: https://savannah.cern.ch/bugs/?73208
        for key_name,fct_name in (
            ('GUID','GUID'),
            ('Stream','stream'),
            ('Project', 'projectTag'),
            ('LumiBlock', 'lumiblockNumber'),
            ('run_number', 'runNumber'),
            ):
            if key_name in bs_metadata:
                # no need: already in bs metadata dict
                continue
            if hasattr(data_reader, fct_name):
                v = getattr(data_reader, fct_name)()
                bs_metadata[key_name] = v
        # for bwd/fwd compat... -- END
            
        # fix for ATEAM-122
        if len(bs_metadata.get('evt_type','')) == 0 : # see: ATMETADATA-6
            evt_type = ['IS_DATA', 'IS_ATLAS']
            if   bs_metadata.get('Stream', '').startswith('physics_'):
                evt_type.append('IS_PHYSICS')
            elif bs_metadata.get('Stream', '').startswith('calibration_'):
                evt_type.append('IS_CALIBRATION')
            elif bs_metadata.get('Project', '').endswith('_calib'):        
                evt_type.append('IS_CALIBRATION')
            else:
                evt_type.append('Unknown')
            bs_metadata['evt_type'] = evt_type

        file_infos['file_guid'] = bs_metadata.get('GUID', _uuid())
        file_infos['evt_type']  = bs_metadata.get('evt_type', [])
        file_infos['geometry']  = bs_metadata.get('geometry', None)
        file_infos['conditions_tag'] = bs_metadata.get('conditions_tag', None)
        file_infos['bs_metadata'] = bs_metadata

        if not data_reader.good():
            # event-less file...
            file_infos['run_number'].append(bs_metadata.get('run_number', 0))
            file_infos['lumi_block'].append(bs_metadata.get('LumiBlock', 0))
            # FIXME: not sure how to do that...
            return file_infos
        
        if evtmax == -1:
            evtmax = nentries
            
        ievt = iter(bs)
        for i in range(evtmax):
            try:
                evt = next(ievt)
                evt.check() # may raise a RuntimeError
                stream_tags = [dict(stream_type=tag.type,
                                    stream_name=tag.name,
                                    obeys_lbk=bool(tag.obeys_lumiblock))
                               for tag in evt.stream_tag()]
                file_infos['run_number'].append(evt.run_no())
                file_infos['evt_number'].append(evt.global_id())
                file_infos['lumi_block'].append(evt.lumi_block())
                file_infos['run_type'].append(ef.helper.run_type2string(evt.run_type()))
                file_infos['beam_type'].append(beam_type)
                file_infos['beam_energy'].append(beam_energy)
                file_infos['stream_tags'].extend(stream_tags)

            except RuntimeError as err:
                print ("** WARNING ** detected a corrupted bs-file:\n",err)
        """
        detailed dump how-to:
        ---------------------
        import eformat as ef
        import eformat.dump as edump
        edump.event_callback.append (('.+', edump.fullevent_handler))
        edump.dump (stream=ef.istream(fname), skip=0, total=0)
        """
        return file_infos
Example #25
def main():
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('-f',
                        '--file',
                        metavar='FILE',
                        nargs='*',
                        default=[],
                        help='local file or castor path')

    parser.add_argument('-g',
                        '--globalid',
                        type=int,
                        action='store',
                        nargs='*',
                        help='Global event ID')

    parser.add_argument('-l',
                        '--lvl1id',
                        type=int,
                        action='store',
                        nargs='*',
                        help='LVL1 ID')

    parser.add_argument('-t',
                        '--time',
                        action=StoreTime,
                        nargs='*',
                        help='Nanosecond time stamp (seconds:nanoseconds)')

    parser.add_argument('-d',
                        '--debug',
                        type=int,
                        action='store',
                        metavar='RUN',
                        help='Find event in debug streams of RUN')

    parser.add_argument('-s',
                        '--save',
                        metavar='OUTFILE',
                        nargs='?',
                        action='store',
                        const='trigbs_findevent',
                        help='Save selected events in OUTFILE')

    parser.add_argument('--debugPath',
                        action='store',
                        default='/castor/cern.ch/grid/atlas/DAQ/2012',
                        help='Path to debug stream events %(default)s')

    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Be verbose')

    args = parser.parse_args()

    files = []
    for f in args.file:
        if f.find('castor') != -1: files += nsls(f)
        else: files += [f]

    if args.debug is not None:
        # Set/reset castor environment for debug stream access
        stage_host = os.environ.get('STAGE_HOST', None)
        stage_svcclass = os.environ.get('STAGE_SVCCLASS', None)
        os.environ['STAGE_HOST'] = 'castoratlas'
        os.environ['STAGE_SVCCLASS'] = 'atlcal'
        debug_dirs = nsls(
            os.path.join(args.debugPath, '%08d' % args.debug, 'debug_*'))
        for d in debug_dirs:
            files += nsls(os.path.join(d, '*.data'))

        if stage_host: os.environ['STAGE_HOST'] = stage_host
        if stage_svcclass: os.environ['STAGE_SVCCLASS'] = stage_svcclass

    ofs = None
    if args.save is not None:
        ofs = eformat.ostream(core_name=args.save)

    for f in files:
        ifs = eformat.istream(f)
        if args.verbose: print '==%s' % f
        for e in ifs:
            found = True
            if args.globalid is not None and e.global_id() not in args.globalid:
                found = False
            if args.lvl1id is not None and e.lvl1_id() not in args.lvl1id:
                found = False
            if args.time is not None and (
                    e.bc_time_seconds(),
                    e.bc_time_nanoseconds()) not in args.time:
                found = False
            if found:
                # write out only the events that passed all selection filters
                if ofs: ofs.write(e)
                print f, fmtEvent(e, args.time is not None)
Example #26
#!/usr/bin/env tdaq_python
# $Id: reading.py 44327 2008-01-22 13:00:34Z rabello $
# Created by Andre DOS ANJOS <*****@*****.**>, 03-Aug-2006

import time
import sys
import eformat

if len(sys.argv) == 1:
    print "usage: %s <data-file> [+data-file]" % sys.argv[0]
    sys.exit(1)
    
stream = eformat.istream(sys.argv[1:])
print 'The total number of events available is %d' % len(stream)

volume = 0
rob_list = {} # a dictionary is more efficient!
start = time.time()
for event in stream:
  volume += len(event)
  for subdetector in event:
    #print subdetector.source_id().human_detector()
    for ros in subdetector:
      for rob in ros:
        rob_list[rob.source_id().code()] = 1

rob_list = rob_list.keys()
total_time = time.time() - start
print 'Total processing time was %f seconds' % total_time
print 'Total event data is %.2f Mb' % ((float(volume) * 4)/(1024*1024))
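The rob_list dictionary above is used as a poor man's set. A behavior-equivalent sketch with a real set, assuming a fresh istream (re-iterating an already consumed stream is not assumed to work):

rob_ids = set()
for event in eformat.istream(sys.argv[1:]):
  for subdetector in event:
    for ros in subdetector:
      for rob in ros:
        rob_ids.add(rob.source_id().code())
print 'Unique ROBs: %d' % len(rob_ids)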
Example #27
      data = rob.rod_data()
      newdata = reducedLARFEB(data)
      if len(data) > data[0]:
        newdata += [data[data[0]+ii] for ii in range(7)] # middle "ROD" header
        data = data[data[0]+7:]
        newdata += reducedLARFEB(data)
      newrob.rod_data(newdata)
    new_event.append(newrob)
  return new_event.readonly()

if __name__ == "__main__":
  if len(sys.argv)!=3:
    print('usage: %s <infile> <outfile>' % sys.argv[0])
    sys.exit(1)
  input_file = sys.argv[1]
  output_file = sys.argv[2]

  input = eformat.istream([input_file])
  dr = EventStorage.pickDataReader(input_file)
  output = eformat.ostream(core_name="subset",
                           run_number=dr.runNumber(),
                           trigger_type=dr.triggerType(),
                           detector_mask=dr.detectorMask(),
                           beam_type=dr.beamType(),
                           beam_energy=dr.beamEnergy())
  for event in input:
    output.write(modify(event))
  tmp_file_name = output.last_filename()
  del output
  os.rename(tmp_file_name, output_file)
Example #28

  def test02_ReadCompressedFile(self):
    ins = eformat.istream(OUTPUT)

    for e in ins:
      _ = len(e)
Example #29
def read_metadata(filenames,
                  file_type=None,
                  mode='lite',
                  promote=None,
                  meta_key_filter=[],
                  unique_tag_info_values=True):
    """
    This tool is independent of Athena framework and returns the metadata from a given file.
    :param filenames: the input file from which metadata needs to be extracted.
    :param file_type: the type of file. POOL or BS (bytestream: RAW, DRAW) files.
    :param mode: if true, will return all metadata associated with the filename. By default, is false and this will
    return a "tiny" version which have only the following keys: 'file_guid', 'file_size', 'file_type', 'nentries'.
    :return: a dictionary of metadata for the given input file.
    """

    from RootUtils import PyROOTFixes  # noqa F401

    # Check if the input is a file or a list of files.
    if isinstance(filenames, str):
        filenames = [filenames]

    # Check if file_type is an allowed value
    if file_type is not None:
        if file_type not in ('POOL', 'BS'):
            raise NameError(
                'Allowed values for \'file_type\' parameter are: "POOL" or "BS": you provided "'
                + file_type + '"')
        else:
            msg.info('Forced file_type: {0}'.format(file_type))

    # Check the value of mode parameter
    if mode not in ('tiny', 'lite', 'full', 'peeker'):
        raise NameError(
            'Allowed values for "mode" parameter are: "tiny", "lite", "peeker" or "full"'
        )
    msg.info('Current mode used: {0}'.format(mode))
    msg.info('Current filenames: {0}'.format(filenames))

    if mode != 'full' and len(meta_key_filter) > 0:
        raise NameError(
            'It is possible to use the meta_key_filter option only for full mode'
        )
    if meta_key_filter:
        msg.info('Filter used: {0}'.format(meta_key_filter))

    # create the storage object for metadata.
    meta_dict = {}

    # ----- retrieve metadata from all filename or filenames --------------------------------------------------------#
    for filename in filenames:
        meta_dict[filename] = {}
        current_file_type = None
        # Determine the file_type of the input and store this information into meta_dict
        if not file_type:
            if os.path.isfile(filename):
                with open(filename, 'rb') as binary_file:
                    magic_file = binary_file.read(4)

                    if magic_file == 'root' or magic_file == b'root':
                        current_file_type = 'POOL'
                        meta_dict[filename]['file_type'] = 'POOL'

                    else:
                        current_file_type = 'BS'
                        meta_dict[filename]['file_type'] = 'BS'

                    # add information about the file_size of the input filename
                    meta_dict[filename]['file_size'] = os.path.getsize(
                        filename)

            # determine the file type for the remote input files
            else:
                if regex_BS_files.match(filename):
                    current_file_type = 'BS'
                    meta_dict[filename]['file_type'] = 'BS'
                else:
                    current_file_type = 'POOL'
                    meta_dict[filename]['file_type'] = 'POOL'

                # add information about the file_size of the input filename
                meta_dict[filename][
                    'file_size'] = None  # None -> we can't read the file size for a remote file

        else:
            current_file_type = file_type

        # ----- retrieves metadata from POOL files ------------------------------------------------------------------#
        if current_file_type == 'POOL':
            import ROOT
            # open the file using ROOT.TFile
            current_file = ROOT.TFile.Open(_get_pfn(filename))

            # open the tree 'POOLContainer' to read the number of entries
            if current_file.GetListOfKeys().Contains('POOLContainer'):
                meta_dict[filename]['nentries'] = current_file.Get(
                    'POOLContainer').GetEntriesFast()
            else:
                meta_dict[filename]['nentries'] = None

            # open the tree 'CollectionTree' to read auto flush
            if current_file.GetListOfKeys().Contains('CollectionTree'):
                meta_dict[filename]['auto_flush'] = current_file.Get(
                    'CollectionTree').GetAutoFlush()

            # read and add the 'GUID' value
            meta_dict[filename]['file_guid'] = _read_guid(filename)

            # read and add compression level and algorithm
            meta_dict[filename][
                'file_comp_alg'] = current_file.GetCompressionAlgorithm()
            meta_dict[filename][
                'file_comp_level'] = current_file.GetCompressionLevel()

            # ----- read extra metadata required for 'lite' and 'full' modes ----------------------------------------#
            if mode != 'tiny':
                # selecting from all tree the only one which contains metadata, respectively "MetaData"
                metadata_tree = current_file.Get('MetaData')
                # read all list of branches stored in "MetaData" tree
                metadata_branches = metadata_tree.GetListOfBranches()
                nr_of_branches = metadata_branches.GetEntriesFast()

                # object to store the names of metadata containers and their corresponding class name.
                meta_dict[filename]['metadata_items'] = {}

                # create a container for the list of filters used for the lite version
                meta_filter = {}

                # set the filters for name
                if mode == 'lite':
                    meta_filter = {
                        '/TagInfo': 'IOVMetaDataContainer_p1',
                        'IOVMetaDataContainer_p1__TagInfo':
                        'IOVMetaDataContainer_p1',
                        '*': 'EventStreamInfo_p*'
                    }

                # set the filters for name
                if mode == 'peeker':
                    meta_filter = {
                        '/TagInfo': 'IOVMetaDataContainer_p1',
                        'IOVMetaDataContainer_p1__TagInfo':
                        'IOVMetaDataContainer_p1',
                        '/Simulation/Parameters': 'IOVMetaDataContainer_p1',
                        '/Digitization/Parameters': 'IOVMetaDataContainer_p1',
                        '/EXT/DCS/MAGNETS/SENSORDATA':
                        'IOVMetaDataContainer_p1',
                        'TriggerMenu': 'DataVector<xAOD::TriggerMenu_v1>',
                        'TriggerMenuAux.': 'xAOD::TriggerMenuAuxContainer_v1',
                        '*': 'EventStreamInfo_p*'
                    }

                if mode == 'full' and meta_key_filter:
                    meta_filter = {f: '*' for f in meta_key_filter}
                # store all persistent classes for metadata container existing in a POOL/ROOT file.
                persistent_instances = {}

                for i in range(0, nr_of_branches):
                    branch = metadata_branches.At(i)
                    name = branch.GetName()

                    class_name = branch.GetClassName()

                    if regexIOVMetaDataContainer.match(class_name):
                        name = name.replace('IOVMetaDataContainer_p1_',
                                            '').replace('_', '/')

                    if regexIOVMetaDataContainer.match(class_name):
                        meta_dict[filename]['metadata_items'][
                            name] = 'IOVMetaDataContainer'
                    elif regexByteStreamMetadataContainer.match(class_name):
                        meta_dict[filename]['metadata_items'][
                            name] = 'ByteStreamMetadataContainer'
                    elif regexEventStreamInfo.match(class_name):
                        meta_dict[filename]['metadata_items'][
                            name] = 'EventStreamInfo'
                    else:
                        meta_dict[filename]['metadata_items'][
                            name] = class_name

                    if len(meta_filter) > 0:
                        keep = False
                        for filter_key, filter_class in meta_filter.items():
                            if (filter_key.replace('/', '_') == name.replace(
                                    '/', '_')
                                    or filter_key == '*') and fnmatchcase(
                                        class_name, filter_class):
                                keep = True
                                break

                        if not keep:
                            continue

                    # assign the corresponding persistent class based of the name of the metadata container
                    if regexEventStreamInfo.match(class_name):
                        if class_name.endswith('_p1'):
                            persistent_instances[
                                name] = ROOT.EventStreamInfo_p1()
                        elif class_name.endswith('_p2'):
                            persistent_instances[
                                name] = ROOT.EventStreamInfo_p2()
                        else:
                            persistent_instances[
                                name] = ROOT.EventStreamInfo_p3()
                    elif regexIOVMetaDataContainer.match(class_name):
                        persistent_instances[
                            name] = ROOT.IOVMetaDataContainer_p1()
                    elif regexXAODEventFormat.match(class_name):
                        persistent_instances[name] = ROOT.xAOD.EventFormat_v1()
                    elif regexXAODTriggerMenu.match(class_name):
                        persistent_instances[
                            name] = ROOT.xAOD.TriggerMenuContainer_v1()
                    elif regexXAODTriggerMenuAux.match(class_name):
                        persistent_instances[
                            name] = ROOT.xAOD.TriggerMenuAuxContainer_v1()

                    if name in persistent_instances:
                        branch.SetAddress(
                            ROOT.AddressOf(persistent_instances[name]))

                metadata_tree.GetEntry(0)

                # clean the meta-dict if the meta_key_filter flag is used, to return only the key of interest
                if meta_key_filter:
                    meta_dict[filename] = {}

                # read the metadata
                for name, content in persistent_instances.items():
                    key = name

                    if hasattr(content, 'm_folderName'):
                        key = getattr(content, 'm_folderName')

                    aux = None
                    if key == 'TriggerMenu' and 'TriggerMenuAux.' in persistent_instances:
                        aux = persistent_instances['TriggerMenuAux.']
                    elif key == 'TriggerMenuAux.':
                        continue

                    meta_dict[filename][key] = _convert_value(content, aux)

            # This is a required workaround that temporarily fixes ATEAM-560, which originated from ATEAM-531
            # ATEAM-560: https://its.cern.ch/jira/browse/ATEAM-560
            # ATEAM-531: https://its.cern.ch/jira/browse/ATEAM-531
            # This change removes all duplicate values present in some files due
            # to the improper merging of two IOVMetaDataContainers.
            if unique_tag_info_values:
                msg.info(
                    'MetaReader is called with the parameter "unique_tag_info_values" set to True. '
                    'This is a workaround to remove all duplicate values from "/TagInfo" key'
                )
                if '/TagInfo' in meta_dict[filename]:
                    for key, value in meta_dict[filename]['/TagInfo'].items():
                        if isinstance(value, list):
                            unique_list = list(set(value))
                            meta_dict[filename]['/TagInfo'][key] = unique_list[
                                0] if len(unique_list) == 1 else unique_list

            if promote is None:
                promote = mode == 'lite' or mode == 'peeker'

            # Filter the data and create a prettier output for the 'lite' mode
            if mode == 'lite':
                meta_dict = make_lite(meta_dict)

            if mode == 'peeker':
                meta_dict = make_peeker(meta_dict)

            if promote:
                meta_dict = promote_keys(meta_dict)

        # ----- retrieves metadata from bytestream (BS) files (RAW, DRAW) ------------------------------------------#
        elif current_file_type == 'BS':
            import eformat

            # store the number of entries
            bs = eformat.istream(filename)
            meta_dict[filename]['nentries'] = bs.total_events

            # store the 'guid' value
            data_reader = eformat.EventStorage.pickDataReader(filename)
            assert data_reader, 'problem picking a data reader for file [%s]' % filename

            if hasattr(data_reader, 'GUID'):
                meta_dict[filename]['file_guid'] = getattr(
                    data_reader, 'GUID')()

            # if the flag full is set to true then grab all metadata
            # ------------------------------------------------------------------------------------------------------#
            if mode != "tiny":
                bs_metadata = {}

                for md in data_reader.freeMetaDataStrings():
                    if md.startswith('Event type:'):
                        k = 'eventTypes'
                        v = []
                        if 'is sim' in md:
                            v.append('IS_SIMULATION')
                        else:
                            v.append('IS_DATA')

                        if 'is atlas' in md:
                            v.append('IS_ATLAS')
                        else:
                            v.append('IS_TESTBEAM')

                        if 'is physics' in md:
                            v.append('IS_PHYSICS')
                        else:
                            v.append('IS_CALIBRATION')

                        bs_metadata[k] = tuple(v)

                    elif md.startswith('GeoAtlas:'):
                        k = 'geometry'
                        v = md.split('GeoAtlas:')[1].strip()
                        bs_metadata[k] = v

                    elif md.startswith('IOVDbGlobalTag:'):
                        k = 'conditions_tag'
                        v = md.split('IOVDbGlobalTag:')[1].strip()
                        bs_metadata[k] = v

                    elif '=' in md:
                        k, v = md.split('=')
                        bs_metadata[k] = v

                bs_metadata['runNumbers'] = getattr(data_reader, 'runNumber')()
                bs_metadata['lumiBlockNumbers'] = getattr(
                    data_reader, 'lumiblockNumber')()
                bs_metadata['projectTag'] = getattr(data_reader,
                                                    'projectTag')()
                bs_metadata['stream'] = getattr(data_reader, 'stream')()
                #bs_metadata['beamType'] = getattr(data_reader, 'beamType')()
                beamTypeNbr = getattr(data_reader, 'beamType')()
                # According to info from Rainer and Giuseppe the beam type is
                # 0: no beam
                # 1: protons
                # 2: ions
                if beamTypeNbr == 0:
                    bs_metadata['beamType'] = 'cosmics'
                elif beamTypeNbr == 1 or beamTypeNbr == 2:
                    bs_metadata['beamType'] = 'collisions'
                else:
                    bs_metadata['beamType'] = 'unknown'

                bs_metadata['beamEnergy'] = getattr(data_reader,
                                                    'beamEnergy')()

                meta_dict[filename]['eventTypes'] = bs_metadata.get(
                    'eventTypes', [])
                meta_dict[filename]['GeoAtlas'] = bs_metadata.get(
                    'geometry', None)
                meta_dict[filename]['conditions_tag'] = bs_metadata.get(
                    'conditions_tag', None)

                # Promote up one level
                meta_dict[filename]['runNumbers'] = [
                    bs_metadata.get('runNumbers', None)
                ]
                meta_dict[filename]['lumiBlockNumbers'] = [
                    bs_metadata.get('lumiBlockNumbers', None)
                ]
                meta_dict[filename]['beam_type'] = bs_metadata.get(
                    'beamType', None)
                meta_dict[filename]['beam_energy'] = bs_metadata.get(
                    'beamEnergy', None)
                meta_dict[filename]['stream'] = bs_metadata.get('stream', None)

                if not data_reader.good():
                    # event-less file...
                    meta_dict[filename]['runNumbers'].append(
                        bs_metadata.get('run_number', 0))
                    meta_dict[filename]['lumiBlockNumbers'].append(
                        bs_metadata.get('LumiBlock', 0))

                ievt = iter(bs)
                evt = next(ievt)
                evt.check()  # may raise a RuntimeError
                processing_tags = [
                    dict(stream_type=tag.type,
                         stream_name=tag.name,
                         obeys_lbk=bool(tag.obeys_lumiblock))
                    for tag in evt.stream_tag()
                ]
                meta_dict[filename]['processingTags'] = [
                    x['stream_name'] for x in processing_tags
                ]
                meta_dict[filename]['evt_number'] = [evt.global_id()]
                meta_dict[filename]['run_type'] = [
                    eformat.helper.run_type2string(evt.run_type())
                ]

                # fix for ATEAM-122
                if len(bs_metadata.get('eventTypes',
                                       '')) == 0:  # see: ATMETADATA-6
                    evt_type = ['IS_DATA', 'IS_ATLAS']
                    if bs_metadata.get('stream', '').startswith('physics_'):
                        evt_type.append('IS_PHYSICS')
                    elif bs_metadata.get('stream',
                                         '').startswith('calibration_'):
                        evt_type.append('IS_CALIBRATION')
                    elif bs_metadata.get('projectTag', '').endswith('_calib'):
                        evt_type.append('IS_CALIBRATION')
                    else:
                        evt_type.append('Unknown')

                    meta_dict[filename]['eventTypes'] = evt_type

                if mode == 'full':
                    meta_dict[filename]['bs_metadata'] = bs_metadata

        # ------ Throw an error if the user provide other file types -------------------------------------------------#
        else:
            msg.error(
                'Unknown filetype for {0} - there is no metadata interface for type {1}'
                .format(filename, current_file_type))
            return None

    return meta_dict
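
# The beam-type decoding above collapses the raw EventStorage integer into
# the strings used by the metadata dictionary. A minimal stand-alone helper,
# assuming the same 0/1/2 convention documented in the comments:
def decode_beam_type(beam_type_nbr):
    """Map the raw beam type (0: no beam, 1: protons, 2: ions)."""
    if beam_type_nbr == 0:
        return 'cosmics'          # no-beam runs are treated as cosmics
    if beam_type_nbr in (1, 2):
        return 'collisions'
    return 'unknown'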
Example #30
def main(filelist, EventsList):

    ####### Input-Output Info follows

    input_file = filelist
    max = len(EventsList)
    tmpdir = commands.getoutput("echo $TMPDIR")

    if (os.path.exists(tmpdir)):
        print '..', tmpdir, "already exists"
    else:
        print ".. Generating", tmpdir
        os.system("mkdir $TMPDIR")

    currentTime = datetime.now().strftime("%Y-%m-%d_%H%M%S")
    os.system("mkdir $TMPDIR/" + currentTime)

    output_dir = tmpdir + "/" + currentTime
    print
    print '****** Output dir is:', output_dir, '******'
    print

    flag_written = (0, 0)
    write_counter = 0

    print ".. Opening file: %s" % (input_file)

    file = open(input_file, 'r')

    line_counter = 0

    for line in file:

        #    print line.strip(), flag_written

        if (flag_written[0] == 1): break

        command_cp_from_CAF = 'xrdcp root://castoratlas/' + line.strip() + " " + tmpdir + '/Data.data'

        print '.... making local copy: ', command_cp_from_CAF

        copyOutput = commands.getoutput(command_cp_from_CAF)

        if "Permission denied" in copyOutput:
            print ". Permission denied, continue..."
            continue
        if "No such file or directory" in copyOutput:
            print ". No such file or directory, continue..."
            continue
        try:
            #    no_try = 1
            #    if (no_try==1):

            file_to_read = tmpdir + '/Data.data'

            print ".. Opening local copy of input file: %s" % line.strip()

            line_counter += 1

            input = eformat.istream(file_to_read.strip())

            ## Updated from Brian's script - this info needs to be attached in all output files.
            dr = EventStorage.pickDataReader(file_to_read.strip())
            output = eformat.ostream(core_name="subset",
                                     directory=output_dir,
                                     run_number=dr.runNumber(),
                                     trigger_type=dr.triggerType(),
                                     detector_mask=dr.detectorMask(),
                                     beam_type=dr.beamType(),
                                     beam_energy=dr.beamEnergy())

            (flag_written,
             write_counter) = event_analysis(input, output, write_counter,
                                             EventsList)

            print '... Processed File #', line_counter
            print '... Events written out so far', write_counter
            print

            command_delete = 'rm -rf ' + tmpdir + '/Data.data'

            print '.... cleaning up: ', command_delete

            os.system(command_delete)

            print
            print

            if (flag_written[1] == 1):
                print "*** Wrote", write_counter, "events"
                tmp_file_name = output.last_filename()
                del output
                output_name = (output_dir + "/" + opt.run + "_" +
                               str(write_counter) + "_" + opt.strtype + "_" +
                               opt.strname + "_" + str(line_counter))
                print "Writting output file: ", output_name, "with", write_counter, "events"
                os.rename(tmp_file_name, output_name)

            else:
                tmp_file_name = output.last_filename()
                del output
                os.remove(tmp_file_name)

            print
            print

        except (NameError, IOError):
            print "OOPS! Input Data File Not Found - or a Bug..! (Please report it!)",
Example #31
#!/usr/bin/env python
#
#Script to obtain the run number in the input data file for partition test,
#in order to use a consistent set of conditions wrt MT/PT test.

from RecExConfig.RecFlags import rec
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags as acf

import optparse

parser = optparse.OptionParser(description="Get run number from first event in file")

parser.add_option("--L2Input",
                  action = "store",
                  help = "BS input file for athenaMT")
# Now parse the options
(opts, args) = parser.parse_args()
run=""
file="" 
if not opts.L2Input:
    parser.error("need option --L2Input <BSfile>")

file = "%s" % opts.L2Input

import eformat
f = eformat.istream(file)
run = f[0].run_no()

#print 'run number : ' , run
print run
Example #32

import sys
import os
import logging
import eformat

logging.getLogger('').setLevel(logging.INFO)

version = eformat.helper.Version().human_major()

if len(sys.argv) == 1:
  print "Converts files from any older supported format to v%s" % version 
  print "Files are stored in the current working directory"
  print "usage: %s <data-file> [+data-file]" % sys.argv[0]
  sys.exit(1)

logging.info("Legend: 'x' => invalid fragments; '.' => converted correctly")

for f in sys.argv[1:]:
  istr = eformat.istream(f)
  logging.info("Working at file %s (%d fragments)" % (f, len(istr)))
  ostr = eformat.ostream()
  curr = istr.__iter__()
  try:
    while True:
      try:
        e = curr.next()
        e.check_tree()
      except RuntimeError, ex:
        sys.stdout.write('x')
        sys.stdout.write('\n')
        sys.stdout.write(str(ex))
        sys.stdout.write('\n => Fragment Ignored!\n')
        sys.stdout.flush()
      else:
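
# The example above is cut off inside the while loop. A self-contained sketch
# of the same validate-and-convert pattern (the write-out tail is an
# assumption, not the recovered original):
def convert_file(path):
    """Validate each fragment of a file and re-write the valid ones."""
    istr = eformat.istream(path)
    ostr = eformat.ostream()
    for e in istr:
        try:
            e.check_tree()
        except RuntimeError:
            sys.stdout.write('x')   # invalid fragment, skipped
        else:
            ostr.write(e)           # assumed tail: valid events are re-written
            sys.stdout.write('.')
        sys.stdout.flush()
    return ostr.last_filename()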
Example #33
def my_split(argv):
    """Runs the splitting routines"""

    import sys, os
    import eformat, logging
    import EventApps.myopt as myopt

    option = {}
    option["start-event"] = {
        "short": "a",
        "arg": True,
        "default": 0,
        "description": "The number of events I should skip from the begin",
    }
    option["max-events"] = {
        "short": "n",
        "arg": True,
        "default": 0,
        "description": "The total maximum number of events you want in the output file. The special value 0 means up to the end.",
    }
    option["output"] = {"short": "o", "arg": True, "default": "", "description": "Filename of the output file"}
    option["verbosity"] = {
        "short": "d",
        "arg": True,
        "default": logging.INFO,
        "description": "How much I should say about the execution",
    }

    parser = myopt.Parser(extra_args=True)
    for (k, v) in option.items():
        parser.add_option(k, v["short"], v["description"], v["arg"], v["default"])

    if len(sys.argv) == 1:
        print parser.usage('global "%s" options:' % sys.argv[0])
        sys.exit(1)

    # process the global options
    (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])

    # now the things which require global defaults
    logging.getLogger("").setLevel(kwargs["verbosity"])

    stream = eformat.istream(extra)
    ostream = eformat.ostream()

    total = 0
    for e in stream:
        if kwargs["start-event"] > 0:
            kwargs["start-event"] -= 1
            continue

        if kwargs["max-events"] > 0 and total >= kwargs["max-events"]:
            break

        ostream.write(e)
        sys.stdout.write(".")
        sys.stdout.flush()
        total += 1

    sys.stdout.write("\n")
    sys.stdout.flush()

    oname = ostream.last_filename()
    if len(kwargs["output"]) != 0:
        del ostream
        os.rename(oname, kwargs["output"])
        oname = kwargs["output"]

    sys.stdout.write("Wrote %d events in %s\n" % (total, oname))
    sys.exit(0)
  def test05_CanRetrieveStreamTags(self):
    input = eformat.istream(INPUT)
    for event in input:
      tag = event.stream_tag()
Example #35
def my_dump(bsfile):
    """Runs the dumping routines"""

    global smk

    # open a file
    print "=" * 100
    print "Opening", bsfile

    input = eformat.istream(bsfile)

    if args.interactive:
        import code
        code.interact(local=locals())

    event_count = 0
    l2_event_count = 0
    ef_event_count = 0
    offset = args.skip if args.skip else 0
    for event in input:
        if offset > 0:
            offset -= 1
            continue

        event_count += 1

        if args.events is not None and event_count > args.events: break

        print "======================= RunNumber : %d , Event: %d,  LB: %d, LVL1_ID: %d, Global_ID: %d bunch-x: %d TT: x%x ==========================" \
              % ( event.run_no(), event_count, event.lumi_block(), event.lvl1_id(), event.global_id(), event.bc_id(), event.lvl1_trigger_type())

        smk = args.smk
        if args.decodeItems and args.smk == 0:  # Need to get SMK from HLT result
            hltrob = [
                f for f in event.children()
                if f.source_id().subdetector_id() in [
                    eformat.helper.SubDetector.TDAQ_LVL2,
                    eformat.helper.SubDetector.TDAQ_EVENT_FILTER
                ]
            ]
            if len(hltrob) == 0:
                print "ERROR: Cannot find HLT result. Will not decode trigger item names."
                args.decodeItems = False
            else:
                res.load(hltrob[0])
                smk = res.getConfigSuperMasterKey()
                if smk == 0:
                    print "ERROR: No SMK stored in HLT result. Will not decode trigger item names."
                    args.decodeItems = False

        if args.l1:
            #print "L1 TriggerInfo: ", ["0x%x"%i for i in event.lvl1_trigger_info() ]
            words = Lvl1_Info(event)
            print "L1 CTP IDs - TBP: ", printL1Items(words[0], smk)
            print "L1 CTP IDs - TAP: ", printL1Items(words[1], smk)
            print "L1 CTP IDs - TAV: ", printL1Items(words[2], smk)

        if args.ctp:
            CTP_Info(event, int(args.ctp))

        if args.l2:
            print "L2 TriggerInfo: ", [
                "0x%x" % i for i in event.lvl2_trigger_info()
            ]

        # loop over the SubDetFragments and find LVL2
        if args.l2res or args.sizeSummary:
            found = False
            for f in event.children():
                if f.source_id().subdetector_id() == eformat.helper.SubDetector.TDAQ_LVL2:
                    print '.. %s %s %s bytes' % (f.__class__.__name__,
                                                 f.source_id(),
                                                 f.fragment_size_word() * 4)
                    res.load(f)
                    found = True
                    l2_event_count += 1

                    if args.l2res:
                        print_HLTResult(res, args)
                    if args.sizeSummary:
                        collect_feature_sizes(featureSizes, res)
                    print ".. EOF HLTResult for L2"
            if not found:
                print ".. No HLTResult for L2"

        if args.ef:
            print "EF TriggerInfo: ", [
                "0x%x" % i for i in event.event_filter_info()
            ]

        # loop over the SubDetFragments and find EF
        if args.efres or args.sizeSummary:
            found = False
            for f in event.children():
                if f.source_id().subdetector_id() == eformat.helper.SubDetector.TDAQ_EVENT_FILTER:
                    print '.. %s %s %s bytes' % (f.__class__.__name__,
                                                 f.source_id(),
                                                 f.fragment_size_word() * 4)
                    try:
                        res.load(f)
                        found = True
                        ef_event_count += 1

                        if args.efres:
                            print_HLTResult(res, args)
                        if args.sizeSummary:
                            collect_feature_sizes(featureSizes, res)
                    except Exception, ex:
                        print '... **** problems in analyzing payload', ex
                        print '... **** raw data[:10]', list(f.rod_data())[:10]
                    print ".. EOF HLTResult for EF"
            if not found:
                print ".. No HLTResult for EF"

        if args.stag:
            print "StreamTag: ", [(s.name, s.type) for s in event.stream_tag()]
Example #36
def main():
    args = get_parser().parse_args()
    logging.basicConfig(stream=sys.stdout,
                        format='%(levelname)-8s %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)

    if args.copyFrom:
        logging.info('Reading events from %s and metadata from %s', args.file, args.copyFrom)
    else:
        logging.info('Reading events and metadata from %s', args.file)
    meta_input = args.copyFrom if args.copyFrom else args.file
    reader = EventStorage.pickDataReader(meta_input)
    input_stream = eformat.istream(args.file)

    # Read metadata from input file
    metadata_basic = {}  # arguments for eformat.ostream
    metadata_extra = {}  # metadata passed as dictionary
    metadata_basic['runNumber'] = reader.runNumber()
    metadata_basic['triggerType'] = reader.triggerType()
    metadata_basic['detectorMask'] = reader.detectorMask()
    metadata_basic['beamType'] = reader.beamType()
    metadata_basic['beamEnergy'] = reader.beamEnergy()
    metadata_extra['Stream'] = reader.stream()
    metadata_extra['Project'] = reader.projectTag()
    metadata_extra['LumiBlock'] = reader.lumiblockNumber()

    logging.debug('Input metadata_basic = %s', metadata_basic)
    logging.debug('Input metadata_extra = %s', metadata_extra)

    # Change metadata
    if args.runNumber:
        metadata_basic['runNumber'] = args.runNumber
    if args.triggerType:
        metadata_basic['triggerType'] = args.triggerType
    if args.detectorMask:
        metadata_basic['detectorMask'] = args.detectorMask
    if args.beamType:
        metadata_basic['beamType'] = beam_type_dict[args.beamType]
    if args.beamEnergy:
        metadata_basic['beamEnergy'] = args.beamEnergy
    if args.stream:
        metadata_extra['Stream'] = args.stream
    if args.projectTag:
        metadata_extra['Project'] = args.projectTag
    if args.lumiBlock:
        metadata_extra['LumiBlock'] = args.lumiBlock

    logging.debug('Updated metadata_basic = %s', metadata_basic)
    logging.debug('Updated metadata_extra = %s', metadata_extra)

    # Create new file name
    file_name_base = args.outputName
    if not file_name_base:
        # Get the name elements
        ptag = metadata_extra['Project']
        runno = metadata_basic['runNumber']
        stream = metadata_extra['Stream']
        lbn = metadata_extra['LumiBlock']
        # Build the name
        file_name_list = []
        file_name_list.append(ptag if ptag else 'data')
        file_name_list.append('{:08d}'.format(runno if runno else 0))
        file_name_list.append(stream if stream else 'unknown_stream')
        file_name_list.append('lb{:04d}'.format(lbn if lbn else 0))
        file_name_base = '.'.join(file_name_list)

    # Write the new file
    metadata_extra_strings = ['{:s}={:s}'.format(k, str(v)) for k, v in six.iteritems(metadata_extra)]
    output_stream = eformat.ostream(
        core_name         = file_name_base,
        run_number        = metadata_basic['runNumber'],
        trigger_type      = metadata_basic['triggerType'],
        detector_mask     = metadata_basic['detectorMask'],
        beam_type         = metadata_basic['beamType'],
        beam_energy       = metadata_basic['beamEnergy'],
        meta_data_strings = metadata_extra_strings)

    logging.info('Writing file %s', output_stream.current_filename().replace('.writing', '.data'))

    ievt = 0
    nmax = args.numEvents or -1
    for event in input_stream:
        ievt+=1
        if nmax >= 0 and ievt > nmax:
            break
        logging.debug('Writing event %d', ievt)
        output_stream.write(event)
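
# main() above expects a get_parser() helper that the snippet does not show.
# A plausible minimal version (hypothetical; option names inferred from the
# attributes accessed above, beam_type_dict is likewise not shown):
import argparse

def get_parser():
    parser = argparse.ArgumentParser(description='Rewrite bytestream file metadata')
    parser.add_argument('file', help='input bytestream file')
    parser.add_argument('--copyFrom', help='read metadata from this file instead of the input')
    parser.add_argument('--outputName', help='core name of the output file')
    parser.add_argument('--runNumber', type=int)
    parser.add_argument('--triggerType', type=int)
    parser.add_argument('--detectorMask', type=int)
    parser.add_argument('--beamType', help='key into the (not shown) beam_type_dict')
    parser.add_argument('--beamEnergy', type=int)
    parser.add_argument('--stream')
    parser.add_argument('--projectTag')
    parser.add_argument('--lumiBlock', type=int)
    parser.add_argument('--numEvents', type=int, help='maximum number of events to copy')
    parser.add_argument('-v', '--verbose', action='store_true')
    return parser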
Example #37
def main():
    from optparse import OptionParser
    import eformat

    parser = OptionParser(usage='%prog FILE')
    parser.add_option('-m',
                      '--moduleid',
                      type='int',
                      action='store',
                      default=0,
                      help='Module ID of CTP fragment [%default]')

    (opt, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        return 1

    for event in eformat.istream(args[0]):
        ctp_robs = [
            rob for rob in event.children()
            if rob.source_id().subdetector_id() == eformat.helper.SubDetector.TDAQ_CTP
            and rob.source_id().module_id() == opt.moduleid
        ]

        if len(ctp_robs) == 0:
            print("Cannot find CTP ROB with module ID %d" % opt.moduleid)
            continue

        rob = ctp_robs[0]
        fe = _CTPfragment.FolderEntry()
        fe.folderIndex = 1
        fe.lumiBlock = 54

        fe2 = _CTPfragment.FolderEntry()
        fe2.folderIndex = 2
        fe2.lumiBlock = 59

        #x = getExtraPayloadObject(rob)
        x = _CTPfragment.ExtraPayload()
        x.setL1PSK(255)
        x.updateFolder(fe)
        x.updateFolder(fe2)
        new_ctp_rob = setHltExtraPayloadWords(rob, [d for d in x.serialize()])
        new_event = eformat.write.FullEventFragment()
        new_event.copy_header(event)
        for r in event.children():
            if r.source_id().subdetector_id() != eformat.helper.SubDetector.TDAQ_CTP:
                new_event.append(eformat.write.ROBFragment(r))

        new_event.append(eformat.write.ROBFragment(new_ctp_rob))

        event = new_event.readonly()
        #new_ctp_rob = eformat.write.ROBFragment(new_ctp_rob)
        #setHltCounter(new_ctp_rob,100)
        rob = new_ctp_rob

        x = getExtraPayloadObject(rob)
        folderUpdates = _CTPfragment.getFolderUpdates(x)
        upd = ''
        for f in folderUpdates:
            upd += ('[%d,%d]' % (f.second.folderIndex, f.second.lumiBlock))

        print(
            "L1ID %10d, LB %4d, Version %d, Bunch %d, HLT counter: %3d, Payload #%d %s L1PSK %d BGK %d COOLUPD %s"
            % (event.lvl1_id(), event.lumi_block(), ctpFormatVersion(rob),
               lvl1AcceptBunch(rob), hltCounter(rob),
               numberHltExtraPayloadWords(rob), hltExtraPayloadWords(rob),
               x.getL1PSK(), x.getBGK(), upd))
Example #38
        assert data_reader, \
               'problem picking a data reader for file [%s]'%fname

        beam_type   = '<beam-type N/A>'
        try:
            beam_type = data_reader.beamType()
        except Exception, err:
            msg.warning ("problem while extracting beam-type information")

        beam_energy = '<beam-energy N/A>'
        try:
            beam_energy = data_reader.beamEnergy()
        except Exception, err:
            msg.warning ("problem while extracting beam-energy information")

        bs = ef.istream(fname)

        self._metadata['nentries'] = bs.total_events

        bs_metadata = {}
        
        for md in data_reader.freeMetaDataStrings():
            if md.startswith('Event type:'):
                k = 'evt_type'
                v = []
                if 'is sim' in md:   v.append('IS_SIMULATION')
                else:                v.append('IS_DATA')
                if 'is atlas' in md: v.append('IS_ATLAS')
                else:                v.append('IS_TESTBEAM')
                if 'is physics' in md: v.append('IS_PHYSICS')
                else:                  v.append('IS_CALIBRATION')
Example #39
    def EDMDecodingVersion(flags):
        log.debug("Attempting to determine EDMDecodingVersion.")
        version = 3
        if flags.Input.Format == "BS":
            log.debug("EDMDecodingVersion: Input format is ByteStream")
            inputFileName = flags.Input.Files[0]
            if not inputFileName and flags.Common.isOnline():
                log.debug(
                    "EDMDecodingVersion: Online reconstruction, no input file. Return default version, i.e. AthenaMT."
                )
                return version

            log.debug("EDMDecodingVersion: Checking ROD version.")
            import eformat
            from libpyeformat_helper import SubDetector
            bs = eformat.istream(inputFileName)

            rodVersionM = -1
            rodVersionL = -1
            # Find the first HLT ROBFragment in the first event
            for robf in bs[0]:
                if robf.rob_source_id().subdetector_id() == SubDetector.TDAQ_HLT:
                    rodVersionM = robf.rod_minor_version() >> 8
                    rodVersionL = robf.rod_minor_version() & 0xFF
                    log.debug(
                        "EDMDecodingVersion: HLT ROD minor version from input file is {:d}.{:d}"
                        .format(rodVersionM, rodVersionL))
                    break

            if rodVersionM >= 1:
                version = 3
                return version
            log.info(
                "EDMDecodingVersion: Could not determine ROD version -- falling back to run-number-based determination"
            )

            # Use run number to determine decoding version
            runNumber = flags.Input.RunNumber[0]
            log.debug(
                "EDMDecodingVersion: Read run number {}.".format(runNumber))

            boundary_run12 = 230000
            boundary_run23 = 368000

            if runNumber <= 0:
                log.warning(
                    "EDMDecodingVersion: Cannot determine decoding version because run number {} is invalid. Leaving the default version."
                    .format(runNumber))
            elif runNumber < boundary_run12:
                # Run-1 data
                version = 1
            elif runNumber < boundary_run23:
                # Run-2 data
                version = 2
            else:
                # Run-3 data
                version = 3
        else:
            log.debug(
                "EDMDecodingVersion: Input format is POOL -- determine from input file collections."
            )
            # POOL files: decide based on HLT output type present in file
            if "HLTResult_EF" in flags.Input.Collections:
                version = 1
            elif "TrigNavigation" in flags.Input.Collections:
                version = 2
            elif "HLTNav_Summary" in flags.Input.Collections:
                version = 3
            elif flags.Input.Format == "POOL":
                # If running Trigger on RDO input (without previous trigger result), choose Run-3
                version = 3
        log.info("Determined EDMDecodingVersion to be {}.".format({
            1: "Run 1",
            2: "Run 2",
            3: "AthenaMT"
        }[version]))
        return version
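
# The helper above unpacks the 16-bit ROD minor version into a major.minor
# pair; a worked example of the bit arithmetic:
minor_version = 0x0102                   # example rod_minor_version() word
version_major = minor_version >> 8       # high byte: 0x01 -> 1
version_minor = minor_version & 0xFF     # low byte:  0x02 -> 2
assert (version_major, version_minor) == (1, 2)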
Example #40
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('file', metavar='FILE', nargs=1, help='input file')
    parser.add_argument('-n',
                        '--events',
                        type=int,
                        default=-1,
                        help='number of events to process')
    parser.add_argument('-o',
                        '--output',
                        type=str,
                        help='core output file name')

    args = parser.parse_args()
    dr = EventStorage.pickDataReader(args.file[0])
    output = eformat.ostream(core_name=args.output or dr.fileNameCore(),
                             run_number=dr.runNumber(),
                             trigger_type=dr.triggerType(),
                             detector_mask=dr.detectorMask(),
                             beam_type=dr.beamType(),
                             beam_energy=dr.beamEnergy(),
                             meta_data_strings=dr.freeMetaDataStrings(),
                             compression=dr.compression())

    i = 0
    for event in eformat.istream(args.file[0]):
        i += 1
        if args.events > 0 and i > args.events:
            break
        newevt = modify(event)
        output.write(newevt)
  def test03_CheckEventInCompressedFile(self):
    raw = eformat.istream(INPUT)
    comp = eformat.istream(OUTPUT)

    self.assertTrue(all(a == b for a, b in zip(raw, comp)))
Example #42
#!/usr/bin/env python

# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration

import sys
import eformat
from eformat import helper
from collections import defaultdict

filename = sys.argv[1]
bsfile = eformat.istream(filename)
print "Read file %s with %i events" % (filename, bsfile.total_events)

event = bsfile[0]

rob_by_subdet = defaultdict(list)
subdets = set()

subdetsData = [
    'LAR_EM_BARREL_A_SIDE',
    'LAR_EM_BARREL_C_SIDE',
    'LAR_EM_ENDCAP_A_SIDE',
    'LAR_EM_ENDCAP_C_SIDE',
    'LAR_FCAL_A_SIDE',
    'LAR_FCAL_C_SIDE',
    'LAR_HAD_ENDCAP_A_SIDE',
    'LAR_HAD_ENDCAP_C_SIDE',
    'MUON_MDT_BARREL_A_SIDE',
    'MUON_MDT_BARREL_C_SIDE',
    'MUON_MDT_ENDCAP_A_SIDE',
    'MUON_MDT_ENDCAP_C_SIDE',
Example #43
def main():
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('-f',
                        '--file',
                        metavar='FILE',
                        nargs='*',
                        default=[],
                        help='file name')

    parser.add_argument('-g',
                        '--globalid',
                        type=int,
                        action='store',
                        nargs='*',
                        help='Global event ID')

    parser.add_argument('-l',
                        '--lvl1id',
                        type=int,
                        action='store',
                        nargs='*',
                        help='LVL1 ID')

    parser.add_argument('-t',
                        '--time',
                        action=StoreTime,
                        nargs='*',
                        help='Nanosecond time stamp (seconds:nanoseconds)')

    parser.add_argument('-s',
                        '--save',
                        metavar='OUTFILE',
                        nargs='?',
                        action='store',
                        const='trigbs_findevent',
                        help='Save selected events in OUTFILE')

    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Be verbose')

    args = parser.parse_args()

    ofs = None
    if args.save is not None:
        ofs = eformat.ostream(core_name=args.save)

    for f in args.file:
        ifs = eformat.istream(f)
        if args.verbose:
            print('==%s' % f)
        for e in ifs:
            found = True
            if args.globalid is not None and e.global_id() not in args.globalid:
                found = False
            if args.lvl1id is not None and e.lvl1_id() not in args.lvl1id:
                found = False
            if args.time is not None and (
                    e.bc_time_seconds(),
                    e.bc_time_nanoseconds()) not in args.time:
                found = False
            if found:
                if ofs:
                    ofs.write(e)  # save only events that passed all filters
                print('%s %s' % (f, fmtEvent(e, args.time is not None)))
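
# The --time option above uses a StoreTime action that is not part of the
# snippet. A plausible minimal implementation (hypothetical, inferred from
# the 'seconds:nanoseconds' help string and the (sec, nsec) tuple comparison):
import argparse

class StoreTime(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # convert each 'seconds:nanoseconds' string into an int tuple
        times = [tuple(int(x) for x in v.split(':', 1)) for v in values]
        setattr(namespace, self.dest, times)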
  def test01_CanCheckEvent(self):
    input = eformat.istream(INPUT)
    for event in input: event.check()
def my_conf(argv):
  """Runs the dumping routines"""
  import logging 
  from EventApps import myopt

  option = {}
  option['number-of-ross'] = {'short': 'n', 'arg': True,
                              'default': 1,
                              'description': 'The number of ROS in the system'}
  option['py'] = {'short': 'p', 'arg': False,
                  'default': None,
                  'description': 'Dumps as a python list/file'}
  option['ignore'] = {'short': 'v', 'arg': True,
                      'default': '0x007[3589abc].+|0x007[67]0001',
                      'description': 'A (python) regular expression of ROB hexadecimal numbers to ignore when making the robmap list. The comparison is done on a string basis, as a result of parsing each hit in a 0x%08x printf-like format'}
  
  option['verbosity'] = {'short': 'V', 'arg': True,
                         'default': logging.INFO,
                         'description': 'From which level to print system messages [%d, %d]. For details please consult the documentation of python\'s "logging" module' % (logging.NOTSET, logging.CRITICAL)}
  
  parser = myopt.Parser(extra_args=True)
  for (k,v) in option.items():
    parser.add_option(k, v['short'], v['description'], v['arg'], v['default'])
  
  if len(sys.argv) == 1:
    print parser.usage('global "%s" options:' % sys.argv[0])
    sys.exit(1)

  #process the global options
  (kwargs, extra) = parser.parse(sys.argv[1:], prefix='global "%s" options:' % sys.argv[0])

  #now the things which require global defaults
  logging.getLogger('').setLevel(kwargs['verbosity'])
  # os.environ['TDAQ_ERS_DEBUG_LEVEL'] = str(kwargs['debug'])
  import eformat

  stream = eformat.istream(extra)
  rob_list = set() 
  sys.stderr.write('Processing %s events' % len(stream))
  for event in stream:
    sys.stderr.write('.')
    sys.stderr.flush()
    for rob in event: rob_list.add(rob.source_id().code())
  sys.stderr.write('\n')

  if kwargs['py']:
    print "#!/usr/bin/env tdaq_python\n"
    
  print "# This ROB hitlist was generated automatically by %s" % \
      os.path.basename(sys.argv[0])
  print "# It is a python application you can customize. "
  print "# Consult the eformat wiki page. "
  print "# Current date and time is", datetime.datetime(1,1,1).now()
  print ""

  keys = list(rob_list)
  keys.sort()

  # checks if we need to apply filtering...
  if len(kwargs['ignore'].strip()) != 0:
    import re
    ignore = re.compile(kwargs['ignore'].strip())
    keys = [k for k in keys if not ignore.match('0x%08x' % k)]
    
  if kwargs['py']:
    print "# original filename list"
    print "filename = []\n"
    for k in extra: print "filename.append('%s')" % k
    print ""
    
    print "# uniq'fied robhit list"
    print "robhit = []\n"
    for k in keys:
      print "robhit.append(0x%08x)" % k
    print "\nimport logging"
    print "logging.info('ROB hit list (extracted from %d file/s) contains %d unique hits' % (len(filename), len(robhit)))"
    
  else:
    print "# These unique identifiers were extracted from:"
    for k in extra: print "# %s" % k
    print ""
    
    rob_map = random(keys, kwargs['number-of-ross'])
    for k in keys:
      print "0x%08x\tROS-%d" % (k, rob_map[k])
Example #46
def ds_dump(bsfile):
    """Runs the dumping routines"""

    # open a file
    if (args.verbosity > 0):
        print "=" * 100
        print "Opening", bsfile

    input = eformat.istream(bsfile)

    if args.interactive:
        import code
        code.interact(local=locals())

    event_count = 0
    ds_ROB_counts = {}
    offset = args.skip if args.skip else 0
    for event in input:
        if offset > 0:
            offset -= 1
            continue

        event_count += 1
        if args.events is not None and event_count > args.events:
            event_count -= 1
            break

        # Extract the DS module IDs and set the flag for finding the corresponding ROB
        ds_module_ids = {}
        for stag in event.stream_tag():
            if (stag.type == 'calibration') and stag.name.startswith('DataScouting_'):
                ds_module_ids[int(stag.name.split('_')[1])] = (False, stag.name)
                if not ds_ROB_counts.has_key(stag.name):
                    ds_ROB_counts[stag.name] = {
                        'ROBfound': 0,
                        'ROBnotFound': 0,
                        'noContainers': 0,
                        'emptyContainers': 0
                    }

        # loop over the SubDetFragments and find DS ROBs
        for f in event.children():
            if (f.source_id().subdetector_id()
                    == eformat.helper.SubDetector.TDAQ_HLT) and (
                        ds_module_ids.has_key(f.source_id().module_id())):
                if (args.verbosity >= 1):
                    print "======================= RunNumber : %d , Event: %d,  LB: %d, LVL1_ID: %d, Global_ID: %d bunch-x: %d TT: x%x ==========================" \
                        % ( event.run_no(), event_count, event.lumi_block(), event.lvl1_id(), event.global_id(), event.bc_id(), event.lvl1_trigger_type())
                # Print the stream tags
                if (args.verbosity >= 1):
                    print ".. Stream Tags:", [(s.name, s.type)
                                              for s in event.stream_tag()]

                featureSizes = {}
                if (args.verbosity >= 1):
                    print '.. %s %s %s bytes' % (f.__class__.__name__,
                                                 f.source_id(),
                                                 f.fragment_size_word() * 4)
                try:
                    res.load(f)
                    ds_module_ids[f.source_id().module_id()] = (
                        True, ds_module_ids[f.source_id().module_id()][1]
                    )  # The expected ROB was found
                    if (args.verbosity >= 2):
                        print_all_navigation(res)
                    for feature in res.nav_payload:
                        key = feature[0] + '#' + feature[1]
                        if not featureSizes.has_key(key):
                            featureSizes[key] = 0
                        featureSizes[key] += feature[2]
                except Exception, ex:
                    print '... **** problems in analyzing payload', ex
                    print '... **** raw data[:10]', list(f.rod_data())[:10]
                # check that feature containers are there and that they are not empty
                if (featureSizes == {}):
                    if (args.verbosity >= 1):
                        print " FATAL : No containers are available for Data Scouting HLTResult with module ID = ", \
                            f.source_id().module_id(), ", Stream name = ", \
                            ds_module_ids[f.source_id().module_id()][1]
                    ds_ROB_counts[ds_module_ids[f.source_id().module_id()][1]]['noContainers'] += 1
                for item in featureSizes.iteritems():
                    if (item[1] == 0):
                        if (args.verbosity >= 1):
                            print " ERROR : Empty container for feature = ", item[0]
                        ds_ROB_counts[ds_module_ids[f.source_id().module_id()][1]]['emptyContainers'] += 1

                if (args.verbosity >= 2):
                    print ".. EOF DS HLTResult with module ID = ", f.source_id().module_id()

        # check if all expected DS ROBs from the StreamTags were found
        for item in ds_module_ids.iteritems():
            if not item[1][0]:
                if (args.verbosity >= 1):
                    print " FATAL : No Data Scouting HLTResult found for expected module ID = ", \
                        item[0], ", Stream name = ", item[1][1]
                ds_ROB_counts[item[1][1]]['ROBnotFound'] += 1
            else:
                ds_ROB_counts[item[1][1]]['ROBfound'] += 1
Example #47
    if c.id == socket.gethostname():
        c.HW_Tag = '-'.join(os.environ['CMTCONFIG'].split('-')[:2])
        db.updateObjects([c])

## If an L2PU/PT dies make this an error
apps = db.getObject('PTTemplateApplication')
apps += db.getObject('L2PUTemplateApplication')

for a in apps:
    a.IfDies = 'Error'
    a.IfFailed = 'Error'

## Get number of events from data file
import robhit
import eformat
events = len(eformat.istream(robhit.filename))

## Increase LVL2 timeout, set max number of events
l2sv = db.getObject('L2SVConfiguration')
for a in l2sv:
    a.l2puTimeout_ms = 20000
    a.eventMax = events

## Increase timeout for stop transition
efd = db.getObject('EFD_TemplateApplication')
for a in efd:
    a.ActionTimeout = 600
    a.ShortTimeout = 600

l2pu = db.getObject('L2PUConfiguration')
for a in l2pu:
Example #48
def my_conf(argv):
    """Runs the dumping routines"""
    import logging
    from EventApps import myopt

    option = {}
    option['ignore'] = {
        'short': 'v',
        'arg': True,
        'default': '0x007[3589abc].+|0x007[67]0001',
        'description': 'A (python) regular expression of ROB hexadecimal numbers to ignore when making the robmap list. The comparison is done on a string basis, as a result of parsing each hit in a 0x%08x printf-like format'
    }

    option['verbosity'] = {
        'short': 'V',
        'arg': True,
        'default': logging.INFO,
        'description': 'From which level to print system messages [%d, %d]. For details please consult the documentation of python\'s "logging" module' % (logging.NOTSET, logging.CRITICAL)
    }

    parser = myopt.Parser(extra_args=True)
    for (k, v) in option.items():
        parser.add_option(k, v['short'], v['description'], v['arg'],
                          v['default'])

    if len(sys.argv) == 1:
        print parser.usage('global "%s" options:' % sys.argv[0])
        sys.exit(1)

    #process the global options
    (kwargs, extra) = parser.parse(sys.argv[1:],
                                   prefix='global "%s" options:' % sys.argv[0])

    #now the things which require global defaults
    logging.getLogger('').setLevel(kwargs['verbosity'])
    # os.environ['TDAQ_ERS_DEBUG_LEVEL'] = str(kwargs['debug'])
    import eformat

    stream = eformat.istream(extra)
    rob_list = set()
    sys.stderr.write('Processing %s events' % len(stream))
    for event in stream:
        sys.stderr.write('.')
        sys.stderr.flush()
        for rob in event:
            rob_list.add(rob.source_id().code())
    sys.stderr.write('\n')

    print "# This ROB-ROS map was generated by %s" % \
        os.path.basename(sys.argv[0])
    print "# Current date and time is", datetime.datetime(1, 1, 1).now()
    print "#"

    keys = list(rob_list)
    keys.sort()

    # checks if we need to apply filtering...
    if len(kwargs['ignore'].strip()) != 0:
        import re
        ignore = re.compile(kwargs['ignore'].strip())
        keys = [k for k in keys if not ignore.match('0x%08x' % k)]

    print "# These unique identifiers were extracted from:"
    for k in extra:
        print "# %s" % k
    print "#"
    print "svcMgr.ROBDataProviderSvc.RobRosMapping = {"
    print "#"
    print "# ROB id   : ROS id = SubDetector Id "
    print "#"

    ros_count = 0
    old_sub_det = 0
    for k in keys:
        sub_det = eformat.helper.SourceIdentifier(k).subdetector_id()
        if old_sub_det != sub_det.real:
            old_sub_det = sub_det.real
            ros_count = ros_count + 1
            print "# %02i : Sub Detector = %s" % (ros_count, sub_det)
        print "  %s : %s ," % (hex(k), hex(sub_det.real))
    print "}"
  def test02_CanCheckTreeEvent(self):
    input = eformat.istream(INPUT)
    for event in input: event.check_tree()
Example #50
if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print "Syntax: trigbs_prescaleL1.py FILE"
        sys.exit(1)

    log.setLevel(logging.DEBUG)
    kwargs = {
        'configuration': {
            'db-server': 'TRIGGERDB_RUN1',
            'db-extra': {
                'lvl1key': 300
            }
        }
    }

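    # NB: this second assignment replaces the TRIGGERDB_RUN1 configuration above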
    kwargs = {
        'configuration': {
            'db-server': 'TRIGGERDBREPR',
            'db-extra': {
                'lvl1key': 30
            }
        }
    }

    ostr = eformat.ostream()
    for e in eformat.istream(sys.argv[1]):
        kwargs['event'] = e
        new_event = modify_general(**kwargs)
        ostr.write(new_event)
Example #51

def peb_writer(argv):
    """Runs the splitting routines"""

    import eformat, logging
    import EventApps.myopt as myopt

    option = {}

    # run mode options
    option['start-event'] = {
        'short': 'a',
        'arg': True,
        'default': 0,
        'group': 'Run mode',
        'description': 'Number of events which should be skipped from the beginning'
    }

    option['max-events'] = {
        'short': 'n',
        'arg': True,
        'default': 0,
        'group': 'Run mode',
        'description': 'Maximum number of events in the output file. 0 means all useful events from the input.'
    }

    option['verbosity'] = {
        'short': 'v',
        'arg': True,
        'default': logging.INFO,
        'group': 'Run mode',
        'description': 'Log verbosity'
    }

    option['progress-bar'] = {
        'short': 'P',
        'arg': False,
        'default': None,
        'group': 'Run mode',
        'description': 'Show progress bar when running interactively'
    }

    option['output-dir'] = {
        'short': 'd',
        'arg': True,
        'default': '.',
        'group': 'Run mode',
        'description': 'Directory in which the output file should be written'
    }

    # stream tag options
    option['stream-name'] = {
        'short': 's',
        'arg': True,
        'default': 'DataScouting_05_Jets',
        'group': 'Stream Tag',
        'description': 'Name of stream which should be written out'
    }

    option['project-tag'] = {
        'short': 'p',
        'arg': True,
        'default': 'data18_13TeV',
        'group': 'Stream Tag',
        'description': 'Project tag which should be used for the output file'
    }

    option['lumi-block'] = {
        'short': 'l',
        'arg': True,
        'default': 0,
        'group': 'Stream Tag',
        'description': 'Lumiblock number used for the output file. Use 0 if multiple LB in file.'
    }

    parser = myopt.Parser(extra_args=True)
    for (k, v) in option.items():
        parser.add_option(k, v['short'], v['description'], v['arg'],
                          v['default'], v['group'])

    if len(sys.argv) == 1:
        print parser.usage('global "%s" options:' % sys.argv[0])
        sys.exit(1)

    # process the global options
    (kwargs, extra) = parser.parse(sys.argv[1:],
                                   prefix='global "%s" options:' % sys.argv[0])

    # global defaults
    logging.getLogger('').name = os.path.splitext(os.path.basename(
        sys.argv[0]))[0]
    logging.getLogger('').setLevel(kwargs['verbosity'])

    # input data stream
    stream = eformat.istream(extra)
    # input event counter
    totalEvents_in = 0

    # get metadata from inputfile
    dr = eformat.EventStorage.pickDataReader(extra[0])

    # parameters for building the output file name
    runNumber = dr.runNumber()
    outputDirectory = kwargs['output-dir']
    streamName = kwargs['stream-name']
    projectTag = kwargs['project-tag']
    lumiBlockNumber = kwargs['lumi-block']  # if output file can have multiple lumi blocks, use 0
    applicationName = 'athenaHLT'
    productionStep = 'merge'  # output file with multiple lumi blocks
    streamType = 'unknown'  # the real stream type will be extracted from the matching stream tag

    # check that the output directory exists
    if not os.path.isdir(outputDirectory):
        logging.fatal(' Output directory %s does not exist ' % outputDirectory)
        sys.exit(1)

    # output event counter
    totalEvents_out = 0

    # counter of skipped events
    totalEvents_skipped = 0

    # Loop over events
    for e in stream:
        totalEvents_in += 1

        # select events
        if kwargs['start-event'] > 0:
            kwargs['start-event'] -= 1
            totalEvents_skipped += 1
            continue

        if kwargs['max-events'] > 0 and totalEvents_out >= kwargs['max-events']:
            logging.info(' Maximum number of events reached : %d' %
                         kwargs['max-events'])
            break

        # find StreamTags and see if there is a match
        streamTags = e.stream_tag()
        logging.debug(' === New Event nr = %s (Run,Global ID) = (%d,%d) === ' %
                      (totalEvents_in, e.run_no(), e.global_id()))
        for tag in streamTags:
            if tag.name == streamName:
                # the event should be written out
                logging.debug(' Matching event found for stream tag = %s' %
                              tag)
                logging.debug('      Stream Tag:Robs = %s' %
                              [hex(r) for r in tag.robs])
                logging.debug('      Stream Tag:Dets = %s' %
                              [hex(d) for d in tag.dets])

                # check the lumi block number from the event against the lumi block number defined for the file
                # this check is only done if the lumi block number for the file is different from 0
                if lumiBlockNumber > 0:
                    if e.lumi_block() != lumiBlockNumber:
                        logging.error(
                            ' Event (Run,Global ID) = (%d,%d) has a lumi block number %d,'
                            ' which is different from LB = %d for the output file. Event skipped.'
                            % (e.run_no(), e.global_id(), e.lumi_block(),
                               lumiBlockNumber))
                        continue

                # check that all events have the same run number as the output file indicates otherwise skip event
                if e.run_no() != runNumber:
                    logging.error(
                        ' Event (Run,Global ID) = (%d,%d) has a run number,'
                        ' which is different from the run number = %d for the output file. Event skipped.'
                        % (e.run_no(), e.global_id(), runNumber))
                    continue

                # set the overall tag type for the first match
                if streamType != tag.type:
                    streamType = tag.type
                    logging.debug(' streamType set to = %s' % streamType)
                    # create the RAW output file name
                    outRawFile = eformat.EventStorage.RawFileName(
                        projectTag, runNumber, streamType, streamName,
                        lumiBlockNumber, applicationName, productionStep)
                    logging.debug(' set output file name = %s' %
                                  outRawFile.fileNameCore())

                    # create the output stream
                    ostream = eformat.ostream(
                        directory=outputDirectory,
                        core_name=outRawFile.fileNameCore(),
                        run_number=dr.runNumber(),
                        trigger_type=dr.triggerType(),
                        detector_mask=dr.detectorMask(),
                        beam_type=dr.beamType(),
                        beam_energy=dr.beamEnergy())

                # decide what to write out
                if streamType in ('physics', 'express') or (len(tag.robs) == 0 and len(tag.dets) == 0):
                    # write out the full event fragment
                    pbev = eformat.write.FullEventFragment(e)
                    logging.debug(' Write full event fragment ')
                else:
                    # select ROBs to write out
                    rob_output_list = []
                    logging.debug(' Write partial event fragment ')
                    for rob in e:
                        if rob.source_id().code() in tag.robs:
                            rob_output_list.append(rob)
                        if rob.source_id().subdetector_id() in tag.dets:
                            rob_output_list.append(rob)
                    # write out the partial event fragment
                    pbev = eformat.write.FullEventFragment()
                    pbev.copy_header(e)
                    for out_rob in rob_output_list:
                        pbev.append_unchecked(out_rob)

                # put the event onto the output stream
                ostream.write(pbev)
                if (logging.getLogger('').getEffectiveLevel() >
                        logging.DEBUG) and kwargs['progress-bar']:
                    sys.stdout.write('.')
                    sys.stdout.flush()

                # increase output event counter
                totalEvents_out += 1

    # print final statistics
    logging.info('Total number of events processed          = %d ' %
                 totalEvents_in)
    logging.info('Number of events skipped at the beginning = %d ' %
                 totalEvents_skipped)
    logging.info('Number of events written to output file   = %d ' %
                 totalEvents_out)
    if totalEvents_out > 0:
        logging.info('Output file                               = %s ' %
                     ostream.last_filename())

    sys.exit(0)
Example #52
# default input file (used when no files are passed on the command line):
topoFile = 'root://eosatlas//eos/atlas/atlastier0/rucio/data15_13TeV/physics_EnhancedBias/00266904/data15_13TeV.00266904.physics_EnhancedBias.merge.RAW/data15_13TeV.00266904.physics_EnhancedBias.merge.RAW._lb0410._SFO-1._0001.1'

import sys
import eformat
import libpyevent_storage as EventStorage

files=[]
#print len(sys.argv),sys.argv
if len(sys.argv)<=1:
    files=[topoFile]
else:
    files=sys.argv[1:]
maxevents=500

streamT = eformat.istream(files)
print 'The total number of events in files is %d' % (len(streamT))
if len(streamT)>maxevents:
    print 'Max events=',maxevents
print 'Files:', files
fragmentSizes={}
tt_mismatches=0
counter=0
sid_counts={}
for eventT in streamT:       
    counter+=1
    if counter>maxevents:
        break
    print "\n>>> counter, event global_id, LB, bc_id, lvl1_id, l1tt:", counter, eventT.global_id(), eventT.lumi_block(), eventT.bc_id(), eventT.lvl1_id(), eventT.lvl1_trigger_type()
    # CTP
    ctpSIDs= [r.source_id() for r in eventT.children() if r.source_id().subdetector_id()==eformat.helper.SubDetector.TDAQ_CTP]
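
# The event loop above is cut off after collecting the CTP source ids. A
# stand-alone sketch of the per-source-id bookkeeping it appears to set up
# with sid_counts (an assumption, not the recovered original tail):
def count_source_ids(event, counts):
    """Increment counts[source_id_code] for every ROB in the event."""
    for rob in event.children():
        code = rob.source_id().code()
        counts[code] = counts.get(code, 0) + 1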
  def test06_CanGetROBsFromEvent(self):
    input = eformat.istream(INPUT)
    for event in input:
      for i in range(event.nchildren()):
        rob = event[i]
        self.assertEqual(type(rob), eformat.ROBFragment)
Example #54
def EDMDecodingVersion():

    log = logging.getLogger("EDMDecodingVersion")

    # BYTESTREAM: decide Run3 or later based on ROD version, decide Run1/Run2 based on run number
    if globalflags.InputFormat.is_bytestream():

        # Check HLT ROD version in first event of first input file
        from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
        inputFileName = athenaCommonFlags.FilesInput()[0]
        if not inputFileName and athenaCommonFlags.isOnline():
            log.info(
                "Online reconstruction mode, no input file available. Leaving default TriggerFlags.EDMDecodingVersion=%d",
                TriggerFlags.EDMDecodingVersion())
            return

        import eformat
        from libpyeformat_helper import SubDetector
        bs = eformat.istream(inputFileName)

        rodVersionM = -1
        rodVersionL = -1
        # Find the first HLT ROBFragment in the first event
        for robf in bs[0]:
            if robf.rob_source_id().subdetector_id() == SubDetector.TDAQ_HLT:
                rodVersionM = robf.rod_minor_version() >> 8
                rodVersionL = robf.rod_minor_version() & 0xFF
                log.debug("HLT ROD minor version from input file is %d.%d",
                          rodVersionM, rodVersionL)
                break

        if rodVersionM < 0 or rodVersionL < 0:
            log.warning(
                "Cannot determine HLT ROD version from input file, falling back to runNumber-based decision"
            )
        elif rodVersionM >= 1:
            TriggerFlags.EDMDecodingVersion = 3
            log.info(
                "Decoding version set to 3, because running on BS file with HLT ROD version %d.%d",
                rodVersionM, rodVersionL)
            return

        # Use run number to determine decoding version
        from RecExConfig.AutoConfiguration import GetRunNumber
        runNumber = GetRunNumber()

        boundary_run12 = 230000
        boundary_run23 = 368000

        if runNumber <= 0:
            log.error(
                "Cannot determine decoding version because run number %d is invalid. Leaving the default version %d",
                runNumber, TriggerFlags.EDMDecodingVersion())
        elif runNumber < boundary_run12:
            # Run-1 data
            TriggerFlags.EDMDecodingVersion = 1
            TriggerFlags.doMergedHLTResult = False
            log.info(
                "Decoding version set to 1 based on BS file run number (runNumber < %d)",
                boundary_run12)
        elif runNumber < boundary_run23:
            # Run-2 data
            TriggerFlags.EDMDecodingVersion = 2
            log.info(
                "Decoding version set to 2 based on BS file run number (%d < runNumber < %d)",
                boundary_run12, boundary_run23)
        else:
            # Run-3 data
            TriggerFlags.EDMDecodingVersion = 3
            log.info(
                "Decoding version set to 3 based on BS file run number (runNumber > %d)",
                boundary_run23)

    else:
        # POOL files: decide based on HLT output type present in file
        from RecExConfig.ObjKeyStore import cfgKeyStore
        from PyUtils.MetaReaderPeeker import convert_itemList
        cfgKeyStore.addManyTypesInputFile(convert_itemList(layout='#join'))

        TriggerFlags.doMergedHLTResult = True
        if cfgKeyStore.isInInputFile("HLT::HLTResult", "HLTResult_EF"):
            TriggerFlags.EDMDecodingVersion = 1
            TriggerFlags.doMergedHLTResult = False
            log.info(
                "Decoding version set to 1, because HLTResult_EF found in POOL file"
            )
        elif cfgKeyStore.isInInputFile("xAOD::TrigNavigation",
                                       "TrigNavigation"):
            TriggerFlags.EDMDecodingVersion = 2
            log.info(
                "Decoding version set to 2, because TrigNavigation found in POOL file"
            )
        elif cfgKeyStore.isInInputFile("xAOD::TrigCompositeContainer",
                                       "HLTNav_Summary"):
            TriggerFlags.EDMDecodingVersion = 3
            log.info(
                "Decoding version set to 3, because HLTNav_Summary found in POOL file"
            )
        elif rec.readRDO():
            # If running Trigger on RDO input (without previous trigger result), choose Run-2 or Run-3 based on doMT
            if TriggerFlags.doMT():
                TriggerFlags.EDMDecodingVersion = 3
                log.info(
                    "Decoding version set to 3, because running Trigger with doMT=True"
                )
            else:
                TriggerFlags.EDMDecodingVersion = 2
                log.info(
                    "Decoding version set to 2, because running Trigger with doMT=False"
                )
        else:
            log.warning(
                "Cannot recognise HLT EDM format, leaving default TriggerFlags.EDMDecodingVersion=%d",
                TriggerFlags.EDMDecodingVersion())
Example #55
def main(filelist, chain_to_write, max, run_number):
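  """Stage each raw file listed in `filelist`, select events for
  `chain_to_write` via event_analysis(), and write at most `max`
  events (-1 = no limit) for `run_number` into a fresh $TMPDIR
  subdirectory."""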

####### Input/output setup

  input_file = filelist

  tmpdir = commands.getoutput("echo $TMPDIR")

  if os.path.exists(tmpdir):
    print tmpdir, "already exists"
  else:
    print "Generating", tmpdir
    os.makedirs(tmpdir)

  # Use a timestamped subdirectory so repeated runs do not collide.
  currentTime = datetime.now().strftime("%Y-%m-%d_%H%M%S")
  output_dir = tmpdir + "/" + currentTime
  os.makedirs(output_dir)
  print
  print '****** Output dir is:',output_dir,'******'
  print
  
  # flag_written = (all_done, file_complete), as returned by event_analysis().
  flag_written = (0, 0)
  write_counter = 0
  
  print "Opening file: %s" % (input_file)
  print "Will write to file chain: ", chain_to_write  

  input_list = open(input_file, 'r')

  line_counter = 0
    
  for line in input_list:

#    print line.strip(), flag_written

    if flag_written[0] == 1: break  # requested number of events already written
      
    # Stage the raw file from CAF to the local tmp area via rfcp.
    command_cp_from_CAF = 'rfcp ' + line.strip() + " " + tmpdir + '/Data.data'
    
    print command_cp_from_CAF
    
    os.system(command_cp_from_CAF)
    
    try:
      
      file_to_read = tmpdir+'/Data.data'
    
      print "Opening file of input file: %s" % line.strip()
      
      line_counter+=1
      
      bs_input = eformat.istream(file_to_read)

      ## Updated from Brian's script - this info needs to be attached in all output files.
      dr = EventStorage.pickDataReader(file_to_read)
      output = eformat.ostream(core_name="subset",
                               directory=output_dir,
                               run_number=dr.runNumber(),
                               trigger_type=dr.triggerType(),
                               detector_mask=dr.detectorMask(),
                               beam_type=dr.beamType(),
                               beam_energy=dr.beamEnergy())
      
      
      (flag_written, write_counter) = event_analysis(bs_input, output, chain_to_write, max, run_number, write_counter)
      
      
      print '... Processed File #',line_counter
      print '... Events written out so far',write_counter
      print
      
      command_delete = 'rm -rf '+tmpdir+'/Data.data' 
      
      print command_delete
      
      os.system(command_delete)    
      
      print
      print
      
    
      if flag_written[1] == 1:
        if int(max) == -1:
          print "*** Wrote all available events", write_counter
        tmp_file_name = output.last_filename()
        del output  # flush all buffers before renaming
        # NOTE: stream_name is assumed to be defined at module scope.
        output_name = output_dir+"/"+run_number+"_"+chain_to_write+"_"+max+"_"+stream_name+"_"+str(line_counter)
        print "Writing output file:", output_name, "with", write_counter, "events"
        os.rename(tmp_file_name, output_name)
        
      print
      print
        
      os.system("rm -rf "+filelist)
      
    except (NameError, IOError):
      print "OOPS! Input data file not found - or a bug..!"
Example #56
    def _process_bs_file(self, fname, evtmax=1, full_details=True):
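        """Fill self._metadata from the raw (bytestream) file `fname`:
        file-level metadata from the EventStorage reader, then per-event
        run/event/lumi/stream-tag information for up to `evtmax` events
        (-1 = all events)."""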
        import eformat as ef

        data_reader = ef.EventStorage.pickDataReader(fname)
        assert data_reader, \
               'problem picking a data reader for file [%s]'%fname

        beam_type = '<beam-type N/A>'
        try:
            beam_type = data_reader.beamType()
        except Exception:
            msg.warning("problem while extracting beam-type information")

        beam_energy = '<beam-energy N/A>'
        try:
            beam_energy = data_reader.beamEnergy()
        except Exception:
            msg.warning("problem while extracting beam-type information")

        bs = ef.istream(fname)

        self._metadata['nentries'] = bs.total_events

        bs_metadata = {}

        # Free metadata strings are semi-structured: decode the known
        # 'Key: value' prefixes explicitly; keep any other 'k=v' pair verbatim.
        for md in data_reader.freeMetaDataStrings():
            if md.startswith('Event type:'):
                k = 'evt_type'
                v = []
                if 'is sim' in md: v.append('IS_SIMULATION')
                else: v.append('IS_DATA')
                if 'is atlas' in md: v.append('IS_ATLAS')
                else: v.append('IS_TESTBEAM')
                if 'is physics' in md: v.append('IS_PHYSICS')
                else: v.append('IS_CALIBRATION')
                bs_metadata[k] = tuple(v)
            elif md.startswith('GeoAtlas:'):
                k = 'geometry'
                v = md.split('GeoAtlas:')[1].strip()
                bs_metadata[k] = v
            elif md.startswith('IOVDbGlobalTag:'):
                k = 'conditions_tag'
                v = md.split('IOVDbGlobalTag:')[1].strip()
                bs_metadata[k] = v
            elif '=' in md:
                k, v = md.split('=', 1)  # split on the first '=' only, so values may contain '='
                bs_metadata[k] = v

        # for bwd/fwd compat...
        # see: https://savannah.cern.ch/bugs/?73208
        # needed for very old BS
        for key_name, fn_name in (
            ('GUID', 'GUID'),
            ('Stream', 'stream'),
            ('Project', 'projectTag'),
            ('LumiBlock', 'lumiblockNumber'),
            ('run_number', 'runNumber'),
        ):
            if key_name in bs_metadata:
                # no need: already in bs metadata dict
                continue
            if hasattr(data_reader, fn_name):
                bs_metadata[key_name] = getattr(data_reader, fn_name)()

        self._metadata['file_guid'] = bs_metadata.get('GUID', None)
        self._metadata['evt_type'] = bs_metadata.get('evt_type', [])
        self._metadata['geometry'] = bs_metadata.get('geometry', None)
        self._metadata['conditions_tag'] = bs_metadata.get(
            'conditions_tag', None)
        self._metadata['bs_metadata'] = bs_metadata

        if not data_reader.good():
            # event-less file...
            self._metadata['run_number'].append(
                bs_metadata.get('run_number', 0))
            self._metadata['lumi_block'].append(bs_metadata.get(
                'LumiBlock', 0))
            return

        if evtmax == -1:
            evtmax = bs.total_events

        ievt = iter(bs)
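        # Advance the iterator manually so that a corrupted event can be
        # caught per-event by the RuntimeError handler below.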
        for i in range(evtmax):
            try:
                evt = next(ievt)
                evt.check()  # may raise a RuntimeError
                stream_tags = [
                    dict(stream_type=tag.type,
                         stream_name=tag.name,
                         obeys_lbk=bool(tag.obeys_lumiblock))
                    for tag in evt.stream_tag()
                ]
                self._metadata['run_number'].append(evt.run_no())
                self._metadata['evt_number'].append(evt.global_id())
                self._metadata['lumi_block'].append(evt.lumi_block())
                self._metadata['run_type'].append(
                    ef.helper.run_type2string(evt.run_type()))
                self._metadata['beam_type'].append(beam_type)
                self._metadata['beam_energy'].append(beam_energy)
                self._metadata['stream_tags'].extend(stream_tags)

            except RuntimeError as err:
                print("** WARNING ** detected a corrupted bs-file:\n", err)