Example #1
0
def main() :
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g', '--group', help='group to be processed (used only in fill mode)')
    parser.add_option('--exclude-group', help='exclude group from processing (used only in fill mode)')
    parser.add_option('-f', '--input-fake', help='location of fake trees')
    parser.add_option('-O', '--input-other', help='location of other trees')
    parser.add_option('-i', '--input-dir')
    parser.add_option('-o', '--output-dir')
    parser.add_option('--samples-dir', default='samples/',
                      help='directory with the list of samples; default ./samples/')
    parser.add_option('-s', '--syst', help="variations to process (default all)."
                      " Give a comma-sep list or say 'weight', 'object', or 'fake'")
    parser.add_option('--log-dir', help='directory where the batch logs will be (default log/...)')
    parser.add_option('-e', '--exclude', help="skip some systematics, example 'EL_FR_.*'")
    parser.add_option('-q', '--queue', default='atlas_all', help="batch queue, default atlas_all")
    parser.add_option('-T', '--tight-def', help='on-the-fly tight def, one of defs in fakeUtils.py: fakeu.lepIsTight_std, etc.')
    parser.add_option('--regions', default=None, help='comma-separated list of regions to consider')
    parser.add_option('--include-regions', default='.*', help='regexp to filter regions')
    parser.add_option('--exclude-regions', default=None, help='regexp to exclude regions')
    # reminder: submit_batch_fill_job_per_group expects argument-less opt to default to False
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('--unblind', action='store_true')
    parser.add_option('-b', '--batch',  action='store_true', help='submit to batch (used in fill mode)')
    parser.add_option('-l', '--list-systematics', action='store_true', help='list what is already in output_dir')
    parser.add_option('-L', '--list-all-systematics', action='store_true', help='list all possible systematics')
    parser.add_option('--list-all-regions', action='store_true', help='list all possible regions')
    parser.add_option('--require-tight-tight', action='store_true', help='fill histos only when both leps are tight')
    parser.add_option('--quick-test', action='store_true', help='run a quick test and fill only 1% of the events')
    parser.add_option('--disable-cache', action='store_true', help='disable the entry cache')
    parser.add_option('--format-aux', action='store_true', help='format plots for paper aux material')

    (opts, args) = parser.parse_args()
    if opts.list_all_systematics :
        print "All systematics:\n\t%s"%'\n\t'.join(systUtils.getAllVariations())
        return
    if opts.list_systematics :
        print listExistingSyst(opts.input_dir)
        return
    if opts.list_all_regions:
        print "All regions:\n\t%s"%'\n\t'.join(sorted(selection_formulas().keys()))
        return

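    # exactly one of --input-other (fill mode) or --input-dir (plot mode) must be specified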
    inOtherSpecified, inDirSpecified = opts.input_other!=None, opts.input_dir!=None
    eitherMode = inOtherSpecified != inDirSpecified
    if not eitherMode : parser.error("Run either in 'fill' or 'plot' mode")
    mode = 'fill' if inOtherSpecified else 'plot' if inDirSpecified else None
    if opts.quick_test : opts.disable_cache = True # don't write bogus entrylists
    requiredOptions = (['input_fake', 'input_other', 'output_dir'] if mode=='fill'
                       else ['input_dir', 'output_dir'])
    def optIsNotSpecified(o) : return not hasattr(opts, o) or getattr(opts,o) is None
    if any(optIsNotSpecified(o) for o in requiredOptions):
        parser.error('Missing required option\n'
                     +'\n'.join(["%s : %s"%(o, getattr(opts, o)) for o in requiredOptions]))
    if opts.verbose : utils.print_running_conditions(parser, opts)

    if   mode=='fill' : runFill(opts)
    elif mode=='plot' : runPlot(opts)
Example #2
0
def get_events_hdw_emu_offline_ef(filenames, options, hdw_emu_of_ef):
    verbose = options.verbose
    debug = options.debug
    if verbose:
        utils.print_running_conditions(parser, options)

    input_filenames = utils.read_filename_arguments(filenames, options)
    if verbose:
        print 'Input files:'
        print '\n'.join(input_filenames)
    chain = R.TChain(options.treename)
    for input_filename in input_filenames:
        chain.Add(
            input_filename
        )  # the chain becomes a concatenation of the input .root files
    num_available = chain.GetEntries()
    num_skip = options.skip_events
    num_toprocess = number_of_entries_to_process(num_available, options)

    iEntry = 0
    Events = []
    for iEvent, event in enumerate(chain):
        if num_skip and iEvent < num_skip: continue
        if iEntry > num_toprocess: break

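        # build the muon list from the requested source: hardware TOBs, emulated TOBs, offline reco muons, or event-filter muons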
        if hdw_emu_of_ef == 'hdw':
            muons = [
                Muon(tob.pt / 1000., tob.eta, tob.phi)
                for tob in event.hdwMuonTOB if tob.bcn == 0
            ]  # only pick the ones from bunch crossing number 0
        elif hdw_emu_of_ef == 'emu':
            muons = [
                Muon(tob.pt / 1000., tob.eta, tob.phi)
                for tob in event.emuMuonTOB if tob.bcn == 0
            ]  # only pick the ones from bunch crossing number 0
        elif hdw_emu_of_ef == 'of':
            muons = [
                Muon(tob.Pt() / 1000., tob.Eta(), tob.Phi())
                for tob in event.recomuon
            ]
        else:
            muons = [
                Muon(tob.Pt() / 1000., tob.Eta(), tob.Phi())
                for tob in event.efmuon
            ]


#        muons = remove_equal_muons(muons)
        muons.sort(key=lambda x: x.p4.Eta())

        entry = Event(event.runNumber, event.eventNumber, muons)
        Events.append(entry)
        iEntry += 1

    return Events
Example #3
0
def main():
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g', '--group', help='group to be processed (used only in fill mode)')
    parser.add_option('-i', '--input-dir', default='./out/fakerate')
    parser.add_option('-l', '--lepton', default='el', help='either el or mu')
    parser.add_option('-o', '--output-dir', default='./out/plot_by_source', help='dir for plots')
    parser.add_option('--log-dir', help='directory where the batch logs will be (default log/...)')
    parser.add_option('--samples-dir', default='samples/', help='directory with the list of samples; default ./samples/')
    parser.add_option('-f', '--fill-histos', action='store_true', default=False, help='force fill (default only if needed)')
    parser.add_option('-q', '--queue', default='atlas_all', help="batch queue, default atlas_all")
    parser.add_option('--regions', default=None, help='comma-separated list of regions to consider')
    parser.add_option('--include-regions', default='.*', help='regexp to filter regions (protect with quotes if necessary)')
    parser.add_option('--exclude-regions', default=None, help='regexp to exclude regions')
    # reminder: submit_batch_fill_job_per_group expects argument-less opt to default to False
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('-b', '--batch',  action='store_true', help='submit to batch (used in fill mode)')
    parser.add_option('--list-all-regions', action='store_true', help='list all possible regions')
    parser.add_option('--tight-tight', action='store_true', help='fill histos only when both leps are tight')
    parser.add_option('--quick-test', action='store_true', help='run a quick test and fill only 1% of the events')
    parser.add_option('--disable-cache', action='store_true', help='disable the entry cache')
    parser.add_option('--skip-fill', action='store_true', help='do not fill histograms (use existing ones)')
    parser.add_option('--just-fill', action='store_true', help='do not plot')

    (opts, args) = parser.parse_args()
    inputDir  = opts.input_dir
    lepton    = opts.lepton
    regions   = opts.regions
    verbose   = opts.verbose

    if opts.list_all_regions:
        print "All regions:\n\t%s"%'\n\t'.join(sorted(selection_formulas().keys()))
        return

    if lepton not in ['el', 'mu'] : parser.error("invalid lepton '%s'"%lepton)
    if opts.verbose : utils.print_running_conditions(parser, opts)

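    # fill histograms first (unless --skip-fill); when submitting to the batch, plotting is deferred to a second pass with --skip-fill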
    if not opts.skip_fill:
        runFill(opts)
    if opts.batch:
        print "wait for the fill jobs to complete, then re-run with `--skip-fill`"
        return
    runPlot(opts)
    print 'todo: fix errorband'
Example #4
0
def main():
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g',
                      '--group',
                      help='group to be processed (used only in fill mode)')
    parser.add_option('-i', '--input-dir', default='./out/fakerate')
    parser.add_option('-l', '--lepton', default='el', help='either el or mu')
    parser.add_option('-o',
                      '--output-dir',
                      default='./out/plot_by_source',
                      help='dir for plots')
    parser.add_option(
        '--log-dir',
        help='directory where the batch logs will be (default log/...)')
    parser.add_option(
        '--samples-dir',
        default='samples/',
        help='directory with the list of samples; default ./samples/')
    parser.add_option('-f',
                      '--fill-histos',
                      action='store_true',
                      default=False,
                      help='force fill (default only if needed)')
    parser.add_option('-q',
                      '--queue',
                      default='atlas_all',
                      help="batch queue, default atlas_all")
    parser.add_option('--regions',
                      default=None,
                      help='comma-separated list of regions to consider')
    parser.add_option(
        '--include-regions',
        default='.*',
        help='regexp to filter regions (protect with quotes if necessary)')
    parser.add_option('--exclude-regions',
                      default=None,
                      help='regexp to exclude regions')
    # reminder: submit_batch_fill_job_per_group expects argument-less opt to default to False
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('-b',
                      '--batch',
                      action='store_true',
                      help='submit to batch (used in fill mode)')
    parser.add_option('--list-all-regions',
                      action='store_true',
                      help='list all possible regions')
    parser.add_option('--tight-tight',
                      action='store_true',
                      help='fill histos only when both leps are tight')
    parser.add_option('--quick-test',
                      action='store_true',
                      help='run a quick test and fill only 1% of the events')
    parser.add_option('--disable-cache',
                      action='store_true',
                      help='disable the entry cache')
    parser.add_option('--skip-fill',
                      action='store_true',
                      help='do not fill histograms (use existing ones)')
    parser.add_option('--just-fill', action='store_true', help='do not plot')

    (opts, args) = parser.parse_args()
    inputDir = opts.input_dir
    lepton = opts.lepton
    regions = opts.regions
    verbose = opts.verbose

    if opts.list_all_regions:
        print "All regions:\n\t%s" % '\n\t'.join(
            sorted(selection_formulas().keys()))
        return

    if lepton not in ['el', 'mu']: parser.error("invalid lepton '%s'" % lepton)
    if opts.verbose: utils.print_running_conditions(parser, opts)

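    # fill histograms first (unless --skip-fill); when submitting to the batch, plotting is deferred to a second pass with --skip-fill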
    if not opts.skip_fill:
        runFill(opts)
    if opts.batch:
        print "wait for the fill jobs to complete, then re-run with `--skip-fill`"
        return
    runPlot(opts)
    print 'todo: fix errorband'
Example #5
0
def main():
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g',
                      '--group',
                      help='group to be processed (used only in fill mode)')
    parser.add_option(
        '--exclude-group',
        help='exclude group from processing (used only in fill mode)')
    parser.add_option('-f', '--input-fake', help='location of fake trees')
    parser.add_option('-O', '--input-other', help='location of other trees')
    parser.add_option('-i', '--input-dir')
    parser.add_option('-o', '--output-dir')
    parser.add_option(
        '--samples-dir',
        default='samples/',
        help='directory with the list of samples; default ./samples/')
    parser.add_option(
        '-s',
        '--syst',
        help="variations to process (default all)."
        " Give a comma-sep list or say 'weight', 'object', or 'fake'")
    parser.add_option(
        '--log-dir',
        help='directory where the batch logs will be (default log/...)')
    parser.add_option('-e',
                      '--exclude',
                      help="skip some systematics, example 'EL_FR_.*'")
    parser.add_option('-q',
                      '--queue',
                      default='atlas_all',
                      help="batch queue, default atlas_all")
    parser.add_option(
        '-T',
        '--tight-def',
        help=
        'on-the-fly tight def, one of defs in fakeUtils.py: fakeu.lepIsTight_std, etc.'
    )
    parser.add_option('--regions',
                      default=None,
                      help='comma-separated list of regions to consider')
    parser.add_option('--include-regions',
                      default='.*',
                      help='regexp to filter regions')
    parser.add_option('--exclude-regions',
                      default=None,
                      help='regexp to exclude regions')
    # reminder: submit_batch_fill_job_per_group expects argument-less opt to default to False
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('--unblind', action='store_true')
    parser.add_option('-b',
                      '--batch',
                      action='store_true',
                      help='submit to batch (used in fill mode)')
    parser.add_option('-l',
                      '--list-systematics',
                      action='store_true',
                      help='list what is already in output_dir')
    parser.add_option('-L',
                      '--list-all-systematics',
                      action='store_true',
                      help='list all possible systematics')
    parser.add_option('--list-all-regions',
                      action='store_true',
                      help='list all possible regions')
    parser.add_option('--require-tight-tight',
                      action='store_true',
                      help='fill histos only when both leps are tight')
    parser.add_option('--quick-test',
                      action='store_true',
                      help='run a quick test and fill only 1% of the events')
    parser.add_option('--disable-cache',
                      action='store_true',
                      help='disable the entry cache')
    parser.add_option('--format-aux',
                      action='store_true',
                      help='format plots for paper aux material')

    (opts, args) = parser.parse_args()
    if opts.list_all_systematics:
        print "All systematics:\n\t%s" % '\n\t'.join(
            systUtils.getAllVariations())
        return
    if opts.list_systematics:
        print listExistingSyst(opts.input_dir)
        return
    if opts.list_all_regions:
        print "All regions:\n\t%s" % '\n\t'.join(
            sorted(selection_formulas().keys()))
        return

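    # exactly one of --input-other (fill mode) or --input-dir (plot mode) must be specified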
    inOtherSpecified, inDirSpecified = opts.input_other != None, opts.input_dir != None
    eitherMode = inOtherSpecified != inDirSpecified
    if not eitherMode: parser.error("Run either in 'fill' or 'plot' mode")
    mode = 'fill' if inOtherSpecified else 'plot' if inDirSpecified else None
    if opts.quick_test:
        opts.disable_cache = True  # don't write bogus entrylists
    requiredOptions = (['input_fake', 'input_other', 'output_dir']
                       if mode == 'fill' else ['input_dir', 'output_dir'])

    def optIsNotSpecified(o):
        return not hasattr(opts, o) or getattr(opts, o) is None

    if any(optIsNotSpecified(o) for o in requiredOptions):
        parser.error('Missing required option\n' + '\n'.join(
            ["%s : %s" % (o, getattr(opts, o)) for o in requiredOptions]))
    if opts.verbose: utils.print_running_conditions(parser, opts)

    if mode == 'fill': runFill(opts)
    elif mode == 'plot': runPlot(opts)
Example #6
0
def main():
    difference_dphi = 0  #CHANGE
    total_dphi = 0
    usage = ("Usage : %prog [options] filename"
             "\n Examples :"
             "\n %prog  -v tmptrig.root")

    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-n',
                      '--num-events',
                      default=None,
                      type=int,
                      help='number of events to process (default all)')
    parser.add_option('-s',
                      '--skip-events',
                      default=None,
                      type=int,
                      help='number of events to skip (default none)')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-d', '--debug', default=False, action='store_true')
    parser.add_option('-t', '--treename', default='trig')

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
    verbose = options.verbose
    debug = options.debug
    if verbose:
        utils.print_running_conditions(parser, options)

    input_filenames = utils.read_filename_arguments(args[0], options)
    if verbose:
        print 'Input files:'
        print '\n'.join(input_filenames)
    chain = R.TChain(options.treename)
    for input_filename in input_filenames:
        chain.Add(
            input_filename
        )  # the chain becomes a concatenation of the input .root files
    num_available = chain.GetEntries()
    num_skip = options.skip_events
    num_toprocess = number_of_entries_to_process(num_available, options)
    if verbose:
        print "About to process %s (out of %d) entries: " % (num_toprocess,
                                                             num_available)
    # # test: print all branches available in the tree
    # print 'chain ',chain
    # print 'branches:'
    # print 'list of branches ',chain.GetListOfBranches()
    # print '\n'.join([k.GetName() for k in chain.GetListOfBranches()])
    # print
    # return

    iEntry = 0
    possible_outcomes = [
        'pass_em_pass_hw', 'pass_em_fail_hw', 'fail_em_pass_hw',
        'fail_em_fail_hw'
    ]
    algo_counters = defaultdict(int)
    item_counters = defaultdict(int)
    valid_counters = {k: 0 for k in ['overflow'] + possible_outcomes}

    histos = {}
    histos2 = {}
    lvl1item_name = 'L1_LFV-MU6'
    algorithm_name = '0DR15-2MU6ab'

    l = lvl1item_name
    dr_binning = (31, -0.05, 3.05)
    histos = {
        'dr_min_all':
        R.TH1F('dr_min_all', l + '; min #DeltaR 2mu6 pair', *dr_binning),
        'dr_min_hdw_pass':
        R.TH1F('dr_min_hdw_pass', l + '; min #DeltaR 2mu6 pair and hdw pass',
               *dr_binning),
        'ratio':
            R.TH1F('ratio', l + '; efficiency 0DR15-2MU6ab algorithm',
               *dr_binning),
    }

    histo_names = [name for name, histo in histos.items()]

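    # event loop: decode the L1 item and algorithm bits, build the emulated muon TOBs, apply the 2MU6 and 0DR15 requirements, and fill the histograms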
    for iEvent, event in enumerate(chain):
        if num_skip and iEvent < num_skip: continue
        if iEntry > num_toprocess: break
        # see how branches are created in TopoNtuple.py
        item_bits = item_tbits2bits(
            getattr(event,
                    lvl1item_name.replace('-', '_') + '_iBits'))
        increment_counters(item_counters, item_bits)
        # # These are not filled, so don't bother for now # DG-2016-06-23
        algo_bits = algo_tbits2bits(
            getattr(event,
                    algorithm_name.replace('-', '_') + '_0_aBits'))
        increment_counters(algo_counters, algo_bits)
        pass_hw = item_bits['TDT_TBP']
        pass_sim = item_bits['L1SIMULATED']

        overflown = algo_bits['OVERFLOWN']
        if overflown:
            valid_counters['overflow'] += 1
            continue

        muons = [
            Muon(tob.pt, tob.eta, tob.phi) for tob in event.emuMuonTOB
            if tob.bcn == 0
        ]  # only pick the ones from bunch crossing number 0
        #        muons = [Muon(tob.Pt(), tob.Eta(), tob.Phi()) for tob in event.recomuon]

        list_mu6ab = algo_MU6ab(muons)  #mu6 list
        if len(list_mu6ab) < 2: continue

        dr_min = algo_0DR15(list_mu6ab)  #get minimum dr

        if dr_min > 3: continue

        fill_histos(histos, dr_min, pass_hw)  #fill histograms

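    # divide bin-by-bin by the inclusive min-DeltaR distribution to obtain the efficiency histogram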
    histos['ratio'].Divide(histos['dr_min_all'])

    c = R.TCanvas('c')
    for name in histo_names:
        c.Clear()
        h = histos[name]
        h.Draw('h text')
        c.Update()
        c.SaveAs(name + '.png')
        c.SaveAs(name + '.root')
def main():
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g', '--group', help='group to be processed (used only in fill mode)')
    parser.add_option('-i', '--input-dir', default='./out/fakerate')
    parser.add_option('-o', '--output-dir', default='./out/fake_scale_factor')
    parser.add_option('-l', '--lepton', default='el', help='either el or mu')
    parser.add_option('-r', '--region', help='one of the regions for which we saved the fake ntuples')
    parser.add_option('--samples-dir', default='samples/', help='directory with the list of samples; default ./samples/')
    parser.add_option('-T', '--tight-def', help='on-the-fly tight def, one of defs in fakeUtils.py: fakeu.lepIsTight_std, etc.')
    parser.add_option('-f', '--fill-histos', action='store_true', default=False, help='force fill (default only if needed)')
    parser.add_option('--keep-real', action='store_true', default=False, help='do not subtract real (to get real lep efficiency)')
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('--disable-cache', action='store_true', help='disable the entry cache')
    (options, args) = parser.parse_args()
    inputDir  = options.input_dir
    outputDir = options.output_dir
    lepton    = options.lepton
    region    = options.region
    keepreal  = options.keep_real
    debug     = options.debug
    verbose   = options.verbose
    if lepton not in ['el', 'mu'] : parser.error("invalid lepton '%s'"%lepton)
    regions = kin.selection_formulas().keys()
    assert region in regions,"invalid region '%s', must be one of %s"%(region, str(sorted(regions)))
    regions = [region]

    dataset.Dataset.verbose_parsing = True if debug else False
    groups = dataset.DatasetGroup.build_groups_from_files_in_dir(options.samples_dir)
    if options.group : groups = [g for g in groups if g.name==options.group]
    group_names = [g.name for g in groups]

    outputDir = outputDir+'/'+region+'/'+lepton # split the output in subdirectories, so we don't overwrite things
    mkdirIfNeeded(outputDir)
    templateOutputFilename = "scale_factor_{0}.root".format(lepton)
    outputFileName = os.path.join(outputDir, templateOutputFilename)
    cacheFileName = outputFileName.replace('.root', '_cache.root')
    doFillHistograms = options.fill_histos or not os.path.exists(cacheFileName)
    onthefly_tight_def = eval(options.tight_def) if options.tight_def else None # eval will take care of aborting on typos
    if verbose : utils.print_running_conditions(parser, options)
    vars = ['mt0', 'mt1', 'pt0', 'pt1', 'eta1', 'pt1_eta1']
    #fill histos
    if doFillHistograms :
        start_time = time.clock()
        num_processed_entries = 0
        histosPerGroup = bookHistos(vars, group_names, region=region)
        histosPerSource = bookHistosPerSource(vars, leptonSources, region=region)
        histosPerGroupPerSource = bookHistosPerSamplePerSource(vars, group_names, leptonSources, region=region)
        for group in groups:
            tree_name = 'hlfv_tuple'
            chain = IndexedChain(tree_name)
            for ds in group.datasets:
                fname = os.path.join(inputDir, ds.name+'.root')
                if os.path.exists(fname):
                    chain.Add(fname)
            if verbose:
                print "{0} : {1} entries from {2} samples".format(group.name, chain.GetEntries(), len(group.datasets))
            chain.cache_directory = os.path.abspath('./selection_cache/'+group.name+'/')
            tcuts = [r.TCut(reg, selection_formulas()[reg]) for reg in regions]
            print 'tcuts ',[c.GetName() for c in tcuts]
            chain.retrieve_entrylists(tcuts)
            counters_pre, histos_pre = dict(), dict()
            counters_npre, histos_npre = dict(), dict()
            print 'tcuts_with_existing_list ',str([c.GetName() for c in chain.tcuts_with_existing_list()])
            print 'tcuts_without_existing_list ',str([c.GetName() for c in chain.tcuts_without_existing_list()])
            cached_tcuts = [] if options.disable_cache else chain.tcuts_with_existing_list()
            print 'cached_tcuts ',[c.GetName() for c in cached_tcuts]
            uncached_tcuts = tcuts if options.disable_cache else chain.tcuts_without_existing_list()
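            # cuts with an existing entry list are filled from the cache; the remaining cut runs over the full chain and its entry list is saved afterwards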
            print 'todo: skip cuts for which the histo files are there'
            if verbose:
                print " --- group : {0} ---".format(group.name)
                print '\n\t'.join(chain.filenames)
            if verbose : print 'filling cached cuts: ',' '.join([c.GetName() for c in cached_tcuts])
            if verbose: print "%s : %d entries"%(group.name, chain.GetEntries())
            histosThisGroup = histosPerGroup[group.name]
            histosThisGroupPerSource = dict((v, histosPerGroupPerSource[v][group.name]) for v in histosPerGroupPerSource.keys())
            for cut in cached_tcuts:
                print 'cached_tcut ',cut
                chain.preselect(cut)
                num_processed_entries += fillHistos(chain, histosThisGroup, histosPerSource,
                                                    histosThisGroupPerSource,
                                                    lepton, group,
                                                    cut, cut_is_cached=True,
                                                    onthefly_tight_def=onthefly_tight_def,
                                                    verbose=verbose)
            if verbose : print 'filling uncached cuts: ',' '.join([c.GetName() for c in uncached_tcuts])
            if uncached_tcuts:
                assert len(uncached_tcuts)==1, "expecting only one cut, got {}".format(len(uncached_tcuts))
                cut = uncached_tcuts[0]
                chain.preselect(None)
                num_processed_entries += fillHistos(chain, histosThisGroup, histosPerSource,
                                                    histosThisGroupPerSource,
                                                    lepton, group,
                                                    cut, cut_is_cached=False,
                                                    onthefly_tight_def=onthefly_tight_def,
                                                    verbose=verbose)
                chain.save_lists()

        writeHistos(cacheFileName, histosPerGroup, histosPerSource, histosPerGroupPerSource, verbose)
        end_time = time.clock()
        delta_time = end_time - start_time
        if verbose:
            print ("processed {0:d} entries ".format(num_processed_entries)
                   +"in "+("{0:d} min ".format(int(delta_time/60)) if delta_time>60 else
                           "{0:.1f} s ".format(delta_time))
                   +"({0:.1f} kHz)".format(num_processed_entries/delta_time))
    # return
    # compute scale factors
    histosPerGroup = fetchHistos(cacheFileName, histoNames(vars, group_names, region), verbose)
    histosPerSource = fetchHistos(cacheFileName, histoNamesPerSource(vars, leptonSources, region), verbose)
    histosPerSamplePerSource = fetchHistos(cacheFileName, histoNamesPerSamplePerSource(vars, group_names, leptonSources, region), verbose)
    plotStackedHistos(histosPerGroup, outputDir+'/by_group', region, verbose)
    plotStackedHistosSources(histosPerSource, outputDir+'/by_source', region, verbose)
    plotPerSourceEff(histosPerVar=histosPerSource, outputDir=outputDir+'/by_source', lepton=lepton, region=region, verbose=verbose)
    for g in group_names:
        hps = dict((v, histosPerSamplePerSource[v][g])for v in vars)
        plotPerSourceEff(histosPerVar=hps, outputDir=outputDir, lepton=lepton, region=region, sample=g, verbose=verbose)


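    # compute the fake-lepton scale factors and data fake efficiencies as a function of eta, pt, and (pt, eta)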
    hn_sf_eta = histoname_sf_vs_eta           (lepton)
    hn_sf_pt  = histoname_sf_vs_pt            (lepton)
    hn_da_eta = histoname_data_fake_eff_vs_eta(lepton)
    hn_da_pt  = histoname_data_fake_eff_vs_pt (lepton)
    subtractReal = not keepreal
    objs_eta = subtractRealAndComputeScaleFactor(histosPerGroup, 'eta1', hn_sf_eta, hn_da_eta, outputDir, region, subtractReal, verbose)
    objs_pt  = subtractRealAndComputeScaleFactor(histosPerGroup, 'pt1',  hn_sf_pt,  hn_da_pt,  outputDir, region, subtractReal, verbose)
    objs_pt_eta  = subtractRealAndComputeScaleFactor(histosPerGroup, 'pt1_eta1',
                                                     histoname_sf_vs_pt_eta(lepton),
                                                     histoname_data_fake_eff_vs_pt_eta(lepton),
                                                     outputDir, region, subtractReal, verbose)
    rootUtils.writeObjectsToFile(outputFileName, dictSum(dictSum(objs_eta, objs_pt), objs_pt_eta), verbose)
    if verbose : print "saved scale factors to %s" % outputFileName
def main():
    usage = ("Usage : %prog [options] filename"
             "\n Examples :"
             "\n %prog  -v tmptrig.root")

    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-n',
                      '--num-events',
                      default=None,
                      type=int,
                      help='number of events to process (default all)')
    parser.add_option('-s',
                      '--skip-events',
                      default=None,
                      type=int,
                      help='number of events to skip (default none)')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-d', '--debug', default=False, action='store_true')
    parser.add_option('-t', '--treename', default='trig')

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
    verbose = options.verbose
    debug = options.debug
    if verbose:
        utils.print_running_conditions(parser, options)

    input_filenames = utils.read_filename_arguments(args[0], options)
    if verbose:
        print 'Input files:'
        print '\n'.join(input_filenames)
    chain = R.TChain(options.treename)
    for input_filename in input_filenames:
        chain.Add(
            input_filename
        )  # the chain becomes a concatenation of the input .root files
    num_available = chain.GetEntries()
    num_skip = options.skip_events
    num_toprocess = number_of_entries_to_process(num_available, options)
    if verbose:
        print "About to process %s (out of %d) entries: " % (num_toprocess,
                                                             num_available)
    # # test: print all branches available in the tree
    # print 'chain ',chain
    # print 'branches:'
    # print 'list of branches ',chain.GetListOfBranches()
    # print '\n'.join([k.GetName() for k in chain.GetListOfBranches()])
    # print
    # return

    iEntry = 0
    possible_outcomes = [
        'pass_em_pass_hw', 'pass_em_fail_hw', 'fail_em_pass_hw',
        'fail_em_fail_hw'
    ]
    algo_counters = defaultdict(int)
    item_counters = defaultdict(int)
    valid_counters = {k: 0 for k in ['overflow'] + possible_outcomes}

    histos = {}
    histos2 = {}
    lvl1item_name = 'L1_BPH-2M9-MU6MU4_BPH-0DR15-MU6MU4'
    algorithm1_name = '2INVM9-MU6ab-MU4ab'
    algorithm2_name = '0DR15-MU6ab-MU4ab'

    l = lvl1item_name
    num_binning = (9, -0.5, 8.5)
    dr_binning = (30, 0.0, 6.0)
    m_binning = (15, -500.0, 14500.0)
    pt_binning = (8, 3500.0, 11500.0)
    angle_binning = (28, -3.5, 3.5)
    for k in possible_outcomes:  # book one set of histograms per outcome; they stay empty until the event loop fills them
        histos[k] = {
            'n_mu':
            R.TH1F('n_mu' + '_' + k, l + '; N input l1mus', *num_binning),
            'n_mu4':
            R.TH1F('n_mu4' + '_' + k, l + '; N mu4 muons', *num_binning),
            'n_pairs_mu6mu4_2m9_0dr15':
            R.TH1F('n_pairs_mu6mu4_2m9_0dr15' + '_' + k,
                   l + '; N mu6mu4_2m9_0dr15 pairs', *num_binning),
            'n_pairs_mu6mu4_2m9':
            R.TH1F('n_pairs_mu6mu4_2m9' + '_' + k, l + '; N mu6mu4_2m9 pairs',
                   *num_binning),
            'n_pairs_mu6mu4_0dr15':
            R.TH1F('n_pairs_mu6mu4_0dr15' + '_' + k,
                   l + '; N mu6mu4_0dr15 pairs', *num_binning),
            'n_pairs_mu6mu4':
            R.TH1F('n_pairs_mu6mu4' + '_' + k, l + '; N mu6mu4 pairs',
                   *num_binning),
            'n_pairs_any':
            R.TH1F('n_pairs_any' + '_' + k, l + '; N any pair', *num_binning),
            'dr_any':
            R.TH1F('dr_any' + '_' + k, l + '; #DeltaR any pair', *dr_binning),
            'dr_mu6mu4':
            R.TH1F('dr_mu6mu4' + '_' + k, l + '; #DeltaR mu6mu4 pairs',
                   *dr_binning),
            'dr_min_mu6mu4':
            R.TH1F('dr_min_mu6mu4' + '_' + k, l + '; min #DeltaR mu6mu4 pairs',
                   *dr_binning),
            'dr_mu6mu4_0dr15':
            R.TH1F('dr_mu6mu4_0dr15' + '_' + k, l + '; #DeltaR mu6mu4_0dr15',
                   *dr_binning),
            'dr_min_mu6mu4_0dr15':
            R.TH1F('dr_min_mu6mu4_0dr15' + '_' + k,
                   l + '; min #DeltaR mu6mu4_0dr15', *dr_binning),
            'dr_mu6mu4_2m9':
            R.TH1F('dr_mu6mu4_2m9' + '_' + k, l + '; #DeltaR mu6mu4_2m9',
                   *dr_binning),
            'dr_min_mu6mu4_2m9':
            R.TH1F('dr_min_mu6mu4_2m9' + '_' + k,
                   l + '; min #DeltaR mu6mu4_2m9', *dr_binning),
            'dr_mu6mu4_2m9_0dr15':
            R.TH1F('dr_mu6mu4_2m9_0dr15' + '_' + k,
                   l + '; #DeltaR mu6mu4_2m9_0dr15', *dr_binning),
            'dr_min_mu6mu4_2m9_0dr15':
            R.TH1F('dr_min_mu6mu4_2m9_0dr15' + '_' + k,
                   l + '; min #DeltaR mu6mu4_2m9_0dr15', *dr_binning),
            'm_any':
            R.TH1F('m_any' + '_' + k, l + '; #InvMass any pair', *m_binning),
            'm_mu6mu4':
            R.TH1F('m_mu6mu4' + '_' + k, l + '; #InvMass mu6mu4 pairs',
                   *m_binning),
            'm_mu6mu4_0dr15':
            R.TH1F('m_mu6mu4_0dr15' + '_' + k, l + '; #InvMass mu6mu4_0dr15',
                   *m_binning),
            'm_mu6mu4_2m9':
            R.TH1F('m_mu6mu4_2m9' + '_' + k, l + '; #InvMass mu6mu4_2m9',
                   *m_binning),
            'm_mu6mu4_2m9_0dr15':
            R.TH1F('m_mu6mu4_2m9_0dr15' + '_' + k,
                   l + '; #InvMass mu6mu4_2m9_0dr15', *m_binning),
            'Phi_mu4':
            R.TH1F('Phi_mu6mu4' + '_' + k, l + '; Phi angle any mu4 muon',
                   *angle_binning),
            'Eta_mu4':
            R.TH1F('Eta_mu6mu4' + '_' + k, l + '; Eta angle any mu4 muon',
                   *angle_binning),
            'pt_any':
            R.TH1F('pt_any' + '_' + k, l + '; #Pt any muon', *pt_binning),
        }
        histos2[k] = R.TH2F('PhiEta_mu6mu4' + '_' + k,
                            l + '; Phi angle any mu6mu4; Eta angle any mu6mu4',
                            *2 * angle_binning)

    histo_names = [
        name for name, histo in histos[possible_outcomes[0]].items()
    ]

    n_fail_hdw = 0
    n_pass_hdw = 0

    for iEvent, event in enumerate(chain):
        if num_skip and iEvent < num_skip: continue
        if iEntry > num_toprocess: break
        # see how branches are created in TopoNtuple.py
        item_bits = item_tbits2bits(
            getattr(event,
                    lvl1item_name.replace('-', '_') + '_iBits'))
        increment_counters(item_counters, item_bits)
        # # These are not filled, so don't bother for now # DG-2016-06-23
        algo1_bits = algo_tbits2bits(
            getattr(event,
                    algorithm1_name.replace('-', '_') + '_0_aBits'))
        increment_counters(algo_counters, algo1_bits)
        algo2_bits = algo_tbits2bits(
            getattr(event,
                    algorithm2_name.replace('-', '_') + '_0_aBits'))
        increment_counters(algo_counters, algo2_bits)
        pass_hw = item_bits['TDT_TBP']
        pass_sim = item_bits['L1SIMULATED']
        if pass_hw:
            n_pass_hdw += 1
        else:
            n_fail_hdw += 1
        if n_pass_hdw > 500 and pass_hw: continue
        if n_fail_hdw > 501 and not pass_hw: continue

        overflown1 = algo1_bits['OVERFLOWN']
        overflown2 = algo2_bits['OVERFLOWN']
        if overflown1 and overflown2:
            valid_counters['overflow'] += 1
            continue
        # emTobs = [EmTob(w) for w in event.emTobs]
        # jetTobs = [JetTob(w) for w in event.jetTobs]
        # tauTobs = [TauTob(w) for w in event.tauTobs]
        # if debug:
        #     print 'emTobs[%d]' % len(emTobs)
        #     for i, et in enumerate(emTobs):
        #         print "[%d] (%f, %f)"%(i, et.eta(), et.phi())
        #     print 'jetTobs[%d]' % len(jetTobs)
        #     for i, jt in enumerate(jetTobs):
        #         print "[%d] (%f, %f)"%(i, jt.eta(), jt.phi())
        #     print 'tauTobs[%d]' % len(tauTobs)
        #     for i, tt in enumerate(tauTobs):
        #         print "[%d] (%f, %f)"%(i, tt.eta(), tt.phi())

        # these are EnhancedMuonTOB objects
        muons = [
            Muon(tob.pt, tob.eta, tob.phi) for tob in event.hdwMuonTOB
            if tob.bcn == 0
        ]  # only pick the ones from bunch crossing number 0
        #        muons = remove_equal_muons(muons)
        #        muons_emu = [Muon(tob.pt, tob.eta, tob.phi) for tob in event.emuMuonTOB
        #                 if tob.bcn==0] # only pick the ones from bunch crossing number 0

        #        muons = muons_emu

        list_mu4 = sorted(
            muons, key=lambda muon: muon.p4.Pt())  #all muons satisfy mu4
        list_mu6mu4_2m9_0dr15_pairs, list_mu6mu4_2m9_pairs, list_mu6mu4_0dr15_pairs, list_mu6mu4_pairs = algo_2M9_0DR15(
            list_mu4, pass_hw=pass_hw)  #2m9_0dr15 pair list
        list_pairs = make_all_pairs(muons)

        pass_emul = len(
            list_mu6mu4_2m9_0dr15_pairs)  #truthy only if at least one mu6mu4 pair passes both 2M9 and 0DR15
        #        pass_emul = len(list_mu6mu4_2m9_pairs) and len(list_mu6mu4_0dr15_pairs)  #returns true if mu6mu4, 2m9 or 0dr15

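        # classify the event by the emulation and hardware decisions: both pass, emulation only, hardware only, or both fail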
        outcome = ('pass_em_pass_hw'
                   if pass_hw and pass_emul else 'pass_em_fail_hw' if pass_emul
                   else 'fail_em_pass_hw' if pass_hw else 'fail_em_fail_hw')

        #        if pass_hw and not len(list_mu6mu4_2m9_pairs):
        #        if outcome == 'fail_em_pass_hw':
        #        if pass_hw:
        if False:
            n_muons = len(muons)
            for i in range(n_muons):
                mu = muons[n_muons - i - 1].p4
                print("Pt = {0:.0f}  \tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu.Pt() / 1000.,
                    mu.Phi() * 10.,
                    mu.Eta() * 10.))

#            print("")
#            for pair in list_mu6mu4_pairs:
#                mu1 = pair.muon1.p4
#                mu2 = pair.muon2.p4
#                print("muon1:  Pt = {:.0f}  \tPhi = {:.0f}  \tEta = {:.0f}".format(mu1.Pt()/1000., mu1.Phi()*10., mu1.Eta()*10.))
#                print("muon2:  Pt = {:.0f}  \tPhi = {:.0f}  \tEta = {:.0f}".format(mu2.Pt()/1000., mu2.Phi()*10., mu2.Eta()*10.))
#                print('invm2 = {:.0f}'.format(pair.invm2/1000000.))
#            print('runNumber = {}  eventNumber = {}  lumiBlock = {}'.format(event.runNumber, event.eventNumber, event.lumiBlock))
            print('event_number = {}'.format(event.eventNumber))
            print("--------------------------------------")
#        else: continue

        if False:
            for muon in muons:
                mu = muon.p4
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu.Pt() / 1000.,
                    mu.Phi() * 10,
                    mu.Eta() * 10))
            print("")
            for muon in muons_emu:
                mu = muon.p4
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu.Pt() / 1000.,
                    mu.Phi() * 10,
                    mu.Eta() * 10))
            print("")
            print("")

#        if outcome == 'pass_em_fail_hw':
#        if outcome == 'fail_em_pass_hw':
        if False:
            print("all muons in event")
            for muon in muons:
                mu = muon.p4
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu.Pt() / 1000.,
                    mu.Phi() * 10,
                    mu.Eta() * 10))
#            print("pairs with a pass in emulation")
            print("pairs with a pass in 0DR15-MU6ab-MU4ab emulation")
            for pair in list_mu6mu4_0dr15_pairs:
                mu1 = pair.muon1.p4
                mu2 = pair.muon2.p4
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu1.Pt() / 1000.,
                    mu1.Phi() * 10,
                    mu1.Eta() * 10))
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu2.Pt() / 1000.,
                    mu2.Phi() * 10,
                    mu2.Eta() * 10))
                print("dr = {0:.2f}  \t\tinvm = {1:.2f}".format(
                    pair.dr * 10, pair.invm / 1000.))
            print("pairs with a pass in 2INVM9-MU6ab-MU4ab emulation")
            for pair in list_mu6mu4_2m9_pairs:
                mu1 = pair.muon1.p4
                mu2 = pair.muon2.p4
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu1.Pt() / 1000.,
                    mu1.Phi() * 10,
                    mu1.Eta() * 10))
                print("Pt = {0:.0f}\t\tPhi = {1:.0f}  \tEta = {2:.0f}".format(
                    mu2.Pt() / 1000.,
                    mu2.Phi() * 10,
                    mu2.Eta() * 10))
                print("dr = {0:.2f}  \t\tinvm = {1:.2f}".format(
                    pair.dr * 10, pair.invm / 1000.))
            print("-----------")

        valid_counters[outcome] += 1
        fill_histos(histos[outcome], histos2[outcome], muons, list_mu4,
                    list_mu6mu4_2m9_pairs, list_mu6mu4_0dr15_pairs,
                    list_mu6mu4_2m9_0dr15_pairs, list_mu6mu4_pairs,
                    list_pairs)  #fill histograms

        if debug and pass_hw:
            print "passed, %d muons" % len(muons)
        iEntry += 1

    print 'algo_counters:'
    pprint(dict(algo_counters))
    print 'item_counters:'
    pprint(dict(item_counters))
    print 'valid counters:'
    pprint(dict(valid_counters))

    if True:
        #print errors
        p_p = valid_counters['pass_em_pass_hw']
        p_f = valid_counters['pass_em_fail_hw']
        f_p = valid_counters['fail_em_pass_hw']
        f_f = valid_counters['fail_em_fail_hw']

        total_inputs = p_p + p_f + f_p + f_f
        total_pass_em = p_p + p_f
        total_pass_hw = f_p + p_p
        total_fail_em = f_p + f_f
        total_fail_hw = p_f + f_f
        total_discordance = 100. * (f_p + p_f) / total_inputs
        pass_em_discordance = 100. * p_f / total_pass_em
        fail_em_discordance = 100. * f_p / total_fail_em
        pass_hw_discordance = 100. * f_p / total_pass_hw
        fail_hw_discordance = 100. * p_f / total_fail_hw
        print('  total   error {:.2f}%'.format(total_discordance))
        print('  em pass error {:.2f}%'.format(pass_em_discordance))
        print('  em fail error {:.2f}%'.format(fail_em_discordance))
        print('  hw pass error {:.2f}%'.format(pass_hw_discordance))
        print('  hw fail error {:.2f}%'.format(fail_hw_discordance))

    c = R.TCanvas('c')
    order = [2, 4, 3, 1]
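    # pad order: arrange the four outcomes so that the first row corresponds to pass emu and the first column to pass hdw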

    for name in histo_names:
        i = 0
        c.Clear()
        c.Divide(2, 2)
        for outcome, hs in histos.items():
            h = histos[outcome][name]
            c.cd(order[i])
            h.Draw('h text')
            c.Update()
            i += 1
#        h = histos['fail_em_pass_hw'][name]
#        h.Draw('h text')
#        c.Update()

        c.SaveAs(name + '.png')
        c.SaveAs(name + '.root')

    i = 0
    c.Clear()
    c.Divide(2, 2)
    for outcome, h in histos2.items():
        c.cd(order[i])
        h.Draw('Colz')
        c.Update()
        i += 1


#    h = histos2['fail_em_pass_hw']
#    h.Draw('Colz')
#    c.Update()

#        if verbose:
#            h.Print("all")
#    if verbose:
#        print('\n')

    c.SaveAs('PhiEta_mu6mu4.png')
    c.SaveAs('PhiEta_mu6mu4.root')
Example #9
0
def main():
    difference_dphi = 0  #CHANGE
    total_dphi = 0
    usage = ("Usage : %prog [options] filename"
             "\n Examples :"
             "\n %prog  -v tmptrig.root")

    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-n',
                      '--num-events',
                      default=None,
                      type=int,
                      help='number of events to process (default all)')
    parser.add_option('-s',
                      '--skip-events',
                      default=None,
                      type=int,
                      help='number of events to skip (default none)')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-d', '--debug', default=False, action='store_true')
    parser.add_option('-t', '--treename', default='trig')

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
    verbose = options.verbose
    debug = options.debug
    if verbose:
        utils.print_running_conditions(parser, options)

    input_filenames = utils.read_filename_arguments(args[0], options)
    if verbose:
        print 'Input files:'
        print '\n'.join(input_filenames)
    chain = R.TChain(options.treename)
    for input_filename in input_filenames:
        chain.Add(
            input_filename
        )  # the chain becomes a concatenation of the input .root files
    num_available = chain.GetEntries()
    num_skip = options.skip_events
    num_toprocess = number_of_entries_to_process(num_available, options)
    if verbose:
        print "About to process %s (out of %d) entries: " % (num_toprocess,
                                                             num_available)
    # # test: print all branches available in the tree
    # print 'chain ',chain
    # print 'branches:'
    # print 'list of branches ',chain.GetListOfBranches()
    # print '\n'.join([k.GetName() for k in chain.GetListOfBranches()])
    # print
    # return

    iEntry = 0
    possible_outcomes = [
        'pass_em_pass_hw', 'pass_em_fail_hw', 'fail_em_pass_hw',
        'fail_em_fail_hw'
    ]
    algo_counters = defaultdict(int)
    item_counters = defaultdict(int)
    valid_counters = {k: 0 for k in ['overflow'] + possible_outcomes}

    histos = {}
    histos2 = {}
    lvl1item_name = 'L1_LFV-MU6'
    algorithm_name = '0DR15-2MU6ab'

    l = lvl1item_name
    num_binning = (9, -0.5, 8.5)
    dr_binning = (30, 0, 6)
    binning_0dr15 = (15, 0.0, 1.5)
    pt_binning = (8, 3500.0, 11500.0)
    angle_binning = (28, -3.5, 3.5)
    for k in possible_outcomes:  # book one set of histograms per outcome; they stay empty until the event loop fills them
        histos[k] = {
            'n_mu':
            R.TH1F('n_mu' + '_' + k, l + '; N input l1mus', *num_binning),
            'n_mu6ab':
            R.TH1F('n_mu6ab' + '_' + k, l + '; N mu6 muons', *num_binning),
            'n_pairs_mu6_0dr15':
            R.TH1F('n_pairs_mu6_0dr15' + '_' + k, l + '; N mu6_0dr15 pairs',
                   *num_binning),
            'n_pairs_0dr15':
            R.TH1F('n_pairs_0dr15' + '_' + k, l + '; N 0dr15 pairs',
                   *num_binning),
            'n_pairs_mu6ab':
            R.TH1F('n_pairs_mu6ab' + '_' + k, l + '; N mu6 pairs',
                   *num_binning),
            'n_cand_pairs':
            R.TH1F('n_cand_pairs' + '_' + k, l + '; N candidate pairs',
                   *num_binning),
            'dr_min':
            R.TH1F('dr_min' + '_' + k, l + '; min #DeltaR best candidate pair',
                   *dr_binning),
            'dr_0dr15':
            R.TH1F('dr_0dr15' + '_' + k, l + '; #DeltaR 0dr15 pairs',
                   *binning_0dr15),
            'dr_mu6':
            R.TH1F('dr_mu6' + '_' + k, l + '; #DeltaR mu6 pairs', *dr_binning),
            'dr_mu6_0dr15':
            R.TH1F('dr_mu6_0dr15' + '_' + k, l + '; #DeltaR mu6_0dr15',
                   *binning_0dr15),
            'dr_any':
            R.TH1F('dr_any' + '_' + k, l + '; #DeltaR any candidate pair',
                   *dr_binning),
            'Phi_mu6':
            R.TH1F('Phi_mu6' + '_' + k, l + '; Phi angle any mu6 muon',
                   *angle_binning),
            'Eta_mu6':
            R.TH1F('Eta_mu6' + '_' + k, l + '; Eta angle any mu6 muon',
                   *angle_binning),
            'pt_0dr15':
            R.TH1F('pt_0dr15' + '_' + k, l + '; #Pt 0dr15 muons', *pt_binning),
            'pt_any':
            R.TH1F('pt_any' + '_' + k, l + '; #Pt any muon', *pt_binning),
        }
        histos2[k] = R.TH2F('PhiEta_mu6' + '_' + k,
                            l + '; Phi angle any mu6; Eta angle any mu6',
                            *2 * angle_binning)

    histo_names = [
        name for name, histo in histos[possible_outcomes[0]].items()
    ]

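    # event loop: decode the L1_LFV-MU6 item bits, build the hardware muon TOBs, emulate the 2MU6ab and 0DR15 requirements, and fill the histograms per outcome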
    for iEvent, event in enumerate(chain):
        if num_skip and iEvent < num_skip: continue
        if iEntry > num_toprocess: break
        # see how branches are created in TopoNtuple.py
        item_bits = item_tbits2bits(
            getattr(event,
                    lvl1item_name.replace('-', '_') + '_iBits'))
        increment_counters(item_counters, item_bits)
        # # These are not filled, so don't bother for now # DG-2016-06-23
        # algo_bits = algo_tbits2bits(getattr(event, algorithm_name+'_0_aBits'))
        # increment_counters(algo_counters, algo_bits)
        pass_hw = item_bits['TDT_TBP']
        pass_sim = item_bits['L1SIMULATED']

        # overflown = algo_bits['OVERFLOWN']
        # if overflown:
        #     valid_counters['overflow'] += 1
        #     continue
        # emTobs = [EmTob(w) for w in event.emTobs]
        # jetTobs = [JetTob(w) for w in event.jetTobs]
        # tauTobs = [TauTob(w) for w in event.tauTobs]
        # if debug:
        #     print 'emTobs[%d]' % len(emTobs)
        #     for i, et in enumerate(emTobs):
        #         print "[%d] (%f, %f)"%(i, et.eta(), et.phi())
        #     print 'jetTobs[%d]' % len(jetTobs)
        #     for i, jt in enumerate(jetTobs):
        #         print "[%d] (%f, %f)"%(i, jt.eta(), jt.phi())
        #     print 'tauTobs[%d]' % len(tauTobs)
        #     for i, tt in enumerate(tauTobs):
        #         print "[%d] (%f, %f)"%(i, tt.eta(), tt.phi())

        # these are EnhancedMuonTOB objects
        #        muons = [Muon(tob.pt, tob.eta, tob.phi) for tob in event.hdwMuonTOB
        #                 if tob.bcn==0] # only pick the ones from bunch crossing number 0
        muons = [
            Muon(tob.pt, tob.eta, tob.phi) for tob in event.hdwMuonTOB
            if tob.bcn == 0
        ]  # only pick the ones from bunch crossing number 0

        list_mu6ab = algo_MU6ab(muons)  #mu6 list
        #        Phi3 = algo_PHI3(muons)
        #        if not Phi3:
        #            continue
        list_0dr15_pairs, list_pairs, list_0dr15 = algo_0DR15(
            muons, muonList=True)  #0dr15 pair list
        #aux = [muon for muon, Pt in list_mu6ab]
        list_mu6_0dr15_pairs, list_mu6_pairs = algo_0DR15(
            [muon for muon in list_mu6ab],
            printout=pass_hw)  #mu6_0dr15 pair list
        list_0dr15_mu6 = algo_MU6ab_pairs(list_0dr15_pairs)

        pass_emul = len(
            list_mu6_0dr15_pairs)  #returns true if 2mu6ab and 0dr15
        #pass_emul = len(list_0dr15_mu6) or len(list_mu6_0dr15_pairs)
        #pass_emul = len(list_0dr15_mu6)

        outcome = ('pass_em_pass_hw'
                   if pass_hw and pass_emul else 'pass_em_fail_hw' if pass_emul
                   else 'fail_em_pass_hw' if pass_hw else 'fail_em_fail_hw')
        valid_counters[outcome] += 1
        fill_histos(histos[outcome], histos2[outcome], muons, list_mu6ab,
                    list_0dr15, list_0dr15_pairs, list_pairs,
                    list_mu6_0dr15_pairs, list_mu6_pairs)  #fill histograms
        if outcome == 'fail_em_pass_hw':
            print("runNumber = {0:d}  eventNumber = {1:d}".format(
                event.runNumber, event.eventNumber))
            print("emuMuonTOB")
            for i, mu in enumerate(event.emuMuonTOB):
                print("<{:d}>: Pt = {:.2f} Phi = {:.2f} Eta = {:.2f}".format(
                    i, mu.pt, mu.phi, mu.eta))

            print("hdwMuonTOB")
            for i, mu in enumerate(event.hdwMuonTOB):
                print("<{:d}>: Pt = {:.2f} Phi = {:.2f} Eta = {:.2f}".format(
                    i, mu.pt, mu.phi, mu.eta))

        if debug and pass_hw:
            print "passed, %d muons" % len(muons)
        iEntry += 1

    print 'algo_counters:'
    pprint(dict(algo_counters))
    print 'item_counters:'
    pprint(dict(item_counters))
    print 'valid counters:'
    pprint(dict(valid_counters))

    #print errors
    p_p = valid_counters['pass_em_pass_hw']
    p_f = valid_counters['pass_em_fail_hw']
    f_p = valid_counters['fail_em_pass_hw']
    f_f = valid_counters['fail_em_fail_hw']

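    # discordance = fraction of events where the emulated and hardware decisions disagree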
    total_inputs = p_p + p_f + f_p + f_f
    total_pass_em = p_p + p_f
    total_pass_hw = f_p + p_p
    total_discordance = 100. * (f_p + p_f) / total_inputs
    pass_em_discordance = 100. * p_f / total_pass_em
    pass_hw_discordance = 100. * f_p / total_pass_hw
    print('  total   error {:.2f}%'.format(total_discordance))
    print('  em pass error {:.2f}%'.format(pass_em_discordance))
    print('  hw pass error {:.2f}%'.format(pass_hw_discordance))
    #    print('Difference Dphi = {:.2f}%'.format(100.*difference_dphi/total_dphi))

    c = R.TCanvas('c')
    order = [
        2, 4, 3, 1
    ]  # reorders the pads so that the first row corresponds to pass emu and the first column to pass hdw

    for name in histo_names:
        i = 0
        c.Clear()
        c.Divide(2, 2)
        for outcome, hs in histos.items():
            h = histos[outcome][name]
            c.cd(order[i])
            h.Draw('h text')
            c.Update()
            i += 1
            if verbose:
                print('\n')
                h.Print("all")
                print("- Mean     = %.3f +- %.3f" %
                      (h.GetMean(), h.GetMeanError()))
                print("- Std Dev  = %.3f +- %.3f" %
                      (h.GetStdDev(), h.GetStdDevError()))
                print("- Skewness = %.3f" % (h.GetSkewness()))
                print("- Kurtosis = %.3f" % (h.GetKurtosis()))
        if verbose:
            print('\n\n')

        c.SaveAs(name + '.png')
        c.SaveAs(name + '.root')

    i = 0
    c.Clear()
    c.Divide(2, 2)
    for outcome, h in histos2.items():
        c.cd(order[i])
        h.Draw('Colz')
        c.Update()
        i += 1
        if verbose:
            h.Print("all")
    if verbose:
        print('\n')

    c.SaveAs('PhiEta_mu6.png')
    c.SaveAs('PhiEta_mu6.root')
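
The trigger-emulation comparison above (and the similar one in Example #11 below) uses the same bookkeeping: each event is classified by whether the emulated decision and the hardware decision agree, and the disagreement is summarized as discordance percentages. Below is a minimal standalone sketch of that logic; the helper names classify and discordance_summary are illustrative and not taken from the original code.

from collections import defaultdict

def classify(pass_emul, pass_hw):
    # four-way outcome: emulation decision vs hardware decision
    return ('pass_em_pass_hw' if pass_hw and pass_emul else
            'pass_em_fail_hw' if pass_emul else
            'fail_em_pass_hw' if pass_hw else
            'fail_em_fail_hw')

def discordance_summary(counters):
    p_p = counters['pass_em_pass_hw']
    p_f = counters['pass_em_fail_hw']
    f_p = counters['fail_em_pass_hw']
    f_f = counters['fail_em_fail_hw']
    total = p_p + p_f + f_p + f_f
    return {'total': 100. * (f_p + p_f) / total,
            'em_pass': 100. * p_f / (p_p + p_f),
            'hw_pass': 100. * f_p / (p_p + f_p)}

counters = defaultdict(int)
for em, hw in [(True, True), (True, False), (False, True), (True, True), (False, False)]:
    counters[classify(em, hw)] += 1
print(discordance_summary(counters))
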
def main():
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-g', '--group', help='group to be processed (used only in fill mode)')
    parser.add_option('-i', '--input-dir', default='./out/fakerate')
    parser.add_option('-o', '--output-dir', default='./out/fake_scale_factor')
    parser.add_option('-l', '--lepton', default='el', help='either el or mu')
    parser.add_option('-r', '--region', help='one of the regions for which we saved the fake ntuples')
    parser.add_option('--samples-dir', default='samples/', help='directory with the list of samples; default ./samples/')
    parser.add_option('-T', '--tight-def', help='on-the-fly tight def, one of defs in fakeUtils.py: fakeu.lepIsTight_std, etc.')
    parser.add_option('-f', '--fill-histos', action='store_true', default=False, help='force fill (default only if needed)')
    parser.add_option('--keep-real', action='store_true', default=False, help='do not subtract real (to get real lep efficiency)')
    parser.add_option('--debug', action='store_true')
    parser.add_option('--verbose', action='store_true')
    parser.add_option('--disable-cache', action='store_true', help='disable the entry cache')
    (options, args) = parser.parse_args()
    inputDir = options.input_dir
    outputDir = options.output_dir
    lepton = options.lepton
    region = options.region
    keepreal = options.keep_real
    debug = options.debug
    verbose = options.verbose
    if lepton not in ['el', 'mu']: parser.error("invalid lepton '%s'" % lepton)
    regions = kin.selection_formulas().keys()
    assert region in regions, "invalid region '%s', must be one of %s" % (
        region, str(sorted(regions)))
    regions = [region]

    dataset.Dataset.verbose_parsing = True if debug else False
    groups = dataset.DatasetGroup.build_groups_from_files_in_dir(
        options.samples_dir)
    if options.group: groups = [g for g in groups if g.name == options.group]
    group_names = [g.name for g in groups]

    outputDir = outputDir + '/' + region + '/' + lepton  # split the output in subdirectories, so we don't overwrite things
    mkdirIfNeeded(outputDir)
    templateOutputFilename = "scale_factor_{0}.root".format(lepton)
    outputFileName = os.path.join(outputDir, templateOutputFilename)
    cacheFileName = outputFileName.replace('.root', '_cache.root')
    doFillHistograms = options.fill_histos or not os.path.exists(cacheFileName)
    onthefly_tight_def = eval(options.tight_def) if options.tight_def else None  # eval will take care of aborting on typos
    if verbose: utils.print_running_conditions(parser, options)
    vars = ['mt0', 'mt1', 'pt0', 'pt1', 'eta1', 'pt1_eta1']
    #fill histos
    if doFillHistograms:
        start_time = time.clock()
        num_processed_entries = 0
        histosPerGroup = bookHistos(vars, group_names, region=region)
        histosPerSource = bookHistosPerSource(vars,
                                              leptonSources,
                                              region=region)
        histosPerGroupPerSource = bookHistosPerSamplePerSource(vars,
                                                               group_names,
                                                               leptonSources,
                                                               region=region)
        for group in groups:
            tree_name = 'hlfv_tuple'
            chain = IndexedChain(tree_name)
            for ds in group.datasets:
                fname = os.path.join(inputDir, ds.name + '.root')
                if os.path.exists(fname):
                    chain.Add(fname)
            if verbose:
                print "{0} : {1} entries from {2} samples".format(
                    group.name, chain.GetEntries(), len(group.datasets))
            chain.cache_directory = os.path.abspath('./selection_cache/' +
                                                    group.name + '/')
            tcuts = [r.TCut(reg, selection_formulas()[reg]) for reg in regions]
            print 'tcuts ', [c.GetName() for c in tcuts]
            chain.retrieve_entrylists(tcuts)
            counters_pre, histos_pre = dict(), dict()
            counters_npre, histos_npre = dict(), dict()
            print 'tcuts_with_existing_list ', str(
                [c.GetName() for c in chain.tcuts_with_existing_list()])
            print 'tcuts_without_existing_list ', str(
                [c.GetName() for c in chain.tcuts_without_existing_list()])
            cached_tcuts = [] if options.disable_cache else chain.tcuts_with_existing_list()
            print 'cached_tcuts ', [c.GetName() for c in cached_tcuts]
            uncached_tcuts = tcuts if options.disable_cache else chain.tcuts_without_existing_list()
            print 'todo: skip cuts for which the histo files are there'
            if verbose:
                print " --- group : {0} ---".format(group.name)
                print '\n\t'.join(chain.filenames)
            if verbose:
                print 'filling cached cuts: ', ' '.join(
                    [c.GetName() for c in cached_tcuts])
            if verbose:
                print "%s : %d entries" % (group.name, chain.GetEntries())
            histosThisGroup = histosPerGroup[group.name]
            histosThisGroupPerSource = dict(
                (v, histosPerGroupPerSource[v][group.name])
                for v in histosPerGroupPerSource.keys())
            for cut in cached_tcuts:
                print 'cached_tcut ', cut
                chain.preselect(cut)
                num_processed_entries += fillHistos(
                    chain,
                    histosThisGroup,
                    histosPerSource,
                    histosThisGroupPerSource,
                    lepton,
                    group,
                    cut,
                    cut_is_cached=True,
                    onthefly_tight_def=onthefly_tight_def,
                    verbose=verbose)
            if verbose:
                print 'filling uncached cuts: ', ' '.join(
                    [c.GetName() for c in uncached_tcuts])
            if uncached_tcuts:
                assert len(uncached_tcuts) == 1, "expecting only one cut, got {}".format(len(uncached_tcuts))
                cut = uncached_tcuts[0]
                chain.preselect(None)
                num_processed_entries += fillHistos(
                    chain,
                    histosThisGroup,
                    histosPerSource,
                    histosThisGroupPerSource,
                    lepton,
                    group,
                    cut,
                    cut_is_cached=False,
                    onthefly_tight_def=onthefly_tight_def,
                    verbose=verbose)
                chain.save_lists()

        writeHistos(cacheFileName, histosPerGroup, histosPerSource,
                    histosPerGroupPerSource, verbose)
        end_time = time.clock()
        delta_time = end_time - start_time
        if verbose:
            print("processed {0:d} entries ".format(num_processed_entries) +
                  "in " +
                  ("{0:d} min ".format(int(delta_time / 60))
                   if delta_time > 60 else "{0:.1f} s ".format(delta_time)) +
                  "({0:.1f} kHz)".format(num_processed_entries / delta_time))
    # return
    # compute scale factors
    histosPerGroup = fetchHistos(cacheFileName,
                                 histoNames(vars, group_names, region),
                                 verbose)
    histosPerSource = fetchHistos(
        cacheFileName, histoNamesPerSource(vars, leptonSources, region),
        verbose)
    histosPerSamplePerSource = fetchHistos(
        cacheFileName,
        histoNamesPerSamplePerSource(vars, group_names, leptonSources, region),
        verbose)
    plotStackedHistos(histosPerGroup, outputDir + '/by_group', region, verbose)
    plotStackedHistosSources(histosPerSource, outputDir + '/by_source', region,
                             verbose)
    plotPerSourceEff(histosPerVar=histosPerSource,
                     outputDir=outputDir + '/by_source',
                     lepton=lepton,
                     region=region,
                     verbose=verbose)
    for g in group_names:
        hps = dict((v, histosPerSamplePerSource[v][g]) for v in vars)
        plotPerSourceEff(histosPerVar=hps,
                         outputDir=outputDir,
                         lepton=lepton,
                         region=region,
                         sample=g,
                         verbose=verbose)

    hn_sf_eta = histoname_sf_vs_eta(lepton)
    hn_sf_pt = histoname_sf_vs_pt(lepton)
    hn_da_eta = histoname_data_fake_eff_vs_eta(lepton)
    hn_da_pt = histoname_data_fake_eff_vs_pt(lepton)
    subtractReal = not keepreal
    objs_eta = subtractRealAndComputeScaleFactor(histosPerGroup, 'eta1',
                                                 hn_sf_eta, hn_da_eta,
                                                 outputDir, region,
                                                 subtractReal, verbose)
    objs_pt = subtractRealAndComputeScaleFactor(histosPerGroup, 'pt1',
                                                hn_sf_pt, hn_da_pt, outputDir,
                                                region, subtractReal, verbose)
    objs_pt_eta = subtractRealAndComputeScaleFactor(
        histosPerGroup, 'pt1_eta1', histoname_sf_vs_pt_eta(lepton),
        histoname_data_fake_eff_vs_pt_eta(lepton), outputDir, region,
        subtractReal, verbose)
    rootUtils.writeObjectsToFile(
        outputFileName, dictSum(dictSum(objs_eta, objs_pt), objs_pt_eta),
        verbose)
    if verbose: print "saved scale factors to %s" % outputFileName
Example #11
0
def main():
    usage = ("Usage : %prog [options] filename"
             "\n Examples :"
             "\n %prog  -v tmptrig.root"
             )

    parser = optparse.OptionParser(usage = usage)
    parser.add_option('-n', '--num-events', default=None, type=int, help='number of events to process (default all)')
    parser.add_option('-s', '--skip-events', default=None, type=int, help='number of events to skip (default none)')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-d', '--debug', default=False, action='store_true')
    parser.add_option('-t', '--treename', default='trig')

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("incorrect number of arguments")
    verbose = options.verbose
    debug = options.debug
    if verbose:
        utils.print_running_conditions(parser, options)

    input_filenames = utils.read_filename_arguments(args[0], options)
    if verbose:
        print 'Input files:'
        print '\n'.join(input_filenames)
    chain = R.TChain(options.treename)
    for input_filename in input_filenames:
        chain.Add(input_filename)  # the chain becomes a collection of the input .root files
    num_available = chain.GetEntries()
    num_skip = options.skip_events
    num_toprocess = number_of_entries_to_process(num_available, options)
    if verbose:
        print "About to process %s (out of %d) entries: " % (num_toprocess, num_available)
    # # test: print all branches available in the tree
    # print 'chain ',chain
    # print 'branches:'
    # print 'list of branches ',chain.GetListOfBranches()
    # print '\n'.join([k.GetName() for k in chain.GetListOfBranches()])
    # print
    # return

    iEntry = 0
    possible_outcomes = ['pass_em_pass_hw', 'pass_em_fail_hw', 'fail_em_pass_hw', 'fail_em_fail_hw']
    algo_counters = defaultdict(int)
    item_counters = defaultdict(int)
    valid_counters = {k:0 for k in ['overflow'] + possible_outcomes}

    histos = {}
    histos2= {}
    lvl1item_name  = 'L1_BPH-2M9-MU6MU4_BPH-0DR15-MU6MU4'           #ASK DAVIDE FOR NAME
    algorithm_name = '0DR15-MU4MU6'

    l = lvl1item_name
    num_binning   = (9, -0.5, 8.5)
    dr_binning    = (30, -0.1, 5.9)
    binning_2im9  = (15, 0.0, 1.5)
    pt_binning    = (8, 3500.0, 11500.0)
    angle_binning = (28, -3.5, 3.5)
    for k in possible_outcomes: # initialize the histograms; they remain empty until the event loop below fills them
        histos[k] = {
            'n_mu'                     : R.TH1F('n_mu'+'_'+k                     , l+'; N input l1mus'                   , *num_binning),
            'n_mu6mu4'                 : R.TH1F('n_mu6mu4'+'_'+k                 , l+'; N mu6mu4 muons'                  , *num_binning),
            'n_pairs_mu6mu4_2m9_0dr15' : R.TH1F('n_pairs_mu6mu4_2m9_0dr15'+'_'+k , l+'; N mu6mu4_2m9_0dr15 pairs'        , *num_binning),
            'n_pairs_mu6mu4'           : R.TH1F('n_pairs_mu6mu4'+'_'+k           , l+'; N mu6mu4 pairs'                  , *num_binning),
            'n_cand_pairs'             : R.TH1F('n_cand_pairs'+'_'+k             , l+'; N candidate pairs'               , *num_binning),
            'dr_min_mu6mu4'            : R.TH1F('dr_min_mu6mu4'+'_'+k            , l+'; min #DeltaR best candidate pair' , *dr_binning),
            'dr_mu6mu4'                : R.TH1F('dr_mu6mu4'+'_'+k                , l+'; #DeltaR mu6mu4 pairs'            , *dr_binning),
            'dr_mu6mu4_2m9_0dr15'      : R.TH1F('dr_mu6mu4_2m9_0dr15'+'_'+k      , l+'; #DeltaR mu6mu4_2m9_0dr15'        , *binning_2im9),
            'Phi_mu6mu4'               : R.TH1F('Phi_mu6mu4'+'_'+k               , l+'; Phi angle any mu6mu4 muon'       , *angle_binning),
            'Eta_mu6mu4'               : R.TH1F('Eta_mu6mu4'+'_'+k               , l+'; Eta angle any mu6mu4 muon'       , *angle_binning),
            'pt_any'                   : R.TH1F('pt_any'+'_'+k                   , l+'; #Pt any muon'                    , *pt_binning),
            }
        histos2[k] = R.TH2F('PhiEta_mu6mu4'+'_'+k, l+'; Phi angle any mu6mu4; Eta angle any mu6mu4', *(2*angle_binning))  # repeat the 1D binning for both x and y axes

    histo_names = [name for name, histo in histos[possible_outcomes[0]].items()]

    for iEvent, event in enumerate(chain):
        if num_skip and iEvent<num_skip: continue
        if iEntry > num_toprocess: break
        # see how branches are created in TopoNtuple.py
        item_bits = item_tbits2bits(getattr(event,
                                            lvl1item_name.replace('-','_')+'_iBits'))
        increment_counters(item_counters, item_bits)
        # # These are not filled, so don't bother for now # DG-2016-06-23
        # algo_bits = algo_tbits2bits(getattr(event, algorithm_name+'_0_aBits'))
        # increment_counters(algo_counters, algo_bits)
        pass_hw = item_bits['TDT_TBP']
        pass_sim = item_bits['L1SIMULATED']
        
        # overflown = algo_bits['OVERFLOWN']
        # if overflown:
        #     valid_counters['overflow'] += 1
        #     continue
        # emTobs = [EmTob(w) for w in event.emTobs]
        # jetTobs = [JetTob(w) for w in event.jetTobs]
        # tauTobs = [TauTob(w) for w in event.tauTobs]
        # if debug:
        #     print 'emTobs[%d]' % len(emTobs)
        #     for i, et in enumerate(emTobs):
        #         print "[%d] (%f, %f)"%(i, et.eta(), et.phi())
        #     print 'jetTobs[%d]' % len(jetTobs)
        #     for i, jt in enumerate(jetTobs):
        #         print "[%d] (%f, %f)"%(i, jt.eta(), jt.phi())
        #     print 'tauTobs[%d]' % len(tauTobs)
        #     for i, tt in enumerate(tauTobs):
        #         print "[%d] (%f, %f)"%(i, tt.eta(), tt.phi())

        # these are EnhancedMuonTOB objects
        muons = [Muon(tob.pt, tob.eta, tob.phi) for tob in event.hdwMuonTOB
                 if tob.bcn==0] # only pick the ones from bunch crossing number 0

        list_mu4 = algo_MU4(muons) # MU4 list
        list_mu6mu4_2m9_0dr15_pairs, list_mu6mu4_pairs = algo_2M9_0DR15(list_mu4) # 2M9_0DR15 pair lists
        list_2m9_0dr15_mu6mu4 = algo_MU6MU4_pairs(list_mu6mu4_2m9_0dr15_pairs)

        pass_emul = len(list_mu6mu4_2m9_0dr15_pairs)   # nonzero (truthy) when the event satisfies MU6MU4, 2M9 and 0DR15

        outcome = ('pass_em_pass_hw' if pass_hw and pass_emul else
                   'pass_em_fail_hw' if pass_emul else
                   'fail_em_pass_hw' if pass_hw else
                   'fail_em_fail_hw')
        valid_counters[outcome] += 1
        fill_histos(histos[outcome], histos2[outcome], muons, list_mu4,
                    list_mu6mu4_2m9_0dr15_pairs, list_mu6mu4_pairs) #fill histograms
        
        if debug and pass_hw:
            print "passed, %d muons" % len(muons)
        iEntry += 1


    print 'algo_counters:'
    pprint(dict(algo_counters))
    print 'item_counters:'
    pprint(dict(item_counters))
    print 'valid counters:'
    pprint(dict(valid_counters))

    #print errors
    p_p = valid_counters['pass_em_pass_hw']
    p_f = valid_counters['pass_em_fail_hw']
    f_p = valid_counters['fail_em_pass_hw']
    f_f = valid_counters['fail_em_fail_hw']

    total_inputs = p_p + p_f + f_p + f_f
    total_pass_em = p_p + p_f
    total_pass_hw = f_p + p_p
    total_discordance = 100. * (f_p + p_f) / total_inputs
    pass_em_discordance = 100. * p_f / total_pass_em
    pass_hw_discordance = 100. * f_p / total_pass_hw
    print('  total   error {:.2f}%'.format(total_discordance))
    print('  em pass error {:.2f}%'.format(pass_em_discordance))
    print('  hw pass error {:.2f}%'.format(pass_hw_discordance))

    c = R.TCanvas('c')
    order = [2, 4, 3, 1]  # same pad reordering as above: first row = pass emu, first column = pass hdw
    
    for name in histo_names:
        i = 0
        c.Clear()
        c.Divide(2,2)
        for outcome, hs in histos.items():
            h = histos[outcome][name]
            c.cd(order[i])
            h.Draw('h text')
            c.Update()
            i+=1
        
        c.SaveAs(name+'.png')
        c.SaveAs(name+'.root')
    
    i=0
    c.Clear()
    c.Divide(2,2)
    for outcome, h in histos2.items(): 
        c.cd(order[i])
        h.Draw('Colz')
        c.Update()
        i+=1
        if verbose:
            h.Print("all")
    if verbose:
        print('\n')

    c.SaveAs('PhiEta_mu6mu4.png')
    c.SaveAs('PhiEta_mu6mu4.root')
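
Both emulation examples remap the four outcome histograms onto a 2x2 canvas with order = [2, 4, 3, 1]; the hard-coded values implicitly assume a fixed dict iteration order so that each outcome lands on the intended pad. Below is a small sketch with the outcome-to-pad mapping made explicit; the names and dummy filling are illustrative only.

import ROOT as R

# confusion-matrix layout: first row = pass emulation, first column = pass hardware
pad_of = {'pass_em_pass_hw': 1, 'pass_em_fail_hw': 2,
          'fail_em_pass_hw': 3, 'fail_em_fail_hw': 4}

c = R.TCanvas('c_demo')
c.Divide(2, 2)  # pads are numbered 1..4, left-to-right, top-to-bottom
demo_histos = {}
for outcome, pad in pad_of.items():
    c.cd(pad)
    h = R.TH1F('h_' + outcome, outcome, 10, 0.0, 10.0)
    for _ in range(10 * pad):  # dummy entries just to have something to draw
        h.Fill(pad)
    h.Draw('h text')
    demo_histos[outcome] = h  # keep references so the histograms stay alive
c.SaveAs('outcome_layout_demo.png')
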