コード例 #1
0
    def process_efficiency(self):
        """Produce the per-period efficiency histograms.

        Runs one processing job per input ROOT file in parallel via
        ``self.parallelizer`` and merges the per-file outputs into
        ``self.n_fileeff``.
        """
        print("Doing efficiencies", self.mcordata, self.period)
        print("Using run selection for eff histo",
              self.runlistrigger[self.triggerbit], "for period", self.period)
        # Report whether the ML selection or the standard selection is active.
        if self.doml is True:
            print("Doing ml analysis")
        else:
            print("No extra selection needed since we are doing std analysis")
        # One second-variable bin per entry of lvar2_binmin; report which
        # bins get their efficiencies reweighted.
        for ibin2 in range(len(self.lvar2_binmin)):
            if self.corr_eff_mult[ibin2] is True:
                prefix = "Reweighting efficiencies for bin"
            else:
                prefix = "Not reweighting efficiencies for bin"
            print(prefix, ibin2)

        create_folder_struc(self.d_results, self.l_path)
        # One single-element argument tuple per input-file index.
        job_args = [(ifile,) for ifile in range(len(self.l_root))]
        self.parallelizer(self.process_efficiency_single, job_args, self.p_chunksizeunp)
        merge_dir = (f"/data/tmp/hadd/{self.case}_{self.typean}/"
                     f"histoeff_{self.period}/{get_timestamp_string()}/")
        mergerootfiles(self.l_histoeff, self.n_fileeff, merge_dir)
コード例 #2
0
    def process_histomass(self):
        """Produce the per-period invariant-mass histograms.

        Loads any custom cuts, runs one processing job per input ROOT file
        in parallel via ``self.parallelizer`` and merges the per-file
        outputs into ``self.n_filemass``.
        """
        print("Doing masshisto", self.mcordata, self.period)
        print("Using run selection for mass histo",
              self.runlistrigger, "for period", self.period)
        # Report whether the ML selection or the standard selection is active.
        if self.doml is True:
            print("Doing ml analysis")
        else:
            print("No extra selection needed since we are doing std analysis")

        # Load potential custom cuts
        self.load_cuts()

        create_folder_struc(self.d_results, self.l_path)
        # One single-element argument tuple per input-file index.
        job_args = [(ifile,) for ifile in range(len(self.l_root))]
        self.parallelizer(self.process_histomass_single, job_args,
                          self.p_chunksizeunp)  # pylint: disable=no-member
        merge_dir = (f"/data/tmp/hadd/{self.case}_{self.typean}/"
                     f"mass_{self.period}/{get_timestamp_string()}/")
        mergerootfiles(self.l_histomass, self.n_filemass, merge_dir)
コード例 #3
0
 def multi_valevents(self):
     """Run event validation for each production period, then merge the
     per-period validation ROOT files into the all-period output."""
     for iper in range(self.prodnumber):
         self.process_listsample[iper].process_valevents_par()
     merge_dir = (f"/data/tmp/hadd/{self.case}_{self.typean}/"
                  f"val_all/{get_timestamp_string()}/")
     mergerootfiles(self.lper_evtvalroot, self.f_evtvalroot_mergedallp,
                    merge_dir)
コード例 #4
0
 def multi_preparenorm(self):
     """Merge the per-period normalisation files into the all-period
     output file."""
     merge_dir = (f"/data/tmp/hadd/{self.case}_{self.typean}/"
                  f"norm_processer/{get_timestamp_string()}/")
     mergerootfiles(self.lper_normfiles, self.f_evtvalroot_mergedallp,
                    merge_dir)
コード例 #5
0
 def multi_histomass(self):
     """Run mass-histogram processing for the enabled periods, then merge
     the per-period files into the all-period output."""
     for iper in range(self.prodnumber):
         # Skip periods that are deselected in the configuration.
         if self.p_useperiod[iper] == 1:
             self.process_listsample[iper].process_histomass()
     merge_dir = (f"/data/tmp/hadd/{self.case}_{self.typean}/"
                  f"mass/{get_timestamp_string()}/")
     mergerootfiles(self.lper_filemass, self.filemass_mergedall, merge_dir)
コード例 #6
0
    def multi_cutvariation(self, domass, doeff, dofit, docross):
        """Run the cut-variation systematics chain.

        Args:
            domass: produce the cut-varied invariant-mass histograms.
            doeff: produce the cut-varied efficiencies.
            dofit: fit the cut-varied mass histograms (per period and merged).
            docross: compute the cut-varied normalised yields and make plots.
        """
        # The cut-variation limits are only computed when one of the
        # mass/eff/fit steps runs AND at least one period is selected via
        # p_useperiodforlimits. Initialise them so the later steps cannot
        # hit an UnboundLocalError when no period was selected.
        min_cv_cut = None
        max_cv_cut = None
        need_limits = domass is True or doeff is True or dofit is True

        if need_limits:
            for indexp in range(self.prodnumber):
                if self.p_useperiodforlimits[indexp] == 1:
                    print("Processing systematics period: ", indexp)
                    # NOTE: if several periods are selected, the limits of
                    # the last selected period win.
                    min_cv_cut, max_cv_cut = \
                        self.process_listsample[indexp].define_cutvariation_limits()

        if domass is True:
            for indexp in range(self.prodnumber):
                if self.p_useperiod[indexp] == 1:
                    self.process_listsample[indexp].cutvariation_masshistos(
                        min_cv_cut, max_cv_cut)
            tmp_merged = f"/data/tmp/hadd/{self.case}_{self.typean}/cutvar_mass/" \
                         f"{get_timestamp_string()}/"
            mergerootfiles(self.lper_filemass_cutvar,
                           self.filemass_cutvar_mergedall, tmp_merged)

        if doeff is True:
            for indexp in range(self.prodnumber):
                if self.p_useperiod[indexp] == 1:
                    self.process_listsample[indexp].cutvariation_efficiencies(
                        min_cv_cut, max_cv_cut)
            tmp_merged = f"/data/tmp/hadd/{self.case}_{self.typean}/cutvar_eff/" \
                         f"{get_timestamp_string()}/"
            mergerootfiles(self.lper_fileeff_cutvar,
                           self.fileeff_cutvar_mergedall, tmp_merged)

        if dofit is True:
            for indexp in range(self.prodnumber):
                if self.p_useperiod[indexp] == 1:
                    self.process_listsample[indexp].cutvariation_fitter(
                        min_cv_cut, max_cv_cut)
                    self.process_listsample[indexp].cutvariation_efficiency()
            self.myprocesstot.cutvariation_fitter(min_cv_cut, max_cv_cut)
            self.myprocesstot.cutvariation_efficiency()

        if docross is True:
            for indexp in range(self.prodnumber):
                if self.p_useperiod[indexp] == 1:
                    self.process_listsample[indexp].cutvariation_makenormyields()
            self.myprocesstot.cutvariation_makenormyields()

            histnames = ["histoSigmaCorr", "hDirectEffpt", "hFeedDownEffpt",
                         "hRECpt"]
            # Without the limit-producing steps, plots are made with open
            # (None) limits — same as the original branch structure.
            plot_min = min_cv_cut if need_limits else None
            plot_max = max_cv_cut if need_limits else None
            for name in histnames:
                for indexp in range(self.prodnumber):
                    if self.p_useperiod[indexp] == 1:
                        self.process_listsample[indexp].cutvariation_makeplots(
                            name, plot_min, plot_max)
                self.myprocesstot.cutvariation_makeplots(name, plot_min, plot_max)
コード例 #7
0
 def multi_efficiency(self):
     """Run efficiency processing for every production period, then merge
     the per-period efficiency files."""
     for iper in range(self.prodnumber):
         self.process_listsample[iper].process_efficiency()
     mergerootfiles(self.lper_fileeff, self.fileeff_mergedall)
コード例 #8
0
 def multi_histomass(self):
     """Run mass-histogram processing for every production period, then
     merge the per-period mass files."""
     for iper in range(self.prodnumber):
         self.process_listsample[iper].process_histomass()
     mergerootfiles(self.lper_filemass, self.filemass_mergedall)
コード例 #9
0
 def multi_valevents(self):
     """Run event validation for every production period, then merge the
     per-period validation files."""
     for iper in range(self.prodnumber):
         self.process_listsample[iper].process_valevents_par()
     mergerootfiles(self.lper_evtvalroot, self.f_evtvalroot_mergedallp)
コード例 #10
0
 def process_valevents_par(self):
     """Validate events for each original-event file in parallel and merge
     the resulting validation ROOT files into ``self.f_totevtvalroot``."""
     print("doing event validation", self.mcordata, self.period)
     create_folder_struc(self.d_val, self.l_path)
     # One single-element argument tuple per input-file index.
     job_args = [(ifile,) for ifile in range(len(self.l_evtorig))]
     self.parallelizer(self.process_valevents, job_args, self.p_chunksizeskim)
     mergerootfiles(self.l_evtvalroot, self.f_totevtvalroot)
コード例 #11
0
def multi_preparenorm(database, case, typean, doperiodbyperiod):
    """Stage and merge the per-period normalisation files.

    For every period, copies (via a single-input merge) the
    ``correctionsweights.root`` file from the validation directory into the
    corresponding results directory, then merges the selected per-period
    copies into the all-period results file.

    Args:
        database: full analysis configuration dictionary.
        case: analysis case name (used only in the temporary merge path).
        typean: analysis type key into ``database["analysis"]``.
        doperiodbyperiod: if truthy, include the per-period files of the
            periods enabled in ``useperiod`` in the final merge; otherwise
            the final merge gets an empty input list.
    """
    logger = get_logger()

    dlper_valevtroot = database["validation"]["data"]["dir"]
    resultsdata = database["analysis"][typean]["data"]["results"]

    # Per-period source (validation dir) and destination (results dir)
    # paths of the normalisation file.
    lper_normfilesorig = []
    lper_normfiles = []
    for res_path, val_path in zip(resultsdata, dlper_valevtroot):
        lper_normfilesorig.append(join(val_path, "correctionsweights.root"))
        lper_normfiles.append(join(res_path, "correctionsweights.root"))

    f_normmerged = join(database["analysis"][typean]["data"]["resultsallp"],
                        "correctionsweights.root")

    merge_inputs = []
    tmp_merged = f"/data/tmp/hadd/{case}_{typean}/norm_analyzer/{get_timestamp_string()}/"
    useperiod = database["analysis"][typean]["useperiod"]

    # Iterate the zipped path lists directly instead of
    # range(len(resultsdata)): the lists were built from a zip and may be
    # shorter than resultsdata, which would have raised IndexError.
    for indexp, (orig_file, res_file) in enumerate(zip(lper_normfilesorig,
                                                       lper_normfiles)):
        logger.info("Origin path: %s, target path: %s", orig_file, res_file)
        mergerootfiles([orig_file], res_file, tmp_merged)
        if doperiodbyperiod and useperiod[indexp]:
            merge_inputs.append(res_file)

    mergerootfiles(merge_inputs, f_normmerged, tmp_merged)