def _print_likelihood(evaluators):
    """Write per-signal-region likelihood tables and a per-analysis summary.

    evaluators: dict mapping analysis name -> {signal region name -> evaluator},
    where each evaluator carries obs/bkg/bkg_err, a resultCollector and a
    precomputed `likelihood` value (Python 2 dict: iterated with iteritems()).

    Side effects only: writes one columnated file per analysis plus one total
    likelihood file, via the AdvPrint redirection machinery.
    """
    tot_likeli = 0.    # sum of likelihoods over all analyses
    dict_likeli = {}   # analysis name -> summed likelihood over its SRs
    for analysis, v in evaluators.iteritems():
        ana_likeli = 0.
        # Redirect AdvPrint output into the per-analysis likelihood file
        # (second argument presumably truncates the file — TODO confirm).
        AdvPrint.set_cout_file(Info.files['output_evaluation_likelihood'][analysis], True)
        AdvPrint.mute()
        # Header row; columns are aligned later by format_columnated_file().
        AdvPrint.cout("SR o b db s ds likeli")
        for sr, ev in v.iteritems():
            AdvPrint.cout(sr+" " +str(float(ev.obs))+" " +str(float(ev.bkg))+" " +str(float(ev.bkg_err))+" " +str(ev.resultCollector.signal_normevents)+" " +str(ev.resultCollector.signal_err_tot)+" " +str(ev.likelihood))
            ana_likeli += ev.likelihood
        AdvPrint.format_columnated_file(Info.files['output_evaluation_likelihood'][analysis])
        # "#None" restores AdvPrint to screen-only output.
        AdvPrint.set_cout_file("#None")
        AdvPrint.unmute()
        dict_likeli[analysis] = ana_likeli
        tot_likeli += ana_likeli
    # Summary file: one line per analysis plus the grand total.
    AdvPrint.set_cout_file(Info.files['likelihood'], True)
    AdvPrint.mute()
    AdvPrint.cout("Analysis -2lnL")
    for a in dict_likeli:
        AdvPrint.cout(a+" "+str(dict_likeli[a]))
    AdvPrint.cout("\nTotal: "+str(tot_likeli))
    AdvPrint.format_columnated_file(Info.files['likelihood'])
def printInfo(self):
    """Print a short, indented summary of this ROOT-file event source."""
    summary = [
        "\t\t ROOT events",
        "\t\t\t - internal identifier: '" + self.identifier + "'",
        "\t\t\t - path to eventfile: " + self.full_filename,
    ]
    if self.maxEvents > 0:
        summary.append("\t\t\t - at most " + str(self.maxEvents) + " events are analysed")
    if self.processed:
        summary.append("\t\t\t [Events already processed, results from earlier run are used!]")
    for entry in summary:
        AdvPrint.cout(entry)
def printLogo(self):
    """Print the CheckMATE 2 ASCII-art banner to the standard output stream."""
    # NOTE(review): the banner text must be emitted verbatim — do not reflow it.
    AdvPrint.cout(
        """ ____ _ _ __ __ _ _____ _____ ____ / ___| |__ ___ ___| | _| \/ | / \|_ _| ____|___ \ | | | '_ \ / _ \/ __| |/ / |\/| | / _ \ | | | _| __) | | |___| | | | __/ (__| <| | | |/ ___ \| | | |___ / __/ \____|_| |_|\___|\___|_|\_\_| |_/_/ \_\_| |_____|_____| """
    )  # TODO: add cite info?
def printInfo(self):
    """Print a short, indented summary of this ROOT-file event source.

    Lists the internal identifier, the event-file path, the optional event
    cap, and whether results from an earlier run will be reused.
    """
    AdvPrint.cout("\t\t ROOT events")
    AdvPrint.cout("\t\t\t - internal identifier: '" + self.identifier + "'")
    AdvPrint.cout("\t\t\t - path to eventfile: " + self.full_filename)
    # maxEvents <= 0 means "no cap"; only advertise an explicit limit.
    if self.maxEvents > 0:
        AdvPrint.cout("\t\t\t - at most " + str(self.maxEvents) + " events are analysed")
    if self.processed:
        AdvPrint.cout(
            "\t\t\t [Events already processed, results from earlier run are used!]"
        )
def prepare(self):
    """Create all MadGraph5/Pythia8 input cards this run needs.

    Builds, only where the user did not already supply one: the MG5 proc
    card (from self.commandstring), the param card (falling back to the
    global SLHA file), the Pythia8 shower card (from a template plus the
    SLHA reference), the MG5 run card (template with @ecmhalf@/@nevents@/
    @seed@ placeholders substituted), and the me5_configuration file.
    """
    # Base-class preparation first (Pythia8-side setup).
    Pythia8Events.prepare(self)
    if self.commandstring != "":
        self.mg5_cards["proc"] = os.path.join(
            Info.paths["output_mg5"], self.identifier + "_proc_card.dat")
        with open(self.mg5_cards["proc"], "w") as f:
            f.write(self.commandstring)
    # Fall back to the globally configured SLHA file as param card.
    if self.mg5_cards["param"] == "" and Info.files["slha"] != "":
        self.mg5_cards["param"] = Info.files["slha"]
    if self.py8_infile == "":
        self.py8_infile = os.path.join(Info.paths["output_pythia"],
                                       self.identifier + "_showercard.in")
        with open(self.py8_infile, "w") as f:
            # Copy the minimal MG5 shower template, then point Pythia at
            # the SLHA/param card.
            with open(Info.files['pythia_mg5minimal_template'], "r") as g:
                for line in g:
                    f.write(line)
            f.write("SLHA:file = " + self.mg5_cards["param"] + "\n")
    if self.mg5_cards["run"] == "":
        # copy template and fill relevant information
        self.mg5_cards["run"] = os.path.join(
            Info.paths["output_mg5"], self.identifier + "_run_card.dat")
        with open(self.mg5_cards["run"], "w") as f_out:
            with open(Info.files["mg5_run_template"], "r") as f_in:
                for line in f_in:
                    # Beam energy is half the centre-of-mass energy;
                    # "ecm" is in TeV, the card wants GeV.
                    ecmhalf = str(
                        float(Info.parameters["ecm"]) * 1000. / 2.)
                    # No explicit event cap -> default to 5000 generated events.
                    if self.maxEvents == -1:
                        AdvPrint.cout(
                            "\t " + self.name + ":prepare(): Setting number of to-be-generated events to 5000. Use --maxevents parameter to change this behaviour."
                        )
                        self.maxEvents = 5000
                    nevents = str(self.maxEvents)
                    seed = str(Info.parameters["randomseed"])
                    line = line.replace("@ecmhalf@", ecmhalf).replace(
                        "@nevents@", nevents).replace("@seed@", seed)
                    f_out.write(line)
    if self.mg5_cards["config"] == "":
        # copy template and fill relevant information
        self.mg5_cards["config"] = os.path.join(
            Info.paths["output_mg5"],
            self.identifier + "_me5_configuration.txt")
        with open(self.mg5_cards["config"], "w") as f:
            with open(Info.files["me5_configuration_template"], "r") as g:
                for line in g:
                    f.write(line)
def get_resultCollectors(self): """ Gathers results from all events""" # setup resultCollector object resultCollectors_pr = dict() for analysis in Info.analyses: resultCollectors_pr[analysis] = dict() signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"] for sr in signal_regions: resultCollectors_pr[analysis][sr] = ResultCollector(self.name, analysis, sr) # loop over all associated events and average results in all resultCollectors for event in self.eventsList: resultCollectors_ev = event.get_resultCollectors() for analysis in resultCollectors_pr: for sr in resultCollectors_pr[analysis]: resultCollectors_pr[analysis][sr].add_and_average(resultCollectors_ev[analysis][sr]) # Write process file, if wanted if Info.parameters["ProcessResultFileColumns"] != []: AdvPrint.mute() AdvPrint.set_cout_file(self.result_output_file, True) for col in Info.parameters["ProcessResultFileColumns"]: AdvPrint.cout(col+" ", "nlb") AdvPrint.cout("") for a in sorted(resultCollectors_pr.keys()): for sr in sorted(resultCollectors_pr[a].keys()): AdvPrint.cout(resultCollectors_pr[a][sr].line_from_data(Info.parameters["ProcessResultFileColumns"])) AdvPrint.format_columnated_file(self.result_output_file) AdvPrint.set_cout_file("#None") AdvPrint.unmute() return resultCollectors_pr
def __init__(self):
    #global Info, AdvPrint
    """ Initialisation of a CheckMATE object leads to an entire run of the
    CheckMATE procedure: settings are read (from a parameter file if a single
    non-"-h" argument is given, otherwise from command-line parameters),
    processes are checked and confirmed by the user, events are generated and
    analysed, results are evaluated, and the internal state is saved.
    """
    # Initialisation steps
    #Info.init()
    Info.fill_standard_paths_and_files()
    # No arguments at all -> show usage text.
    if len(sys.argv) == 1:
        self.printUsage()
    self.printLogo()
    # Exactly one argument (not "-h"): treat it as a CheckMATE input file.
    if len(sys.argv) == 2 and sys.argv[-1] != "-h":
        Info.fill_info_from_file(sys.argv[1])
        self.procList = Info.fill_processes_from_file(sys.argv[1])
    else:
        Info.fill_info_from_parameters()
        self.procList = Info.fill_processes_from_parameters()
    # In "add" mode, merge with the processes stored by a previous run.
    if Info.parameters["outputexists"] == "add":
        self.load(Info.files['internal_processes'])
    for p in self.procList:
        p.checkInputConsistency()
    # Show the collected settings and wait for user confirmation.
    self.user_param_check()
    self.prepare_run()
    # Running the event-based part
    if self.procList == []:
        AdvPrint.cerr_exit("No processes are loaded!")
    for p in self.procList:
        p.prepare()
        p.run()
        AdvPrint.cout("\n")
    # Evaluate
    if not Info.flags['skipevaluation']:
        self.evaluate()
    # Store internal status so a later run can resume/add to it.
    Info.save(Info.files['internal_info'])
    self.save(Info.files['internal_processes'])
def prepare(self):
    """Create the MadGraph5/Pythia8 input cards required for this run.

    For every card the user did not provide, a default is generated: the
    proc card from self.commandstring, the param card from the global SLHA
    file, the Pythia8 shower card from a template, the MG5 run card from a
    template (substituting @ecmhalf@, @nevents@ and @seed@), and the
    me5_configuration file from its template.
    """
    # Let the Pythia8 base class do its own preparation first.
    Pythia8Events.prepare(self)
    if self.commandstring != "":
        self.mg5_cards["proc"] = os.path.join(Info.paths["output_mg5"], self.identifier+"_proc_card.dat")
        with open(self.mg5_cards["proc"], "w") as f:
            f.write(self.commandstring)
    # Default the param card to the globally configured SLHA file.
    if self.mg5_cards["param"] == "" and Info.files["slha"] != "":
        self.mg5_cards["param"] = Info.files["slha"]
    if self.py8_infile == "":
        self.py8_infile = os.path.join(Info.paths["output_pythia"], self.identifier+"_showercard.in")
        with open(self.py8_infile, "w") as f:
            # Copy the minimal MG5 shower template, then append the SLHA reference.
            with open(Info.files['pythia_mg5minimal_template'], "r") as g:
                for line in g:
                    f.write(line)
            f.write("SLHA:file = "+self.mg5_cards["param"]+"\n")
    if self.mg5_cards["run"] == "":
        # copy template and fill relevant information
        self.mg5_cards["run"] = os.path.join(Info.paths["output_mg5"], self.identifier+"_run_card.dat")
        with open(self.mg5_cards["run"], "w") as f_out:
            with open(Info.files["mg5_run_template"], "r") as f_in:
                for line in f_in:
                    # Beam energy = ecm/2, converted from TeV to GeV.
                    ecmhalf = str(float(Info.parameters["ecm"])*1000./2.)
                    # Without an explicit cap, default to 5000 generated events.
                    if self.maxEvents == -1:
                        AdvPrint.cout("\t "+self.name+":prepare(): Setting number of to-be-generated events to 5000. Use --maxevents parameter to change this behaviour.")
                        self.maxEvents = 5000
                    nevents = str(self.maxEvents)
                    seed = str(Info.parameters["randomseed"])
                    line = line.replace("@ecmhalf@", ecmhalf).replace("@nevents@", nevents).replace("@seed@", seed)
                    f_out.write(line)
    if self.mg5_cards["config"] == "":
        # copy template and fill relevant information
        self.mg5_cards["config"] = os.path.join(Info.paths["output_mg5"], self.identifier+"_me5_configuration.txt")
        with open(self.mg5_cards["config"], "w") as f:
            with open(Info.files["me5_configuration_template"], "r") as g:
                for line in g:
                    f.write(line)
def get_resultCollectors(self): """ Gathers results from all events""" # setup resultCollector object resultCollectors_pr = dict() for analysis in Info.analyses: resultCollectors_pr[analysis] = dict() signal_regions = Info.get_analysis_parameters( analysis)["signal_regions"] for sr in signal_regions: resultCollectors_pr[analysis][sr] = ResultCollector( self.name, analysis, sr) # loop over all associated events and average results in all resultCollectors for event in self.eventsList: resultCollectors_ev = event.get_resultCollectors() for analysis in resultCollectors_pr: for sr in resultCollectors_pr[analysis]: resultCollectors_pr[analysis][sr].add_and_average( resultCollectors_ev[analysis][sr]) # Write process file, if wanted if Info.parameters["ProcessResultFileColumns"] != []: AdvPrint.mute() AdvPrint.set_cout_file(self.result_output_file, True) for col in Info.parameters["ProcessResultFileColumns"]: AdvPrint.cout(col + " ", "nlb") AdvPrint.cout("") for a in sorted(resultCollectors_pr.keys()): for sr in sorted(resultCollectors_pr[a].keys()): AdvPrint.cout(resultCollectors_pr[a][sr].line_from_data( Info.parameters["ProcessResultFileColumns"])) AdvPrint.format_columnated_file(self.result_output_file) AdvPrint.set_cout_file("#None") AdvPrint.unmute() return resultCollectors_pr
def printInfo(self):
    """Print this process' configuration (name, cross section or K-factor,
    and every associated event source) via AdvPrint."""
    AdvPrint.cout("\tProcess Name: " + self.name)
    if self.have_xsect:
        AdvPrint.cout("\tInput Cross section: " + str(self.xsec) + " " + str(self.xsec_unit))
    if self.have_xerr:
        AdvPrint.cout("\tInput cross section error: " + str(self.xerr) + " " + str(self.xerr_unit))
    elif self.have_kfac:
        AdvPrint.cout("\tInput KFactor: " + str(self.kfac))
    attached = list(self.eventsList)
    if attached:
        AdvPrint.cout("\tAssociated event files and/or Monte-Carlo generation runs:")
        for source in attached:
            source.printInfo()
            AdvPrint.cout("")
    AdvPrint.cout("")
def runFritz(self):
    """Run the Fritz binary for every unprocessed event of this process.

    For each event: launches Fritz via a shell command, demultiplexes its
    stdout into per-tool logfiles (MG5 / Pythia / Fritz), echoes a progress
    line on the terminal, filters known-harmless ROOT6/CLING warnings from
    stderr, aborts on a non-zero return code, prunes empty analysis stdout
    files, and finally registers the produced signal/cutflow result files
    on the event.  Side effects only; no return value.
    """
    self.prepareFritz()
    from events import MG5Events
    for event in self.eventsList:
        if event.processed:
            continue
        fritz_command = Info.files["fritz_bin"]+" "+event.configFile
        result = subprocess.Popen(fritz_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        maxlen = 0
        try:
            for line in iter(result.stdout.readline, b''):
                # Print to logfile. If it does not start with the Fritz::Global
                # prefix "|~| ", it comes from MG5 and should be redirected.
                AdvPrint.mute()
                if not line.startswith("|~| ") and isinstance(event, MG5Events):
                    AdvPrint.set_cout_file(os.path.join(Info.paths["output_mg5"], "mg5amcatnlo_"+event.identifier+".log"))
                elif "PYTHIA Rndm::dumpState" in line:
                    AdvPrint.set_cout_file(os.path.join(Info.paths["output_pythia"], "pythia_"+event.identifier+".log"))
                else:
                    line = line.replace("|~| ", "")
                    AdvPrint.set_cout_file(os.path.join(Info.paths["output_fritz"], "fritz_"+event.identifier+".log"))
                AdvPrint.cout(line.rstrip())
                AdvPrint.set_cout_file("#None")
                AdvPrint.unmute()
                # We should not exceed the terminal width:
                terminal_width = AdvPrint.get_terminal_width()
                print_line = " |-> "+str(line.strip())
                # BUGFIX: str.replace returns a new string; the result was
                # previously discarded, leaving tabs in the progress line.
                print_line = print_line.replace("\t", " ")
                # Keep only the text after the last carriage return.
                # BUGFIX: slicing from index("\r") kept the "\r" itself,
                # so the loop never terminated; skip past it with +1.
                while "\r" in print_line:
                    print_line = print_line[print_line.index("\r")+1:]
                maxlen = max(maxlen, len(print_line))
                # As we print line by line in the same terminal row, pad with
                # spaces if the current line is shorter than a line before.
                fill_spaces = ""
                if len(print_line) < maxlen:
                    fill_spaces = " "*(maxlen-len(print_line))
                # if line is too long, make it shorter by appropriate amount
                if len(print_line+fill_spaces) >= terminal_width and len(print_line) <= terminal_width:
                    fill_spaces = " "*(terminal_width - len(print_line)-1)
                elif len(print_line) > terminal_width:
                    fill_spaces = ""
                    print_line = print_line[:terminal_width-4]+"..."
                AdvPrint.cout("\r"+print_line+fill_spaces+"\x1b[0m\r", "nlb")
        except KeyboardInterrupt:
            AdvPrint.cout("Caught Keyboard Signal. Aborting Fritz")
            result.send_signal(signal.SIGTERM)
        for line in iter(result.stderr.readline, b''):
            AdvPrint.unmute()
            AdvPrint.set_cout_file(Info.files['fritz_log'])
            # remove nasty ROOT6-CLING warnings from on-screen output
            if "cling::AutoloadingVisitor::InsertIntoAutoloadingState:" in line:
                AdvPrint.mute()
            elif "Missing FileEntry for ExRootAnalysis" in line:
                AdvPrint.mute()
            elif "requested to autoload type" in line:
                AdvPrint.mute()
            AdvPrint.cout(line.rstrip())
            AdvPrint.set_cout_file("#None")
            AdvPrint.unmute()
        AdvPrint.cout("")
        # Abort if there was an error
        result.wait()
        if result.returncode != 0:
            AdvPrint.cerr_exit("Fritz returned with error. Check logfiles in result folder for more information!")
        # Remove all empty analysisstdout files
        for f in [x for x in os.listdir(Info.paths['output_analysis']) if x.startswith("analysisstdout")]:
            if os.stat(os.path.join(Info.paths['output_analysis'], f)).st_size == 0:
                os.remove(os.path.join(Info.paths['output_analysis'], f))
        # Associate result files to event
        for a in Info.analyses:
            event.analysis_signal_files[a] = os.path.join(Info.paths['output_analysis'], event.identifier+'_'+a+'_signal.dat')
            if os.path.isfile(event.analysis_signal_files[a]):
                AdvPrint.format_columnated_file(event.analysis_signal_files[a])
            event.analysis_cutflow_files[a] = os.path.join(Info.paths['output_analysis'], event.identifier+'_'+a+'_cutflow.dat')
            if os.path.isfile(event.analysis_cutflow_files[a]):
                AdvPrint.format_columnated_file(event.analysis_cutflow_files[a])
        # finish
        event.processed = True
def print_result(self):
    """Write the final exclusion verdict of this evaluator to the result file.

    Prints which statistical test was performed (CLs, approximate
    likelihood, or r-value), any accumulated warnings, the Allowed/Excluded
    verdict, the numerical result, and the analysis/signal-region it
    belongs to.  Exits via cerr_exit if nothing was evaluated.
    """
    AdvPrint.set_cout_file(Info.files["output_result"], True)
    # Sentinel -1 means "this quantity was never computed".
    if self.cls_obs != -1:
        AdvPrint.cout(
            "Test: Calculation of CLs(S, dS, B, dB, O) using profile likelihood"
        )
    elif self.likelihood != -1:
        AdvPrint.cout(
            "Test: Calculation of approximate (fast) likelihood given in results folder"
        )
    elif self.r_obs != -1:
        AdvPrint.cout(
            "Test: Calculation of r = signal/(95%CL limit on signal)")
    else:
        AdvPrint.cerr_exit(
            "evaluator::printResult(): No result has been evaluated!")
    # ANSI colour codes: yellow warnings, red Excluded, green Allowed.
    for w in self.warnings:
        AdvPrint.cout("\033[33mWarning: " + w + "\033[0m")
    result = "\033[31mExcluded\033[0m"
    if self.allowed():
        result = "\033[32mAllowed\033[0m"
    AdvPrint.cout("Result: " + result)
    if self.cls_obs != -1:
        AdvPrint.cout("Result for CLs: " + str(self.cls_obs))
    # NOTE(review): the likelihood branch below is deliberately(?) commented
    # out, so a likelihood-only evaluation reaches cerr_exit here even though
    # the header above announced a likelihood test — confirm this is intended.
    #if self.likelihood != -1:
    #    AdvPrint.cout("Result for likelihood: "+str(self.likelihood))
    elif self.r_obs != -1:
        AdvPrint.cout("Result for r: " + str(self.r_obs_cons))
    else:
        AdvPrint.cerr_exit(
            "evaluator::printResult(): No result has been evaluated!")
    AdvPrint.cout("Analysis: " + self.resultCollector.analysis)
    AdvPrint.cout("SR: " + self.resultCollector.sr)
    AdvPrint.set_cout_file("#None")
def get_resultCollectors(self):
    """Parse this event's per-analysis signal files into ResultCollectors.

    Returns {analysis -> {signal region -> ResultCollector}} filled from
    the *_signal.dat files written by the analysis step.  Exits with an
    explanatory message if a required result file is missing.  Optionally
    writes the columnated event result file.
    """
    resultCollectors = dict(
    )  # list of all collectors of all analyses and all signal regions
    resultCollector = ResultCollector(
        self.identifier, "",
        "")  # base collector object which we will just edit and copy
    for analysis in Info.analyses:
        # check if results file exists
        if not os.path.isfile(self.analysis_signal_files[analysis]):
            AdvPrint.cerr_exit(
                "\t events::get_resultCollector() \n"
                "\t Required analysis result file does not exist: \n "
                "\t\t" + self.analysis_signal_files[analysis] + "\n"
                "\t It is very likely that something went wrong in the delphes and/or the analysis step. \n"
                "\t Please check \n "
                "\t \t " + Info.files['delphes_log'] + " \n "
                "\t \t " + Info.files['analysis_log'] + "* \n "
                "\t for error messages and, should you not be able to fix them yourself, contact the authors under \n"
                "\t \t [email protected]")
        # setup resultCollector object
        resultCollector.analysis = analysis
        resultCollectors[analysis] = dict()
        signal_regions = Info.get_analysis_parameters(
            analysis)["signal_regions"]
        # Read result file
        f = open(self.analysis_signal_files[analysis], "r")
        for line in f:
            # Ignore empty or commented lines
            line = line.rstrip()
            if line == "" or line[0] == "#":
                continue
            # Read file:
            line = AdvPrint.remove_extra_spaces(line)
            tokens = [t for t in line.split(" ") if t != ""]
            # First, read information on total events number
            if tokens[0] == "MCEvents:":
                resultCollector.total_mcevents = float(tokens[1])
            elif tokens[0] == " SumOfWeights:":
                resultCollector.total_sumofweights = float(tokens[1])
            elif tokens[0] == " SumOfWeights2:":
                resultCollector.total_sumofweights2 = float(tokens[1])
            elif tokens[0] == " NormEvents:":
                resultCollector.total_normevents = float(tokens[1])
            elif tokens[0] == "XSect:":
                xsect = float(tokens[1].split(" ")[0])
            elif tokens[0] == " Error:":
                xsecterr = float(tokens[1].split(" ")[0])
            else:
                # Signal-region data row: SR Sum_W Sum_W2 Acc N_Norm
                # NOTE(review): xsect/xsecterr are only bound once an
                # "XSect:"/" Error:" header was seen; a malformed file
                # without them would raise NameError below — confirm the
                # file format guarantees their presence.
                for sr in signal_regions:
                    if tokens[0].startswith(sr):
                        resultCollector.sr = sr
                        # Read number of events
                        resultCollector.signal_sumofweights = float(
                            tokens[1])
                        resultCollector.signal_sumofweights2 = float(
                            tokens[2])
                        resultCollector.signal_normevents = float(
                            tokens[4])
                        # Calculate errors (statistical from weights,
                        # systematic from the cross-section error)
                        if resultCollector.signal_sumofweights > 0:
                            resultCollector.signal_err_stat = resultCollector.signal_normevents * sqrt(
                                resultCollector.signal_sumofweights2
                            ) / resultCollector.signal_sumofweights
                            resultCollector.signal_err_sys = resultCollector.signal_normevents * xsecterr / xsect
                            resultCollector.signal_err_tot = sqrt(
                                resultCollector.signal_err_stat**2 +
                                resultCollector.signal_err_sys**2)
                        else:
                            resultCollector.signal_err_stat = 0
                            resultCollector.signal_err_sys = 0
                            resultCollector.signal_err_tot = 0
                        # put copy of resultCollector in collector dict
                        resultCollectors[analysis][sr] = deepcopy(
                            resultCollector)
        f.close()
    # Write events file, if wanted
    if Info.parameters["EventResultFileColumns"] != []:
        AdvPrint.mute()
        AdvPrint.set_cout_file(self.result_output_file, True)
        for col in Info.parameters["EventResultFileColumns"]:
            AdvPrint.cout(col + " ", "nlb")
        AdvPrint.cout("")
        for a in sorted(resultCollectors.keys()):
            for sr in sorted(resultCollectors[a].keys()):
                AdvPrint.cout(resultCollectors[a][sr].line_from_data(
                    Info.parameters["EventResultFileColumns"]))
        AdvPrint.format_columnated_file(self.result_output_file)
        AdvPrint.set_cout_file("#None")
        AdvPrint.unmute()
    return resultCollectors
def printInfo(self):
    """Report this LHE event source's configuration via AdvPrint."""
    AdvPrint.cout("\t\tLHE Events")
    AdvPrint.cout("\t\t\t - internal identifier: '" + self.identifier + "'")
    only_one = len(self.full_filenames) == 1
    for number, filename in enumerate(self.full_filenames, 1):
        if only_one:
            AdvPrint.cout("\t\t\t - path to .lhe file: " + filename)
        else:
            AdvPrint.cout("\t\t\t - path to .lhe file #" + str(number) + ": " + filename)
    if self.py8_infile != "":
        AdvPrint.cout(
            "\t\t\t - Pythia8 .in settings file for showering: " +
            self.py8_infile)
    if self.maxEvents > 0:
        AdvPrint.cout("\t\t\t - at most " + str(self.maxEvents) +
                      " events are generated/analysed")
    if self.processed:
        AdvPrint.cout(
            "\t\t\t [Events already processed, results from earlier run are used!]"
        )
def printInfo(self):
    """Report this Pythia8 event source's configuration via AdvPrint."""
    messages = ["\t\t Pythia8 Events",
                "\t\t\t - internal identifier: '" + self.identifier + "'"]
    if self.py8_infile != "":
        messages.append("\t\t\t - .in settings file: " + self.py8_infile)
    if self.processString != "":
        messages.append("\t\t\t - simplified SUSY process: " + self.processString)
    if self.maxEvents > 0:
        messages.append("\t\t\t - at most " + str(self.maxEvents) +
                        " events are generated and analysed")
    if self.processed:
        messages.append(
            "\t\t\t [Events already processed, results from earlier run are used!]"
        )
    for message in messages:
        AdvPrint.cout(message)
def user_param_check(self):
    """Prints settings on screen and awaits user confirmation.

    Lists the chosen analyses, centre-of-mass energy, processes, output
    directory, and every non-default flag/parameter, then asks the user to
    confirm with 'y' (exit(1) on 'n') unless skipparamcheck is set.
    Side effects only: prints via AdvPrint and reads from stdin.
    """
    analysis_info = dict()
    AdvPrint.cout("The following settings are used:")
    AdvPrint.cout("Analyses: ")
    for analysis in Info.analyses:
        parameters = Info.get_analysis_parameters(analysis)
        analysis_info = ""
        # Analyses without known background expectation cannot be used
        # for an exclusion test; flag them prominently.
        if parameters["expectation_known"] == "n":
            analysis_info += "[NO EXPECTATION KNOWN -> NO EXCLUSION TEST] "
        analysis_info += parameters["short_info"]
        AdvPrint.cout("\t"+analysis+" ("+analysis_info+")")
    AdvPrint.cout("E_CM: "+str(Info.parameters["ecm"]))
    AdvPrint.cout("Processes: ")
    for process in self.procList:
        process.printInfo()
    AdvPrint.cout("")
    AdvPrint.cout("Output Directory: ")
    AdvPrint.cout("\t"+Info.paths['output'])
    AdvPrint.cout("Additional Settings: ")
    if Info.files['slha'] != "":
        AdvPrint.cout("\t - SLHA file "+Info.files['slha']+" will be used for event generation")
    if Info.parameters['invisiblePIDs'] != []:
        # translate(None, "[]'") strips list-repr punctuation (Python 2 str API)
        AdvPrint.cout("\t - The following PIDs will be considered as invisible for the detector: "+str(Info.parameters['invisiblePIDs']).translate(None, "[]'"))
    if Info.flags['skipanalysis']:
        AdvPrint.cout("\t - No analysis step")
    if Info.flags['skippythia']:
        AdvPrint.cout("\t - No pythia step")
    if Info.flags['skipevaluation']:
        AdvPrint.cout("\t - No evaluation step")
    if Info.flags['fullcls']:
        AdvPrint.cout("\t - CLs of all signal regions will be explicitly calculated")
    if Info.parameters['bestcls'] != 0:
        AdvPrint.cout("\t - CLs of "+str(Info.parameters['bestcls'])+" best signal region will be explicitly calculated")
    if Info.flags['likelihood']:
        AdvPrint.cout("\t - Likelihood will be calculated for each signal region")
    if Info.flags['no_mc_stat_err']:
        AdvPrint.cout("\t - No Monte Carlo statistical uncertainty will be included in the evaluation")
    if Info.flags['eff_tab']:
        AdvPrint.cout("\t - Efficiency tables will be calculated for each signal region of every analysis run")
    if Info.flags["controlregions"]:
        AdvPrint.cout("\t - Analysing control regions")
    if Info.parameters["outputexists"] == "overwrite":
        AdvPrint.cout("\t - Old results will be deleted")
    if Info.parameters["outputexists"] == "add":
        AdvPrint.cout("\t - New results will be added to old ones")
    if Info.parameters["randomseed"] != 0:
        AdvPrint.cout("\t - Fixed random seed of "+str(Info.parameters["randomseed"]))
    if Info.flags["write_delphes_events"]:
        AdvPrint.cout("\t - delphes .root files will be written!")
    if Info.flags["write_pythia_events"]:
        AdvPrint.cout("\t - pythia .hepmc files will be written!")
    # Only announce column selections that differ from the defaults.
    if Info.parameters["EventResultFileColumns"] != ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']:
        AdvPrint.cout("\t - print columns "+str(Info.parameters['EventResultFileColumns']).translate(None, "[]'")+" in event result files!")
    if Info.parameters["ProcessResultFileColumns"] != ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']:
        AdvPrint.cout("\t - print columns "+str(Info.parameters['ProcessResultFileColumns']).translate(None, "[]'")+" in process result files!")
    if Info.parameters["TotalEvaluationFileColumns"] != ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']:
        AdvPrint.cout("\t - print columns "+str(Info.parameters['TotalEvaluationFileColumns']).translate(None, "[]'")+" in total evaluation files!")
    if Info.parameters["BestPerAnalysisEvaluationFileColumns"] != ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']:
        # BUGFIX: message used to read "bert-per-analysis"
        AdvPrint.cout("\t - print columns "+str(Info.parameters['BestPerAnalysisEvaluationFileColumns']).translate(None, "[]'")+" in best-per-analysis evaluation files!")
    # Let user check correctness of parameters, unless in skipparamcheck.
    if not Info.flags['skipparamcheck']:
        while True:
            c = raw_input("Is this correct? (y/n) ")
            if c == "y":
                break
            elif c == "n":
                exit(1)
    AdvPrint.cout("")
def printInfo(self):
    """Print this process' configuration: name, cross section (with error)
    or K-factor, and the summaries of all associated event sources."""
    AdvPrint.cout("\tProcess Name: " + self.name)
    if self.have_xsect:
        AdvPrint.cout("\tInput Cross section: " + str(self.xsec) + " " +
                      str(self.xsec_unit))
    if self.have_xerr:
        AdvPrint.cout("\tInput cross section error: " + str(self.xerr) +
                      " " + str(self.xerr_unit))
    elif self.have_kfac:
        # K-factor is only reported when no explicit error was given.
        AdvPrint.cout("\tInput KFactor: " + str(self.kfac))
    eventsList = [ef for ef in self.eventsList]
    if len(eventsList) != 0:
        AdvPrint.cout(
            "\tAssociated event files and/or Monte-Carlo generation runs:")
        for ef in eventsList:
            ef.printInfo()
            AdvPrint.cout("")
    AdvPrint.cout("")
def runFritz(self):
    """Run the Fritz binary for every unprocessed event of this process.

    Per event: launches Fritz as a shell subprocess, routes its stdout into
    per-tool logfiles (MG5/Pythia/Fritz), shows a single-row progress line
    on the terminal, filters known ROOT6-CLING noise from stderr, aborts on
    non-zero exit, removes empty analysisstdout files, and registers the
    generated signal/cutflow result files on the event.
    """
    self.prepareFritz()
    from events import MG5Events
    for event in self.eventsList:
        if event.processed:
            continue
        fritz_command = Info.files["fritz_bin"] + " " + event.configFile
        result = subprocess.Popen(fritz_command,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        maxlen = 0
        try:
            for line in iter(result.stdout.readline, b''):
                # Lines without the Fritz::Global "|~| " prefix come from
                # MG5 and are redirected to the MG5 logfile instead.
                AdvPrint.mute()
                if not line.startswith("|~| ") and isinstance(
                        event, MG5Events):
                    AdvPrint.set_cout_file(
                        os.path.join(
                            Info.paths["output_mg5"],
                            "mg5amcatnlo_" + event.identifier + ".log"))
                elif "PYTHIA Rndm::dumpState" in line:
                    AdvPrint.set_cout_file(
                        os.path.join(Info.paths["output_pythia"],
                                     "pythia_" + event.identifier + ".log"))
                else:
                    line = line.replace("|~| ", "")
                    AdvPrint.set_cout_file(
                        os.path.join(Info.paths["output_fritz"],
                                     "fritz_" + event.identifier + ".log"))
                AdvPrint.cout(line.rstrip())
                AdvPrint.set_cout_file("#None")
                AdvPrint.unmute()
                # Progress line must not exceed the terminal width.
                terminal_width = AdvPrint.get_terminal_width()
                print_line = " |-> " + str(line.strip())
                # BUGFIX: the replaced string was previously discarded
                # (str.replace returns a new string), leaving raw tabs.
                print_line = print_line.replace("\t", " ")
                # BUGFIX: the old slice started AT the "\r", so the loop
                # never terminated; skip past the carriage return.
                while "\r" in print_line:
                    print_line = print_line[print_line.index("\r") + 1:]
                maxlen = max(maxlen, len(print_line))
                # Pad with spaces so a shorter line fully overwrites a
                # longer one printed in the same terminal row earlier.
                fill_spaces = ""
                if len(print_line) < maxlen:
                    fill_spaces = " " * (maxlen - len(print_line))
                # Shorten/trim if the padded line would overflow the row.
                if len(print_line + fill_spaces) >= terminal_width and len(
                        print_line) <= terminal_width:
                    fill_spaces = " " * (terminal_width - len(print_line) - 1)
                elif len(print_line) > terminal_width:
                    fill_spaces = ""
                    print_line = print_line[:terminal_width - 4] + "..."
                AdvPrint.cout(
                    "\r" + print_line + fill_spaces + "\x1b[0m\r", "nlb")
        except KeyboardInterrupt:
            AdvPrint.cout("Caught Keyboard Signal. Aborting Fritz")
            result.send_signal(signal.SIGTERM)
        for line in iter(result.stderr.readline, b''):
            AdvPrint.unmute()
            AdvPrint.set_cout_file(Info.files['fritz_log'])
            # remove nasty ROOT6-CLING warnings from on-screen output
            if "cling::AutoloadingVisitor::InsertIntoAutoloadingState:" in line:
                AdvPrint.mute()
            elif "Missing FileEntry for ExRootAnalysis" in line:
                AdvPrint.mute()
            elif "requested to autoload type" in line:
                AdvPrint.mute()
            AdvPrint.cout(line.rstrip())
            AdvPrint.set_cout_file("#None")
            AdvPrint.unmute()
        AdvPrint.cout("")
        # Abort if there was an error
        result.wait()
        if result.returncode != 0:
            AdvPrint.cerr_exit(
                "Fritz returned with error. Check logfiles in result folder for more information!"
            )
        # Remove all empty analysisstdout files
        for f in [
                x for x in os.listdir(Info.paths['output_analysis'])
                if x.startswith("analysisstdout")
        ]:
            if os.stat(os.path.join(Info.paths['output_analysis'],
                                    f)).st_size == 0:
                os.remove(os.path.join(Info.paths['output_analysis'], f))
        # Associate result files to event
        for a in Info.analyses:
            event.analysis_signal_files[a] = os.path.join(
                Info.paths['output_analysis'],
                event.identifier + '_' + a + '_signal.dat')
            if os.path.isfile(event.analysis_signal_files[a]):
                AdvPrint.format_columnated_file(
                    event.analysis_signal_files[a])
            event.analysis_cutflow_files[a] = os.path.join(
                Info.paths['output_analysis'],
                event.identifier + '_' + a + '_cutflow.dat')
            if os.path.isfile(event.analysis_cutflow_files[a]):
                AdvPrint.format_columnated_file(
                    event.analysis_cutflow_files[a])
        # finish
        event.processed = True
def printInfo(self):
    """Print this MG5_aMC@NLO event source's configuration via AdvPrint.

    Lists identifier, all MG5 cards that were set (proc/run/config/param),
    the generation command, the event cap, the Pythia8 shower card, and the
    optional parton-level cross-section threshold below which Pythia8 is
    skipped.
    """
    AdvPrint.cout("\t\tMG5_aMC@NLO Events")
    AdvPrint.cout("\t\t\t - internal identifier: '"+self.identifier+"'")
    if self.mg5_cards["proc"] != "":
        AdvPrint.cout("\t\t\t - proc_card: "+self.mg5_cards["proc"])
    if self.commandstring != "":
        # Re-indent the multi-line command so it lines up with the list.
        AdvPrint.cout("\t\t\t - command: "+self.commandstring.replace("\n", "\n\t\t\t "))
    if self.mg5_cards["run"] != "":
        AdvPrint.cout("\t\t\t - run_card: "+self.mg5_cards["run"])
    if self.mg5_cards["config"] != "":
        AdvPrint.cout("\t\t\t - config_card: "+self.mg5_cards["config"])
    if self.mg5_cards["param"] != "":
        AdvPrint.cout("\t\t\t - param_card: "+self.mg5_cards["param"])
    if self.maxEvents > 0:
        # BUGFIX: message used to read "analsed"
        AdvPrint.cout("\t\t\t - at most "+str(self.maxEvents)+" events are generated/analysed")
    if self.py8_infile != "":
        AdvPrint.cout("\t\t\t - Pythia8 settings file used for showering: "+self.py8_infile)
    if self.have_xsth:
        AdvPrint.cout("\t\t\t - Pythia8 won't run if parton cross section falls below: "+str(self.xsth)+" "+str(self.xsth_unit))
def get_resultCollectors(self):
    """Parse this event's per-analysis *_signal.dat files into collectors.

    Returns {analysis -> {signal region -> ResultCollector}}.  Exits with
    a detailed message when a result file is missing.  Optionally writes
    the columnated event result file.
    """
    resultCollectors = dict() # list of all collectors of all analyses and all signal regions
    resultCollector = ResultCollector(self.identifier, "", "") # base collector object which we will just edit and copy
    for analysis in Info.analyses:
        # check if results file exists
        if not os.path.isfile(self.analysis_signal_files[analysis]):
            AdvPrint.cerr_exit("\t events::get_resultCollector() \n"
                               "\t Required analysis result file does not exist: \n "
                               "\t\t"+self.analysis_signal_files[analysis]+"\n"
                               "\t It is very likely that something went wrong in the delphes and/or the analysis step. \n"
                               "\t Please check \n "
                               "\t \t "+Info.files['delphes_log']+" \n "
                               "\t \t "+Info.files['analysis_log']+"* \n "
                               "\t for error messages and, should you not be able to fix them yourself, contact the authors under \n"
                               "\t \t [email protected]")
        # setup resultCollector object
        resultCollector.analysis = analysis
        resultCollectors[analysis] = dict()
        signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"]
        # Read result file
        f = open(self.analysis_signal_files[analysis], "r")
        for line in f:
            # Ignore empty or commented lines
            line = line.rstrip()
            if line == "" or line[0] == "#":
                continue
            # Read file:
            line = AdvPrint.remove_extra_spaces(line)
            tokens = [t for t in line.split(" ") if t != ""]
            # First, read information on total events number
            if tokens[0] == "MCEvents:":
                resultCollector.total_mcevents = float(tokens[1])
            elif tokens[0] == " SumOfWeights:":
                resultCollector.total_sumofweights = float(tokens[1])
            elif tokens[0] == " SumOfWeights2:":
                resultCollector.total_sumofweights2 = float(tokens[1])
            elif tokens[0] == " NormEvents:":
                resultCollector.total_normevents = float(tokens[1])
            elif tokens[0] == "XSect:":
                xsect = float(tokens[1].split(" ")[0])
            elif tokens[0] == " Error:":
                xsecterr = float(tokens[1].split(" ")[0])
            else:
                # Signal-region row: SR Sum_W Sum_W2 Acc N_Norm
                # NOTE(review): assumes the XSect:/Error: headers appeared
                # before any SR row; otherwise xsect/xsecterr are unbound.
                for sr in signal_regions:
                    if tokens[0].startswith(sr):
                        resultCollector.sr = sr
                        # Read number of events
                        resultCollector.signal_sumofweights = float(tokens[1])
                        resultCollector.signal_sumofweights2 = float(tokens[2])
                        resultCollector.signal_normevents = float(tokens[4])
                        # Calculate errors: statistical from the weight sums,
                        # systematic scaled by the relative xsec error
                        if resultCollector.signal_sumofweights > 0:
                            resultCollector.signal_err_stat = resultCollector.signal_normevents*sqrt(resultCollector.signal_sumofweights2)/resultCollector.signal_sumofweights
                            resultCollector.signal_err_sys = resultCollector.signal_normevents*xsecterr/xsect
                            resultCollector.signal_err_tot = sqrt(resultCollector.signal_err_stat**2+resultCollector.signal_err_sys**2)
                        else:
                            resultCollector.signal_err_stat = 0
                            resultCollector.signal_err_sys = 0
                            resultCollector.signal_err_tot = 0
                        # put copy of resultCollector in collector dict
                        resultCollectors[analysis][sr] = deepcopy(resultCollector)
        f.close()
    # Write events file, if wanted
    if Info.parameters["EventResultFileColumns"] != []:
        AdvPrint.mute()
        AdvPrint.set_cout_file(self.result_output_file, True)
        for col in Info.parameters["EventResultFileColumns"]:
            AdvPrint.cout(col+" ", "nlb")
        AdvPrint.cout("")
        for a in sorted(resultCollectors.keys()):
            for sr in sorted(resultCollectors[a].keys()):
                AdvPrint.cout(resultCollectors[a][sr].line_from_data(Info.parameters["EventResultFileColumns"]))
        AdvPrint.format_columnated_file(self.result_output_file)
        AdvPrint.set_cout_file("#None")
        AdvPrint.unmute()
    return resultCollectors
def setup_pythia8_from_process_string(self):
    """Generates a Pythia8 input card from the simplified process string.

    Translates self.processString (e.g. 'p p > go go') into the corresponding
    Pythia8 SUSY process switches, prepends the template settings, the beam
    energy and the SLHA file, and registers the card in Info.files['pythia_cards'].
    Exits via AdvPrint.cerr_exit on an unknown process or missing SLHA file.
    """
    out_path = Info.paths['output_pythia']
    procnum = len(Info.files['pythia_cards'])
    filename = self.name + "card_" + str(procnum) + ".in"
    fpath = os.path.join(out_path, filename)

    # Part 2: Generate cards based on process string (the part after '>')
    part = self.processString.split('>')[1].strip()
    proclist = list()
    if part == 'go go':
        proclist.append('SUSY:gg2gluinogluino = on\n')
        proclist.append('SUSY:qqbar2gluinogluino = on\n')
    elif part == 'go sq':
        proclist.append('SUSY:qg2squarkgluino = on\n')
        # first/second generation squarks, both chiralities
        # BUGFIX: line previously lacked the trailing newline
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part == 'sq sq~':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        # BUGFIX: line previously lacked the trailing newline
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part == 't1 t1~':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:idA = 1000006\n')
    elif part == '3gen':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:qq2squarksquark = on\n')
        # BUGFIX: '100006' is not a valid PDG code; stop_1 is 1000006
        proclist.append('SUSY:idVecA = 1000005,1000006,2000005,2000006\n')
    elif part == 'sq sq':
        proclist.append('SUSY:qq2squarksquark = on\n')
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part.lower() == "ewsusy":
        # electroweak gaugino pair production
        proclist.append('SUSY:qqbar2chi0chi0 = on\n')
        proclist.append('SUSY:qqbar2chi+-chi0 = on\n')
        proclist.append('SUSY:qqbar2chi+chi- = on\n')
    elif part.lower() == "colsusy":
        # all coloured sparticle production channels
        proclist.append('SUSY:gg2gluinogluino = on\n')
        proclist.append('SUSY:qqbar2gluinogluino = on\n')
        proclist.append('SUSY:qg2squarkgluino = on\n')
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:qq2squarksquark = on\n')
        # BUGFIX: '100006' is not a valid PDG code; stop_1 is 1000006
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,1000005,1000006,2000001,2000002,2000003,2000004,2000005,2000006\n'
        )
    elif part.lower() == "allsusy":
        proclist.append('SUSY:all = on\n')
    else:
        AdvPrint.cerr_exit(
            "\t Process:genPy8card():: Cannot understand process " + part)

    # Write Pythia cards; default beam energy is 8 TeV
    ecm_str = "Beams:eCM = 8000.\n"
    if (float(Info.parameters["ecm"]) == 7.0):
        ecm_str = "Beams:eCM = 7000.\n"
    elif (float(Info.parameters["ecm"]) == 13.0):
        ecm_str = "Beams:eCM = 13000.\n"
    elif (float(Info.parameters["ecm"]) == 14.0):
        ecm_str = "Beams:eCM = 14000.\n"

    if len(proclist) == 0:
        AdvPrint.cerr_exit("No processes found")

    slhafile = Info.files['slha']
    with open(fpath, 'w') as f:
        # copy the common template settings first
        with open(Info.files['pythia_settings_template'], 'r') as default:
            for line in default:
                f.write(line)
        f.write(ecm_str + '\n')
        if len(slhafile) > 0:
            f.write('SLHA:file = ')
            f.write(slhafile + '\n')
        else:
            AdvPrint.cerr_exit("\t Process:genPy8card():: No SLHA file found")
        for item in proclist:
            f.write(item)

    # if no maximal number set, set number of generated events to 5000
    if self.maxEvents == -1:
        AdvPrint.cout(
            "\t " + self.name + ":genPy8card(): Setting number of to-be-generated events to 5000. Use --maxevents parameter to change this behaviour."
        )
        self.maxEvents = 5000

    Info.files['pythia_cards'].append(fpath)
    self.py8_infile = fpath
def evaluate(self):
    """ Performs statistical evaluation of the result.

    Builds one Evaluator per (analysis, signal region) from the result
    collectors, computes efficiencies/r-values (plus likelihood, full CLs
    and Z-significance when the corresponding Info.flags are set), writes
    the total-results and best-signal-region output files, and prints the
    overall best result. Mutes stdout while redirecting output to files.
    """
    AdvPrint.cout("Evaluating Results")
    resultCollectors = self.get_resultCollectors()

    # evaluate all results
    evaluators = dict()
    for analysis in resultCollectors:
        evaluators[analysis] = dict()

    # only process those results and those signal regions that are given in the reference file
    for analysis in Info.analyses:
        signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"]
        for sr in signal_regions:
            evaluator = Evaluator(resultCollectors[analysis][sr])
            # Calculate everything that should be calculated
            # TODO: Beware analyses with unknown background
            evaluator.calc_efficiencies()
            evaluator.calc_r_values()
            if Info.flags["likelihood"]:
                evaluator.calc_likelihood()
            if Info.flags["fullcls"]:
                evaluator.calc_cls_values()
            if Info.flags["zsig"]:
                evaluator.calc_zsig()
            evaluators[analysis][sr] = evaluator

    # Full CLs only for the N most sensitive signal regions, if requested
    if Info.parameters["bestcls"] != 0:
        AdvPrint.cout("Calculating CLs for the "+str(Info.parameters["bestcls"])+" most sensitive signal regions!")
        best_evaluators = find_strongest_evaluators(evaluators, Info.parameters["bestcls"])
        # if "bestcls" is 1, find_strongest_evaluators does not return a list but just the single best
        if Info.parameters["bestcls"] == 1:
            best_evaluators = [best_evaluators]
        for ev in best_evaluators:
            ev.calc_cls_values()

    # find best result
    best_evaluator_per_analysis = dict()
    for analysis in evaluators:
        # Find best of all SRs in analysis
        best_evaluator_per_analysis[analysis] = find_strongest_evaluators(evaluators[analysis], 1)
    best_evaluator = find_strongest_evaluators(best_evaluator_per_analysis, 1)

    # Write the total-results file: one header row, then one line per
    # (analysis, signal region), in alphabetic order
    AdvPrint.set_cout_file(Info.files['output_totalresults'], True)
    AdvPrint.mute()
    for col in Info.parameters["TotalEvaluationFileColumns"]:
        AdvPrint.cout(col+" ", "nlb")
    AdvPrint.cout("")
    for a in sorted(evaluators.keys()):
        for sr in sorted(evaluators[a].keys()):
            AdvPrint.cout(evaluators[a][sr].line_from_data(Info.parameters["TotalEvaluationFileColumns"]))
    AdvPrint.format_columnated_file(Info.files['output_totalresults'])

    # Write the best-signal-region-per-analysis file
    AdvPrint.set_cout_file(Info.files['output_bestsignalregions'], True)
    AdvPrint.mute()
    for col in Info.parameters["BestPerAnalysisEvaluationFileColumns"]:
        AdvPrint.cout(col+" ", "nlb")
    AdvPrint.cout("")
    # print analyses in alphabetic order
    for a in sorted(best_evaluator_per_analysis.keys()):
        AdvPrint.cout(best_evaluator_per_analysis[a].line_from_data(Info.parameters["BestPerAnalysisEvaluationFileColumns"]))
    AdvPrint.format_columnated_file(Info.files['output_bestsignalregions'])
    AdvPrint.set_cout_file("#None")
    AdvPrint.unmute()

    # Print the overall best result (and any warnings) to the user
    best_evaluator.check_warnings()
    best_evaluator.print_result()

    # Optional per-analysis report files
    if Info.flags['zsig']:
        _print_zsig(evaluators)
    if Info.flags['likelihood']:
        _print_likelihood(evaluators)
def setup_pythia8_from_process_string(self):
    """Generates a Pythia8 input card from the simplified process string.

    Translates self.processString (e.g. 'p p > go go') into the corresponding
    Pythia8 SUSY process switches, prepends the template settings, the beam
    energy and the SLHA file, and registers the card in Info.files['pythia_cards'].
    Exits via AdvPrint.cerr_exit on an unknown process or missing SLHA file.
    """
    out_path = Info.paths['output_pythia']
    procnum = len(Info.files['pythia_cards'])
    filename = self.name + "card_" + str(procnum) + ".in"
    fpath = os.path.join(out_path, filename)

    # Part 2: Generate cards based on process string (the part after '>')
    part = self.processString.split('>')[1].strip()
    proclist = list()
    if part == 'go go':
        proclist.append('SUSY:gg2gluinogluino = on\n')
        proclist.append('SUSY:qqbar2gluinogluino = on\n')
    elif part == 'go sq':
        proclist.append('SUSY:qg2squarkgluino = on\n')
        # first/second generation squarks, both chiralities
        # BUGFIX: line previously lacked the trailing newline
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part == 'sq sq~':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        # BUGFIX: line previously lacked the trailing newline
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part == 't1 t1~':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:idA = 1000006\n')
    elif part == '3gen':
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:qq2squarksquark = on\n')
        # BUGFIX: '100006' is not a valid PDG code; stop_1 is 1000006
        proclist.append('SUSY:idVecA = 1000005,1000006,2000005,2000006\n')
    elif part == 'sq sq':
        proclist.append('SUSY:qq2squarksquark = on\n')
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,2000001,2000002,2000003,2000004\n'
        )
    elif part.lower() == "ewsusy":
        # electroweak gaugino pair production
        proclist.append('SUSY:qqbar2chi0chi0 = on\n')
        proclist.append('SUSY:qqbar2chi+-chi0 = on\n')
        proclist.append('SUSY:qqbar2chi+chi- = on\n')
    elif part.lower() == "colsusy":
        # all coloured sparticle production channels
        proclist.append('SUSY:gg2gluinogluino = on\n')
        proclist.append('SUSY:qqbar2gluinogluino = on\n')
        proclist.append('SUSY:qg2squarkgluino = on\n')
        proclist.append('SUSY:gg2squarkantisquark = on\n')
        proclist.append('SUSY:qqbar2squarkantisquark = on\n')
        proclist.append('SUSY:qq2squarksquark = on\n')
        # BUGFIX: '100006' is not a valid PDG code; stop_1 is 1000006
        proclist.append(
            'SUSY:idVecA = 1000001,1000002,1000003,1000004,1000005,1000006,2000001,2000002,2000003,2000004,2000005,2000006\n'
        )
    elif part.lower() == "allsusy":
        proclist.append('SUSY:all = on\n')
    else:
        AdvPrint.cerr_exit(
            "\t Process:genPy8card():: Cannot understand process " + part)

    # Write Pythia cards; default beam energy is 8 TeV.
    # BUGFIX: compare via float() as the sibling implementation does, since
    # Info.parameters["ecm"] may be a string; also '13000\n.' had the decimal
    # point after the newline, producing a malformed card line.
    ecm_str = "Beams:eCM = 8000.\n"
    if (float(Info.parameters["ecm"]) == 7.0):
        ecm_str = "Beams:eCM = 7000.\n"
    elif (float(Info.parameters["ecm"]) == 13.0):
        ecm_str = "Beams:eCM = 13000.\n"
    elif (float(Info.parameters["ecm"]) == 14.0):
        ecm_str = "Beams:eCM = 14000.\n"

    if len(proclist) == 0:
        AdvPrint.cerr_exit("No processes found")

    slhafile = Info.files['slha']
    with open(fpath, 'w') as f:
        # copy the common template settings first
        with open(Info.files['pythia_settings_template'], 'r') as default:
            for line in default:
                f.write(line)
        f.write(ecm_str + '\n')
        if len(slhafile) > 0:
            f.write('SLHA:file = ')
            f.write(slhafile + '\n')
        else:
            AdvPrint.cerr_exit("\t Process:genPy8card():: No SLHA file found")
        for item in proclist:
            f.write(item)

    # if no maximal number set, set number of generated events to 5000
    if self.maxEvents == -1:
        AdvPrint.cout(
            "\t " + self.name + ":genPy8card(): Setting number of to-be-generated events to 5000. Use --maxevents parameter to change this behaviour."
        )
        self.maxEvents = 5000

    Info.files['pythia_cards'].append(fpath)
    self.py8_infile = fpath
def printInfo(self):
    """Print a short summary of this Pythia8 event source to the output stream."""
    summary = [
        "\t\t Pythia8 Events",
        "\t\t\t - internal identifier: '" + self.identifier + "'",
    ]
    # Optional attributes are only reported when actually set
    if self.py8_infile != "":
        summary.append("\t\t\t - .in settings file: " + self.py8_infile)
    if self.processString != "":
        summary.append("\t\t\t - simplified SUSY process: " + self.processString)
    if self.maxEvents > 0:
        summary.append("\t\t\t - at most " + str(self.maxEvents) + " events are generated and analysed")
    if self.processed:
        summary.append("\t\t\t [Events already processed, results from earlier run are used!]")
    for entry in summary:
        AdvPrint.cout(entry)
def printInfo(self):
    """Print a short summary of this MG5_aMC@NLO event source to the output stream.

    Reports the internal identifier and, when set, the MG5 cards, the command
    string, the event limit, the Pythia8 shower settings file and the parton
    cross-section threshold below which Pythia8 is skipped.
    """
    AdvPrint.cout("\t\tMG5_aMC@NLO Events")
    AdvPrint.cout("\t\t\t - internal identifier: '" + self.identifier + "'")
    if self.mg5_cards["proc"] != "":
        AdvPrint.cout("\t\t\t - proc_card: " + self.mg5_cards["proc"])
    if self.commandstring != "":
        AdvPrint.cout(
            "\t\t\t - command: " +
            self.commandstring.replace("\n", "\n\t\t\t            "))
    if self.mg5_cards["run"] != "":
        AdvPrint.cout("\t\t\t - run_card: " + self.mg5_cards["run"])
    if self.mg5_cards["config"] != "":
        AdvPrint.cout("\t\t\t - config_card: " + self.mg5_cards["config"])
    if self.mg5_cards["param"] != "":
        AdvPrint.cout("\t\t\t - param_card: " + self.mg5_cards["param"])
    if self.maxEvents > 0:
        # BUGFIX: corrected typo "analsed" -> "analysed" (matches the wording
        # of the other printInfo implementations)
        AdvPrint.cout("\t\t\t - at most " + str(self.maxEvents) +
                      " events are generated/analysed")
    if self.py8_infile != "":
        AdvPrint.cout(
            "\t\t\t - Pythia8 settings file used for showering: " +
            self.py8_infile)
    if self.have_xsth:
        AdvPrint.cout(
            "\t\t\t - Pythia8 won't run if parton cross section falls below: " +
            str(self.xsth) + " " + str(self.xsth_unit))
def printUsage(self):
    """Print the logo, a short usage banner and invocation examples, then exit.

    Called when CheckMATE is started without valid arguments; always
    terminates the program with exit code 1.
    """
    self.printLogo()
    # NOTE(review): the exact whitespace layout of this ASCII-art banner
    # could not be recovered from the source formatting — verify against
    # the original file before relying on it.
    AdvPrint.cout("""
 ___
|__| _ | _
| |(_)\)/ |(_)
""")
    # Two ways to invoke the program: explicit parameters or an input file
    AdvPrint.cout("Method 1: Input Parameters")
    AdvPrint.cout("\trun -n {name_for_this_run} -a {analysis} -p {process} -xs {crosssection} -xse {crosssection error} -ev {eventfile}")
    AdvPrint.cout("Method 2: Input File")
    AdvPrint.cout("\trun {inputfile}")
    AdvPrint.cout("")
    AdvPrint.cout("Examples:")
    AdvPrint.cout("\t./CheckMATE -n testrun -a atlas_1405_7875 -p \"gg\" -xs \"1*FB\" -xse \"0.1 FB\" -ev /scratch/all/gluinopair.hepmc")
    AdvPrint.cout("\t./CheckMATE testparam.dat")
    AdvPrint.cout("")
    AdvPrint.cout("Type './CheckMATE -h' for more information about available parameters or check")
    AdvPrint.cout("the given 'testparam.dat' file for the desired structure of input files")
    # Usage output always aborts the run
    exit(1)
def print_result(self):
    """Write the evaluation outcome (test type, warnings, verdict and the
    corresponding statistical quantity) into the result output file."""
    AdvPrint.set_cout_file(Info.files["output_result"], True)

    # Announce which statistical test was actually performed; abort if none was
    if self.cls_obs != -1:
        AdvPrint.cout("Test: Calculation of CLs(S, dS, B, dB, O) using profile likelihood")
    elif self.likelihood != -1:
        AdvPrint.cout("Test: Calculation of approximate (fast) likelihood given in results folder")
    elif self.r_obs != -1:
        AdvPrint.cout("Test: Calculation of r = signal/(95%CL limit on signal)")
    else:
        AdvPrint.cerr_exit("evaluator::printResult(): No result has been evaluated!")

    # Any accumulated warnings are shown in yellow
    for warning in self.warnings:
        AdvPrint.cout("\033[33mWarning: "+warning+"\033[0m")

    # Verdict: green "Allowed" or red "Excluded"
    verdict = "\033[32mAllowed\033[0m" if self.allowed() else "\033[31mExcluded\033[0m"
    AdvPrint.cout("Result: "+verdict)

    # Report the numerical value behind the verdict (CLs preferred over r)
    if self.cls_obs != -1:
        AdvPrint.cout("Result for CLs: "+str(self.cls_obs))
    elif self.r_obs != -1:
        AdvPrint.cout("Result for r: "+str(self.r_obs_cons))
    else:
        AdvPrint.cerr_exit("evaluator::printResult(): No result has been evaluated!")

    AdvPrint.cout("Analysis: "+self.resultCollector.analysis)
    AdvPrint.cout("SR: "+self.resultCollector.sr)
    AdvPrint.set_cout_file("#None")
def printInfo(self):
    """Print a short summary of this LHE event source to the output stream."""
    AdvPrint.cout("\t\tLHE Events")
    AdvPrint.cout("\t\t\t - internal identifier: '"+self.identifier+"'")
    # With a single file no index is shown; with several, files are numbered
    only_one = len(self.full_filenames) == 1
    for num, fname in enumerate(self.full_filenames, start=1):
        if only_one:
            AdvPrint.cout("\t\t\t - path to .lhe file: "+fname)
        else:
            AdvPrint.cout("\t\t\t - path to .lhe file #"+str(num)+": "+fname)
    if self.py8_infile != "":
        AdvPrint.cout("\t\t\t - Pythia8 .in settings file for showering: "+self.py8_infile)
    if self.maxEvents > 0:
        AdvPrint.cout("\t\t\t - at most "+str(self.maxEvents)+" events are generated/analysed")
    if self.processed:
        AdvPrint.cout("\t\t\t [Events already processed, results from earlier run are used!]")