def Read1DGraphErrors(self, iline):
    """ Read 1D graph section """
    ngraphs = self.GetNhist(self.lines[iline])  # graph and hist format is the same
    xarray = []
    data = {}
    errors = {}
    for igraph in range(ngraphs):
        data[igraph] = []
        errors[igraph] = []

    for line in self.lines[iline + 1:]:
        line = line.strip()
        if line == '':
            break
        elif re.search("^#", line):
            continue
        words = line.split()
        xarray.append(float(words[0]))
        for igraph in range(ngraphs):
            data[igraph].append(float(words[(igraph + 1) * 2 - 1]))
            errors[igraph].append(float(words[(igraph + 1) * 2]))

    npoints = len(xarray)
    for igraph in range(ngraphs):
        if self.subtitles[igraph]:
            subtitle = ' - ' + self.subtitles[igraph]
        else:
            subtitle = ''
        self.FixTitles()
        g = TGraphErrors(npoints)
        # self.ihist+1 - start from ONE as in Angel - easy to compare
        g.SetNameTitle("g%d" % (self.ihist + 1),
                       "%s%s;%s;%s" % (self.title, subtitle,
                                       self.xtitle, self.ytitle))
        self.ihist += 1
        for i in range(npoints):
            x = xarray[i]
            y = data[igraph][i]
            ey = errors[igraph][i]
            g.SetPoint(i, x, y)
            g.SetPointError(i, 0, ey * y)
        self.histos.Add(g)
    del self.subtitles[:]
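# Illustrative data block for Read1DGraphErrors (an assumed example, not taken
# from the source): one x column followed by a (value, relative-error) pair per
# graph. Because the stored errors are relative, the absolute error passed to
# SetPointError above is ey * y.
#
#   # x     y1      ey1     y2      ey2
#     1.0   10.0    0.05    20.0    0.10
#     2.0   12.0    0.04    21.0    0.08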
def main(argv):
    # Usage controls from OptionParser
    parser_usage = "outputfile.root"
    parser = OptionParser(usage=parser_usage)
    (options, args) = parser.parse_args(argv)
    if len(args) != 1:
        parser.print_help()
        return

    # Get files, add them to a dictionary keyed by run number
    run_dict = get_run_dict()  # see the hedged sketch of this helper after main()
    f_list = {}
    for runnum in run_dict:
        print("Run: " + str(runnum))
        filesmatching = glob.glob(base_directory + "*_" + str(runnum) + "*.root")
        if len(filesmatching) != 1:
            print("ERROR FINDING FILE: " + base_directory + "*_" +
                  str(runnum) + "*.root")
            print("exiting...")
            return
        f = TFile.Open(filesmatching[0])
        f_list[runnum] = f

    # These are the rocids associated with BCAL fADC250s
    rocid_arr = array('i', [31, 32, 34, 35, 37, 38, 40, 41])
    # These are the slots used for BCAL fADC250s, common to all crates
    slot_arr = array('i', [3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16])
    # These are the channels used on each slot, common to all slots
    channel_arr = array('i',
                        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    if year == "2015":
        # In 2015 only these rocids were associated with BCAL fADC250s
        rocid_arr = array('i', [31, 32, 34, 35])

    h_layer1_US_RMS_arr = []
    h_layer2_US_RMS_arr = []
    h_layer3_US_RMS_arr = []
    h_layer4_US_RMS_arr = []
    h_global_US_RMS_arr = []
    h_layer1_DS_RMS_arr = []
    h_layer2_DS_RMS_arr = []
    h_layer3_DS_RMS_arr = []
    h_layer4_DS_RMS_arr = []
    h_global_DS_RMS_arr = []
    h_global_all_RMS_arr = []

    # A whole big bunch of arrays for TGraphErrors
    mean_layer1_US_RMS = array('d', [])
    mean_layer2_US_RMS = array('d', [])
    mean_layer3_US_RMS = array('d', [])
    mean_layer4_US_RMS = array('d', [])
    mean_global_US_RMS = array('d', [])
    mean_layer1_DS_RMS = array('d', [])
    mean_layer2_DS_RMS = array('d', [])
    mean_layer3_DS_RMS = array('d', [])
    mean_layer4_DS_RMS = array('d', [])
    mean_global_DS_RMS = array('d', [])
    mean_global_all_RMS = array('d', [])
    mean_err_layer1_US_RMS = array('d', [])
    mean_err_layer2_US_RMS = array('d', [])
    mean_err_layer3_US_RMS = array('d', [])
    mean_err_layer4_US_RMS = array('d', [])
    mean_err_global_US_RMS = array('d', [])
    mean_err_layer1_DS_RMS = array('d', [])
    mean_err_layer2_DS_RMS = array('d', [])
    mean_err_layer3_DS_RMS = array('d', [])
    mean_err_layer4_DS_RMS = array('d', [])
    mean_err_global_DS_RMS = array('d', [])
    mean_err_global_all_RMS = array('d', [])
    bias_arr = array('d', [])
    bias_err_arr = array('d', [])
    mean_layer1_diff_RMS = array('d', [])
    mean_layer2_diff_RMS = array('d', [])
    mean_layer3_diff_RMS = array('d', [])
    mean_layer4_diff_RMS = array('d', [])
    mean_global_diff_RMS = array('d', [])
    mean_err_layer1_diff_RMS = array('d', [])
    mean_err_layer2_diff_RMS = array('d', [])
    mean_err_layer3_diff_RMS = array('d', [])
    mean_err_layer4_diff_RMS = array('d', [])
    mean_err_global_diff_RMS = array('d', [])

    # Loop over all files
    for curr_run in f_list:
        print("Runnum: " + str(curr_run))
        print("bias V: " + str(run_dict[curr_run]))
        curr_file = f_list[curr_run]
        curr_bias = run_dict[curr_run]

        # Create histograms
        hist_min = 0.
        hist_max = 4.
        h_layer1_US_RMS = TH1F("h_layer1_US_RMS_bias" + str(curr_bias),
                               "RMS of US Layer 1 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer2_US_RMS = TH1F("h_layer2_US_RMS_bias" + str(curr_bias),
                               "RMS of US Layer 2 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer3_US_RMS = TH1F("h_layer3_US_RMS_bias" + str(curr_bias),
                               "RMS of US Layer 3 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer4_US_RMS = TH1F("h_layer4_US_RMS_bias" + str(curr_bias),
                               "RMS of US Layer 4 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_global_US_RMS = TH1F("h_global_US_RMS_bias" + str(curr_bias),
                               "RMS of All US BCAL Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer1_DS_RMS = TH1F("h_layer1_DS_RMS_bias" + str(curr_bias),
                               "RMS of DS Layer 1 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer2_DS_RMS = TH1F("h_layer2_DS_RMS_bias" + str(curr_bias),
                               "RMS of DS Layer 2 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer3_DS_RMS = TH1F("h_layer3_DS_RMS_bias" + str(curr_bias),
                               "RMS of DS Layer 3 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_layer4_DS_RMS = TH1F("h_layer4_DS_RMS_bias" + str(curr_bias),
                               "RMS of DS Layer 4 Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_global_DS_RMS = TH1F("h_global_DS_RMS_bias" + str(curr_bias),
                               "RMS of All DS BCAL Channels;RMS (ADC units)",
                               1000, hist_min, hist_max)
        h_global_all_RMS = TH1F("h_global_all_RMS_bias" + str(curr_bias),
                                "RMS of All BCAL Channels;RMS (ADC units)",
                                1000, hist_min, hist_max)

        # Loop over all channels (helper sketches follow main() below)
        for rocid in rocid_arr:
            for slot in slot_arr:
                for channel in channel_arr:
                    hist = get_hist_from_rocid_slot_channel(
                        curr_file, rocid, slot, channel)
                    quadrant = getquadrant(rocid)
                    layer = getlayer(slot, channel)
                    is_downstream = getend(slot, channel)
                    # if quadrant == 3: continue  # Skip quadrant 3, it has LEDs firing (in 2019 at least)
                    if quadrant in (2, 3, 4):
                        # Keep quadrant 1 only; quadrant 3 has LEDs firing
                        # (in 2019 at least)
                        continue
                    # Fill the appropriate histograms
                    if layer == 1 and is_downstream == 0:
                        h_layer1_US_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 2 and is_downstream == 0:
                        h_layer2_US_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 3 and is_downstream == 0:
                        h_layer3_US_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 4 and is_downstream == 0:
                        h_layer4_US_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if is_downstream == 0:
                        h_global_US_RMS.Fill(hist.GetRMS())
                    if layer == 1 and is_downstream == 1:
                        h_layer1_DS_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 2 and is_downstream == 1:
                        h_layer2_DS_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 3 and is_downstream == 1:
                        h_layer3_DS_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if layer == 4 and is_downstream == 1:
                        h_layer4_DS_RMS.Fill(hist.GetRMS() / sqrt(layer))
                    if is_downstream == 1:
                        h_global_DS_RMS.Fill(hist.GetRMS())
                    h_global_all_RMS.Fill(hist.GetRMS())

        # End of file: add histograms to the lists
        h_layer1_US_RMS_arr.append(h_layer1_US_RMS)
        h_layer2_US_RMS_arr.append(h_layer2_US_RMS)
        h_layer3_US_RMS_arr.append(h_layer3_US_RMS)
        h_layer4_US_RMS_arr.append(h_layer4_US_RMS)
        h_global_US_RMS_arr.append(h_global_US_RMS)
        h_layer1_DS_RMS_arr.append(h_layer1_DS_RMS)
        h_layer2_DS_RMS_arr.append(h_layer2_DS_RMS)
        h_layer3_DS_RMS_arr.append(h_layer3_DS_RMS)
        h_layer4_DS_RMS_arr.append(h_layer4_DS_RMS)
        h_global_DS_RMS_arr.append(h_global_DS_RMS)
        h_global_all_RMS_arr.append(h_global_all_RMS)

        mean_layer1_US_RMS.append(h_layer1_US_RMS.GetMean())
        mean_layer2_US_RMS.append(h_layer2_US_RMS.GetMean())
        mean_layer3_US_RMS.append(h_layer3_US_RMS.GetMean())
        mean_layer4_US_RMS.append(h_layer4_US_RMS.GetMean())
        mean_global_US_RMS.append(h_global_US_RMS.GetMean())
        mean_layer1_DS_RMS.append(h_layer1_DS_RMS.GetMean())
        mean_layer2_DS_RMS.append(h_layer2_DS_RMS.GetMean())
        mean_layer3_DS_RMS.append(h_layer3_DS_RMS.GetMean())
        mean_layer4_DS_RMS.append(h_layer4_DS_RMS.GetMean())
        mean_global_DS_RMS.append(h_global_DS_RMS.GetMean())
        mean_global_all_RMS.append(h_global_all_RMS.GetMean())
        mean_err_layer1_US_RMS.append(h_layer1_US_RMS.GetRMS())
        mean_err_layer2_US_RMS.append(h_layer2_US_RMS.GetRMS())
        mean_err_layer3_US_RMS.append(h_layer3_US_RMS.GetRMS())
        mean_err_layer4_US_RMS.append(h_layer4_US_RMS.GetRMS())
        mean_err_global_US_RMS.append(h_global_US_RMS.GetRMS())
        mean_err_layer1_DS_RMS.append(h_layer1_DS_RMS.GetRMS())
        mean_err_layer2_DS_RMS.append(h_layer2_DS_RMS.GetRMS())
        mean_err_layer3_DS_RMS.append(h_layer3_DS_RMS.GetRMS())
        mean_err_layer4_DS_RMS.append(h_layer4_DS_RMS.GetRMS())
        mean_err_global_DS_RMS.append(h_global_DS_RMS.GetRMS())
        mean_err_global_all_RMS.append(h_global_all_RMS.GetRMS())
        bias_arr.append(curr_bias)
        bias_err_arr.append(0)

        # Downstream - upstream differences, errors added in quadrature
        mean_layer1_diff_RMS.append(h_layer1_DS_RMS.GetMean() -
                                    h_layer1_US_RMS.GetMean())
        mean_layer2_diff_RMS.append(h_layer2_DS_RMS.GetMean() -
                                    h_layer2_US_RMS.GetMean())
        mean_layer3_diff_RMS.append(h_layer3_DS_RMS.GetMean() -
                                    h_layer3_US_RMS.GetMean())
        mean_layer4_diff_RMS.append(h_layer4_DS_RMS.GetMean() -
                                    h_layer4_US_RMS.GetMean())
        mean_global_diff_RMS.append(h_global_DS_RMS.GetMean() -
                                    h_global_US_RMS.GetMean())
        mean_err_layer1_diff_RMS.append(
            sqrt(h_layer1_DS_RMS.GetRMS()**2 + h_layer1_US_RMS.GetRMS()**2))
        mean_err_layer2_diff_RMS.append(
            sqrt(h_layer2_DS_RMS.GetRMS()**2 + h_layer2_US_RMS.GetRMS()**2))
        mean_err_layer3_diff_RMS.append(
            sqrt(h_layer3_DS_RMS.GetRMS()**2 + h_layer3_US_RMS.GetRMS()**2))
        mean_err_layer4_diff_RMS.append(
            sqrt(h_layer4_DS_RMS.GetRMS()**2 + h_layer4_US_RMS.GetRMS()**2))
        mean_err_global_diff_RMS.append(
            sqrt(h_global_DS_RMS.GetRMS()**2 + h_global_US_RMS.GetRMS()**2))

    gr_layer1_US_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer1_US_RMS, bias_err_arr,
                                    mean_err_layer1_US_RMS)
    gr_layer2_US_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer2_US_RMS, bias_err_arr,
                                    mean_err_layer2_US_RMS)
    gr_layer3_US_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer3_US_RMS, bias_err_arr,
                                    mean_err_layer3_US_RMS)
    gr_layer4_US_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer4_US_RMS, bias_err_arr,
                                    mean_err_layer4_US_RMS)
    gr_global_US_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_global_US_RMS, bias_err_arr,
                                    mean_err_global_US_RMS)
    gr_layer1_DS_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer1_DS_RMS, bias_err_arr,
                                    mean_err_layer1_DS_RMS)
    gr_layer2_DS_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer2_DS_RMS, bias_err_arr,
                                    mean_err_layer2_DS_RMS)
    gr_layer3_DS_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer3_DS_RMS, bias_err_arr,
                                    mean_err_layer3_DS_RMS)
    gr_layer4_DS_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_layer4_DS_RMS, bias_err_arr,
                                    mean_err_layer4_DS_RMS)
    gr_global_DS_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                    mean_global_DS_RMS, bias_err_arr,
                                    mean_err_global_DS_RMS)
    # Bug fix: the "all" graph previously used mean_err_global_DS_RMS
    gr_global_all_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                     mean_global_all_RMS, bias_err_arr,
                                     mean_err_global_all_RMS)
    gr_layer1_diff_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                      mean_layer1_diff_RMS, bias_err_arr,
                                      mean_err_layer1_diff_RMS)
    gr_layer2_diff_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                      mean_layer2_diff_RMS, bias_err_arr,
                                      mean_err_layer2_diff_RMS)
    gr_layer3_diff_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                      mean_layer3_diff_RMS, bias_err_arr,
                                      mean_err_layer3_diff_RMS)
    gr_layer4_diff_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                      mean_layer4_diff_RMS, bias_err_arr,
                                      mean_err_layer4_diff_RMS)
    gr_global_diff_RMS = TGraphErrors(len(bias_arr), bias_arr,
                                      mean_global_diff_RMS, bias_err_arr,
                                      mean_err_global_diff_RMS)

    if subtract_floor_term:
        # See the hedged sketches of these helpers after main()
        gr_layer1_US_RMS = remove_floor_term_from_gr(gr_layer1_US_RMS)
        gr_layer2_US_RMS = remove_floor_term_from_gr(gr_layer2_US_RMS)
        gr_layer3_US_RMS = remove_floor_term_from_gr(gr_layer3_US_RMS)
        gr_layer4_US_RMS = remove_floor_term_from_gr(gr_layer4_US_RMS)
        gr_global_US_RMS = remove_floor_term_from_gr(gr_global_US_RMS)
        gr_layer1_DS_RMS = remove_floor_term_from_gr(gr_layer1_DS_RMS)
        gr_layer2_DS_RMS = remove_floor_term_from_gr(gr_layer2_DS_RMS)
        gr_layer3_DS_RMS = remove_floor_term_from_gr(gr_layer3_DS_RMS)
        gr_layer4_DS_RMS = remove_floor_term_from_gr(gr_layer4_DS_RMS)
        gr_global_DS_RMS = remove_floor_term_from_gr(gr_global_DS_RMS)
        gr_global_all_RMS = remove_floor_term_from_gr(gr_global_all_RMS)
        gr_layer1_diff_RMS = remove_floor_term_from_gr_diff(
            gr_layer1_US_RMS, gr_layer1_DS_RMS)
        gr_layer2_diff_RMS = remove_floor_term_from_gr_diff(
            gr_layer2_US_RMS, gr_layer2_DS_RMS)
        gr_layer3_diff_RMS = remove_floor_term_from_gr_diff(
            gr_layer3_US_RMS, gr_layer3_DS_RMS)
        gr_layer4_diff_RMS = remove_floor_term_from_gr_diff(
            gr_layer4_US_RMS, gr_layer4_DS_RMS)
        gr_global_diff_RMS = remove_floor_term_from_gr_diff(
            gr_global_US_RMS, gr_global_DS_RMS)

    gr_layer1_US_RMS.SetNameTitle(
        "gr_layer1_US_RMS",
        "Layer 1 Upstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer2_US_RMS.SetNameTitle(
        "gr_layer2_US_RMS",
        "Layer 2 Upstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer3_US_RMS.SetNameTitle(
        "gr_layer3_US_RMS",
        "Layer 3 Upstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer4_US_RMS.SetNameTitle(
        "gr_layer4_US_RMS",
        "Layer 4 Upstream;Bias (V);Pedestal Width (ADC units)")
    gr_global_US_RMS.SetNameTitle(
        "gr_global_US_RMS",
        "ALL Upstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer1_DS_RMS.SetNameTitle(
        "gr_layer1_DS_RMS",
        "Layer 1 Downstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer2_DS_RMS.SetNameTitle(
        "gr_layer2_DS_RMS",
        "Layer 2 Downstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer3_DS_RMS.SetNameTitle(
        "gr_layer3_DS_RMS",
        "Layer 3 Downstream;Bias (V);Pedestal Width (ADC units)")
    gr_layer4_DS_RMS.SetNameTitle(
        "gr_layer4_DS_RMS",
        "Layer 4 Downstream;Bias (V);Pedestal Width (ADC units)")
    gr_global_DS_RMS.SetNameTitle(
        "gr_global_DS_RMS",
        "ALL Downstream;Bias (V);Pedestal Width (ADC units)")
    gr_global_all_RMS.SetNameTitle(
        "gr_global_all_RMS",
        "ALL Channels;Bias (V);Pedestal Width (ADC units)")
    gr_layer1_diff_RMS.SetNameTitle(
        "gr_layer1_diff_RMS",
        "Layer 1 Downstream - Upstream;Bias (V);Pedestal Width Difference (ADC units)")
    gr_layer2_diff_RMS.SetNameTitle(
        "gr_layer2_diff_RMS",
        "Layer 2 Downstream - Upstream;Bias (V);Pedestal Width Difference (ADC units)")
    gr_layer3_diff_RMS.SetNameTitle(
        "gr_layer3_diff_RMS",
        "Layer 3 Downstream - Upstream;Bias (V);Pedestal Width Difference (ADC units)")
    gr_layer4_diff_RMS.SetNameTitle(
        "gr_layer4_diff_RMS",
        "Layer 4 Downstream - Upstream;Bias (V);Pedestal Width Difference (ADC units)")
    gr_global_diff_RMS.SetNameTitle(
        "gr_global_diff_RMS",
        "ALL Downstream - Upstream;Bias (V);Pedestal Width Difference (ADC units)")

    # Save results to file
    f_out = TFile(argv[0], "RECREATE")
    f_out.cd()
    gr_layer1_US_RMS.Write()
    gr_layer2_US_RMS.Write()
    gr_layer3_US_RMS.Write()
    gr_layer4_US_RMS.Write()
    gr_global_US_RMS.Write()
    gr_layer1_DS_RMS.Write()
    gr_layer2_DS_RMS.Write()
    gr_layer3_DS_RMS.Write()
    gr_layer4_DS_RMS.Write()
    gr_global_DS_RMS.Write()
    gr_global_all_RMS.Write()
    gr_layer1_diff_RMS.Write()
    gr_layer2_diff_RMS.Write()
    gr_layer3_diff_RMS.Write()
    gr_layer4_diff_RMS.Write()
    gr_global_diff_RMS.Write()
    for i in range(len(h_layer1_US_RMS_arr)):
        h_layer1_US_RMS_arr[i].Write()
        h_layer2_US_RMS_arr[i].Write()
        h_layer3_US_RMS_arr[i].Write()
        h_layer4_US_RMS_arr[i].Write()
        h_global_US_RMS_arr[i].Write()
        h_layer1_DS_RMS_arr[i].Write()
        h_layer2_DS_RMS_arr[i].Write()
        h_layer3_DS_RMS_arr[i].Write()
        h_layer4_DS_RMS_arr[i].Write()
        h_global_DS_RMS_arr[i].Write()
        h_global_all_RMS_arr[i].Write()
    f_out.Close()
    print("Done")
    return
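# A minimal sketch of the get_run_dict helper used in main() (hypothetical; the
# real run -> bias-voltage table is defined elsewhere in this script). It is
# assumed to return a dict mapping run number to the SiPM bias voltage of that
# run.
def get_run_dict():
    return {
        # run number : bias voltage (V) -- placeholder values only
        30950: 1.40,
        30951: 1.20,
        30952: 1.00,
    }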
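# Hedged sketches of the channel-lookup helpers used in the rocid/slot/channel
# loop (hypothetical; the real implementations and the actual BCAL cabling maps
# live elsewhere). The mappings below are illustrative placeholders only.
def get_hist_from_rocid_slot_channel(f, rocid, slot, channel):
    # Placeholder histogram path; the directory/name scheme inside the
    # hd_root files is an assumption here.
    return f.Get("bcal_ped/roc%d_slot%d_chan%d" % (rocid, slot, channel))

def getquadrant(rocid):
    # Placeholder: assume each pair of crates serves one BCAL quadrant (1-4).
    return {31: 1, 32: 1, 34: 2, 35: 2, 37: 3, 38: 3, 40: 4, 41: 4}.get(rocid, 0)

def getlayer(slot, channel):
    # Placeholder: assume the four readout layers cycle with channel number.
    return channel % 4 + 1

def getend(slot, channel):
    # Placeholder: 0 = upstream, 1 = downstream; assume ends alternate in
    # blocks of four channels.
    return (channel // 4) % 2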
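# A hedged sketch of the floor-term helpers (hypothetical implementations; only
# their use is visible above). remove_floor_term_from_gr assumes the pedestal
# width is the quadrature sum of a bias-independent electronics "floor" and the
# SiPM noise, and estimates the floor from the lowest-bias point of the graph;
# remove_floor_term_from_gr_diff is assumed to rebuild the DS - US difference
# from the two floor-subtracted graphs.
from math import sqrt
from ROOT import TGraphErrors

def remove_floor_term_from_gr(gr):
    floor = gr.GetY()[0]  # assumed floor estimate: lowest-bias point
    out = TGraphErrors(gr.GetN())
    out.SetNameTitle(gr.GetName(), gr.GetTitle())
    for i in range(gr.GetN()):
        y = gr.GetY()[i]
        y2 = y * y - floor * floor  # subtract the floor in quadrature
        out.SetPoint(i, gr.GetX()[i], sqrt(y2) if y2 > 0 else 0.0)
        out.SetPointError(i, gr.GetEX()[i], gr.GetEY()[i])
    return out

def remove_floor_term_from_gr_diff(gr_us, gr_ds):
    # Point-by-point DS - US difference, errors added in quadrature
    diff = TGraphErrors(gr_us.GetN())
    for i in range(gr_us.GetN()):
        diff.SetPoint(i, gr_us.GetX()[i], gr_ds.GetY()[i] - gr_us.GetY()[i])
        diff.SetPointError(i, 0,
                           sqrt(gr_us.GetEY()[i]**2 + gr_ds.GetEY()[i]**2))
    return diff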
def RunAnalysis(input_name, output_name=""): # Check output name, set by default if not passed to the function if not output_name: output_name = input_name.split("_")[0] + "_analysed.root" print("INPUT:", input_name, "\nOUTPUT:", output_name) # Configuring thresholds (thr_start, thr_end, thr_step) = (0.3, 8, 0.1) thr_range = np.arange(thr_start, thr_end + thr_step, thr_step) n_thr = len(thr_range) # Get hit dictionary hit_dict = GetHitDict(input_name) # Open root file to write the results into write_file = TFile("data/" + output_name, "recreate") write_file.cd() # Initialize efficiency and cluster size objects eff = TEfficiency("Efficiency", "Efficiency;Threshold [fC];Efficiency", n_thr, thr_start, thr_end) clus_graph = TGraphErrors(n_thr) clus_graph.SetNameTitle("Average_cluster_size", "Average_cluster_size") clus_graph.GetXaxis().SetTitle("Threshold [fC]") clus_graph.GetYaxis().SetTitle("Average cluster size") # Perform threshold scanning for thr in thr_range: thrE = thr * 6242.2 print("Threshold scanning:", round(thr / thr_end * 100, 1), "%", end="\r") # Scan a threshold and obtain a list of clusters cluster_list = ScanThreshold(hit_dict, thrE) # Analyze cluster list and fill TEfficiency object for cluster in cluster_list: eff.Fill(bool(cluster), thr) # Replace zeros with NaN to obtain proper average later cluster_list = [ np.nan if cluster == 0 else cluster for cluster in cluster_list ] try: # Calculate average cluster size and add points to clus_graph cluster_size = np.nanmean(cluster_list) N = np.count_nonzero(~np.isnan(cluster_list)) err = np.nanstd(cluster_list) / sqrt(N - 1) clus_graph.SetPoint(int(np.where(thr_range == thr)[0][0]), thr, cluster_size) clus_graph.SetPointError(int(np.where(thr_range == thr)[0][0]), ex=0, ey=err) except RuntimeWarning: pass print("\nDone.") # Efficiency fit fit_form = "0.5*[0]*TMath::Erfc((x-[1])/(TMath::Sqrt(2)*[2])*(1-0.6*TMath::TanH([3]*(x-[1])/TMath::Sqrt(2)*[2])))" fit_func = TF1("Efficiency_fit", fit_form, 0, 8) # Set initial parameter values and limits fit_func.SetParameters(1, 4, 1, 1, 0.5) fit_func.SetParLimits(1, 0, 5) fit_func.SetParLimits(2, 0, 2) fit_func.SetParLimits(3, 0, 2) fit_func.SetParLimits(4, 0.5, 0.7) # Perform fit eff.Fit(fit_func, "R") # Write outputs to file eff.Write() clus_graph.Write() # Collect info source = "allpix" angle = input_name.split("-")[0] descr = input_name.split("_")[0].strip("0deg-") n_events = str(int(eff.GetTotalHistogram().GetEntries() / n_thr)) title = source + "," + angle + "," + descr + "(" + str(n_events) + "ev)" vt50 = str(fit_func.GetParameter(1)) vt50_err = str(fit_func.GetParError(1)) thr_range = str(thr_start) + ":" + str(thr_end) + ":" + str(thr_step) # Write info to Info directory info_dir = write_file.mkdir("Info") info_dir.cd() info_dir.WriteObject(TString(source), "source") info_dir.WriteObject(TString(angle), "angle") info_dir.WriteObject(TString(descr), "descr") info_dir.WriteObject(TString(n_events), "n_events") info_dir.WriteObject(TString(title), "title") info_dir.WriteObject(TString(vt50), "vt50") info_dir.WriteObject(TString(vt50), "vt50_err") info_dir.WriteObject(TString(thr_range), "thr_range") write_file.Close()
        RecSA_MinMax_graph.SetPointError(count, 0, tmp_RecSA_M.std())
        RecSA_Quantiles_graph.SetPoint(count, RealSignalAmplitude, mean_RecSA_Q)
        RecSA_Quantiles_graph.SetPointError(count, 0, tmp_RecSA_Q.std())
        ghost_graph.SetPoint(count, RealSignalAmplitude, tmp_Ghosts.mean())
        minimas_graph.SetPoint(count, RealSignalAmplitude, tmp_Minimas.mean())
        count += 1

    canvas = TCanvas("canvas", "canvas")
    canvas.SetGrid()
    pad = canvas.GetPad(0)
    success_graph.SetNameTitle(
        "success", "MC Performance Analysis Result ({0} Hits)".format(hits))
    success_graph.GetXaxis().SetTitle("Relative Real Signal Amplitude")
    success_graph.GetYaxis().SetTitle(
        "Peak Finding Efficiency | Reconstructed Signal Amplitude")
    success_graph.GetYaxis().SetRangeUser(0, 1.1)
    success_graph.Draw("ALP*")
    ghost_graph.SetNameTitle("ghost_graph", "Ghost Peaks")
    ghost_graph.Draw("SAME LP*")
    func = TF1("func", "x", 0, 1)
    func.Draw("SAME")
    RecSA_Quantiles_graph.SetMarkerColor(ROOT.kRed)
    RecSA_Quantiles_graph.Draw("SAME P*")
    PngSaveName = PerfResults + foldername + (
        "MC_PerformanceAnalysis_{0}_" + peaks + "_{1}rep_" + str(binning) + "_" +