def B_Sub_Work(hist_name, it, b_num, count):
    """Estimate and subtract a TSpectrum background from a named histogram.

    Looks up the histogram called *hist_name* via gROOT, estimates its
    background with TSpectrum.Background using *it* iterations, and appends
    per-bin results to the caller-supplied output lists: bin indices go into
    *b_num*, background-subtracted counts into *count*.
    """
    source_hist = gROOT.FindObject(hist_name)
    spectrum = TSpectrum()
    background_hist = spectrum.Background(source_hist, it, "same")
    # Per-bin contents. Indices run 0..N-1 as in the original code
    # (NOTE(review): ROOT bin 0 is the underflow bin and bin N is skipped —
    # kept as-is to preserve behaviour; confirm against callers).
    bg_counts = [background_hist.GetBinContent(b)
                 for b in range(background_hist.GetNbinsX())]
    raw_counts = [source_hist.GetBinContent(b)
                  for b in range(source_hist.GetNbinsX())]
    for b, bg in enumerate(bg_counts):
        count.append(raw_counts[b] - bg)
        b_num.append(b)
def find_maxima(h, n=3, sigma=2, sort_x=False):
    """Locate up to *n* maxima of histogram *h* with TSpectrum.

    Returns an (m, 2) array of (x, y) peak positions, where m is the number
    of maxima TSpectrum actually found; rows are sorted by x when *sort_x*
    is true, otherwise kept in TSpectrum's own (height) order.
    """
    spec = TSpectrum(n)
    found = spec.Search(h, sigma)  # how many maxima were found
    xs = frombuffer(spec.GetPositionX(), dtype='d', count=found)
    ys = frombuffer(spec.GetPositionY(), dtype='d', count=found)
    peaks = array([xs, ys]).T
    if sort_x:
        return peaks[peaks[:, 0].argsort()]
    return peaks
def __init__(self, nomefile, sigma):
    """Build the slit histogram from *nomefile* and run a peak search.

    The file name encodes the setup as '<n>_<nstep>_<step>.<ext>':
    number of slits, number of steps and step size.
    """
    n, nstep, step = [int(part) for part in nomefile.split(".")[0].split("_")]
    self.nfenditure = n
    self.canvas = TCanvas("canvas{0}".format(n),
                          "canvas {0} fenditure".format(n))
    # Axis limits in the original integer arithmetic (Python 2 division).
    low_edge = -nstep / 2 * step / 100
    high_edge = (nstep / 2 + 1) * step / 100
    self.isto = TH1F("isto{0}".format(n),
                     "{0} fenditure;bin ;-log(intensity)".format(n),
                     nstep, low_edge, high_edge)
    self.isto.SetStats(0)
    self.file2isto(nomefile)
    # Peak search without background subtraction, 1% threshold.
    self.spectrum = TSpectrum()
    self.spectrum.Search(self.isto, sigma, "nobackground", 0.01)
    self.peaks = [self.spectrum.GetPositionX()[k]
                  for k in range(self.spectrum.GetNPeaks())]
class TrovaPicchi:
    """Load a slit-scan data file into a TH1F and locate its peaks."""

    def __init__(self, nomefile, sigma):
        """Build the histogram from *nomefile* and run a TSpectrum search.

        The file name encodes the setup as '<n>_<nstep>_<step>.<ext>':
        number of slits, number of steps and step size.
        """
        n, nstep, step = [int(part)
                          for part in nomefile.split(".")[0].split("_")]
        self.nfenditure = n
        self.canvas = TCanvas("canvas{0}".format(n),
                              "canvas {0} fenditure".format(n))
        # Axis limits in the original integer arithmetic (Python 2 division).
        low_edge = -nstep / 2 * step / 100
        high_edge = (nstep / 2 + 1) * step / 100
        self.isto = TH1F("isto{0}".format(n),
                         "{0} fenditure;bin ;-log(intensity)".format(n),
                         nstep, low_edge, high_edge)
        self.isto.SetStats(0)
        self.file2isto(nomefile)
        # Peak search without background subtraction, 1% threshold.
        self.spectrum = TSpectrum()
        self.spectrum.Search(self.isto, sigma, "nobackground", 0.01)
        self.peaks = [self.spectrum.GetPositionX()[k]
                      for k in range(self.spectrum.GetNPeaks())]

    def file2isto(self, nomefile):
        """Fill self.isto from a two-column (bin, content) text file.

        Positions are rescaled by 1/100 and contents flipped/offset
        (-content + 500) before filling.
        """
        with open(nomefile) as data:
            for row in data:
                pos, val = [float(tok) for tok in row.split()]
                self.isto.Fill(pos / 100, -val + 500)
def get_gpeaks(h, lrange=[0, 100], sigma=1, opt="", thres=0.01, niter=10): s = TSpectrum(niter, 3) h.GetXaxis().SetRangeUser(lrange[0], lrange[1]) npeaks = s.Search(h, sigma, opt, thres) bufX, bufY = s.GetPositionX(), s.GetPositionY() pos = [] for i in range(s.GetNPeaks()): pos.append([bufX[i], bufY[i]]) print pos pos.sort() return npeaks, pos
# NOTE(review): mangled fragment — the function closed by `return hSum` starts
# outside this chunk, and the trailing `for h in pY:` loop appears truncated;
# content kept verbatim.
# Top-level driver: builds X/Y projections of the summed IC E-dE file, makes
# summary spectra, then runs TSpectrum(10).Search (option "goff", threshold
# 0.05) on a restricted axis range of each non-background projection and
# records the first returned peak position.
hSum.SetBinContent(i + 1, tJ, hList[i].GetBinContent(j)) return hSum #if not os.path.isfile(os.path.join(HIST_BASE,'all_IC_EdE.root')): run_hists(HIST_BASE, 'hist', 'bkgd', 'IC_EdE', 'all_IC_EdE') pX, pY = make_projections(os.path.join(HIST_BASE, 'all_IC_EdE.root'), 'IC_EdE') # make summary spectra sDE = make_summary_spectra(pX, 'ic_dE_raw', 'ic dE summary') sE = make_summary_spectra(pY, 'ic_E_raw', 'ic E summary') # now find the peaks and plot them s = TSpectrum(10) r = [] pDE, pE = [], [] for h in pX: if 'bkgd' not in h.GetName(): h.SetAxisRange(1000, 3000, 'X') nFound = s.Search(h, 2, "goff", 0.05) xpeak = s.GetPositionX()[0] hK = int((h.GetName()).replace('hist', '').replace('_IC_EdE_px', '')) r.append(hK) pDE.append(xpeak) for h in pY: if 'bkgd' not in h.GetName(): h.SetAxisRange(1000, 1500, 'X') nFound = s.Search(h, 2, "goff", 0.05) xpeak = s.GetPositionX()[0]
def FindPeaksInHistogram(Hist, MinimumSeparation=500): "Find peaks in spectrum using TSpectrum. This scans entire histogram in specified window width" HistMin = Hist.GetXaxis().GetXmin() HistMax = Hist.GetXaxis().GetXmax() # Get 2 most prom peaks to set the width s = TSpectrum(20) s.Search(Hist, 10, '', 0.05) if (s.GetNPeaks() < 1): print 'ERROR: did not find 1 peak in setting width' return print 'Found initial peaks for', Hist.GetName(), s.GetNPeaks() PeaksX = s.GetPositionX() lowestX = PeaksX[0] lowestI = 0 for i in range(s.GetNPeaks()): x = PeaksX[i] if x < lowestX: lowestX = x lowestI = i Width = 2.0 * PeaksX[lowestI] * 0.9 print 'For width using peak and witdh: ', PeaksX[lowestI], Width NScans = int(2 * (HistMax - HistMin) / Width) StepSize = Width / 2 Peaks = dict() PeaksArray = [] for i in range(NScans): Start = HistMin + i * StepSize Stop = Start + Width Hist.GetXaxis().SetRangeUser(Start, Stop) s = TSpectrum(5) s.Search(Hist, 2, '', 0.35) N = s.GetNPeaks() PeaksX = s.GetPositionX() PeaksY = s.GetPositionY() print 'Range', Start, Stop, N for j in range(N): AddThisPeak = True for key in Peaks: if abs(PeaksX[j] - key) < MinimumSeparation: AddThisPeak = False break if AddThisPeak: Peaks[PeaksX[j]] = PeaksY[j] PeaksArray.append(PeaksX[j]) # Set range back to starting point Hist.GetXaxis().SetRangeUser(HistMin, HistMax) # Peaks array needs to be sorted PeaksArray.sort() # If there are zero or 1 peaks return if (len(Peaks.keys()) <= 1): return Peaks # Take peaks only from odd harmonics using first two points as reference FirstDiff = PeaksArray[1] - PeaksArray[0] OddPeaksArray = [PeaksArray[0]] for i in range(1, len(PeaksArray)): if (abs(PeaksArray[i] - OddPeaksArray[-1] - FirstDiff) < 0.10 * FirstDiff): OddPeaksArray.append(PeaksArray[i]) # Get a dict with only odd peaks OddPeaks = dict() for peak in OddPeaksArray: OddPeaks[peak] = Peaks[peak] print 'Integral', Hist.Integral() * 1.602E-19 return OddPeaks
# plot_sequence: per-event plotting of a time-indexed signal for a list of
# respondents. For the EDA variable (with phasic=True) it additionally bins
# the signal into ROOT TH1F histograms, estimates the tonic component with
# TSpectrum.Background ("Compton same") and takes the phasic component as
# raw - background; canvases and histograms are written under ./out/.
# NOTE(review): Python 2 code (print statements, `except IndexError, e`,
# `has_key`); `min`/`max` shadow builtins inside the sequence-splitting loop.
# NOTE(review): whitespace-mangled paste — the statements below are collapsed
# onto a few physical lines, including a disabled triple-quoted block that
# itself spans two of them; kept verbatim. Further comments are not inserted
# between lines because several boundaries fall mid-statement or mid-string.
def plot_sequence(df, respondents, Events, variable, phasic=False): for evt in Events: # ROOT hist for calculating phsic component summed_df = df.loc[respondents[0]] summed_df = summed_df.loc[summed_df['tag__info_StudioEventData'] == evt][['position', variable]] #tags might occour at different positions if there is a user defined wait between movies - thus the first position in the sequence should be set to zero for plotting. summed_df['position'] = (summed_df['position'] - summed_df['position'].min()) / 1000. maxbin = int(summed_df['position'].max() / 2) * 2 summed_df = summed_df.set_index('position') ax = summed_df.plot(cmap=sns.cubehelix_palette(as_cmap=True), label="first", legend=True, title=evt + variable) if variable == definitions.eda_data: # add constant to avoid negative values: #summed_df[variable] = summed_df[variable] + definitions.constanttoadd # normalise to an integral of 1: #summed_df[variable] = summed_df[variable] / summed_df[variable].sum() #max is per sequence and first bin is set to zero, so that max i length of given sequence if phasic: rawedahist = TH1F("rawedahist", "sum of normalized EDA per sequence", maxbin + 1, 0, maxbin) rawedahistentries = TH1F("rawedahistentries", "rawedahistentries", maxbin + 1, 0, maxbin) phasichist = TH1F("phasichist", "phasic part per sequence", maxbin + 1, 0, maxbin) tonichist = TH1F("tonichist", "tonic part per sequence", maxbin + 1, 0, maxbin) # make sure errors are sum of squares of weights - well actually dont as hists are 'added' #phasichist.Sumw2() #tonichist.Sumw2() for ix in summed_df.index: rawedahist.Fill(ix, summed_df[variable][ix]) rawedahistentries.Fill(ix) for ix in range(rawedahist.GetNbinsX() + 1): # add constant to avoid negative values: content = rawedahist.GetBinContent( ix) / rawedahistentries.GetBinContent( ix) if rawedahistentries.GetBinContent( ix) > 0 else 0 rawedahist.SetBinContent( ix, content + definitions.constanttoadd) # 
rawedahist.GetXaxis().SetRangeUser(summed_df.index[1],summed_df.index[-2]) # normalise to an integral of 1: rawedahist.Scale(1. / rawedahist.Integral()) s = TSpectrum() bg = s.Background(rawedahist, definitions.bgiter, "Compton same") tonichist.Add(bg) phasichist.Add(rawedahist) phasichist.Add(bg, -1) #loop over respondents if len(respondents) > 1: for i in range(1, len(respondents)): df1 = df.loc[respondents[i]] df1 = df1.loc[df1['tag__info_StudioEventData'] == evt][[ 'position', variable ]] df1['position'] = (df1['position'] - df1['position'].min()) / 1000. df1 = df1.set_index('position') df1.plot(label=respondents[i].decode('latin'), ax=ax, legend=True, title=evt + variable) #, c=i/len(respondents)) #resp=respondents[i] if phasic: if variable == definitions.eda_data: hist = TH1F("loophist", "loophisthist", maxbin + 1, 0, maxbin) histunweighted = TH1F("loophist", "loophisthist", maxbin + 1, 0, maxbin) # add constant to avoid negative values: #df1[variable] = df1[variable] + definitions.constanttoadd # normalise to an integral of 1: #df1[variable] = df1[variable] / df1[variable].sum() for ix in df1.index: hist.Fill(ix, df1[variable][ix]) histunweighted.Fill(ix) for ix in range(hist.GetNbinsX() + 1): # add constant to avoid negative values: content = hist.GetBinContent( ix) / histunweighted.GetBinContent( ix) if histunweighted.GetBinContent( ix) > 0 else 0 hist.SetBinContent( ix, content + definitions.constanttoadd) ''' # add constant to handle negative values. BSS calibrates EDA signal but can introduce a systematic error that sets the lowest arrousal level as negative. 
#print hist.Integral() #print df1.head() lastcontent=hist.GetBinContent(0) / histunweighted.GetBinContent(0) if histunweighted.GetBinContent(0)>0 else 0 for ix in range(hist.GetNbinsX()+1): #print ix #print hist.GetBinContent(ix) print ix print 'ix above, bincontent and entries:' print hist.GetBinContent(ix) print histunweighted.GetBinContent(ix) content = hist.GetBinContent(ix) / histunweighted.GetBinContent(ix) if histunweighted.GetBinContent(ix)>0 else 0 print 'content:' + str(content) print 'lastcontent:' + str(lastcontent) if ix < hist.GetNbinsX()-1: nextcontent = hist.GetBinContent(ix+1) / histunweighted.GetBinContent(ix+1) if histunweighted.GetBinContent(ix+1)>0 else 0 if lastcontent != 0 and math.fabs(content - lastcontent)/math.fabs(lastcontent)>0.2 and content > lastcontent and content > nextcontent: test=0 lastcontent = hist.GetBinContent(ix) / histunweighted.GetBinContent(ix) if histunweighted.GetBinContent(ix)>0 else 0 hist.SetBinContent(i, content) ''' hist.Scale(1. / hist.Integral()) #print hist.Integral() cmark = TCanvas("c", "c", 1200, 800) cmark.cd() hist.Draw() s = TSpectrum() np = s.Search(hist, definitions.sigmapeaksinterval, "noMarkov same nobackground", definitions.peakamplitude) bg = s.Background(hist, definitions.bgiter, "Compton same") cmark.Update() cmark.SaveAs('./out/respondents/PhasicModelling_' + respondents[i] + '_' + evt + '_respondents.png') cmark.SaveAs('./out/respondents/PhasicModelling_' + respondents[i] + '_' + evt + '_respondents.root') rawedahist.Add(hist) tonichist.Add(bg) hist.Add(bg, -1) phasichist.Add(hist) #phasichist.Draw() #cmark.Update() #cmark.Close() cmark.Close() del hist #cmark.Close() #add constant to avoid negative values: #df1[variable] = df1[variable] + definitions.constanttoadd #normalise to an integral of 1: #df1[variable] = df1[variable]/df1[variable].sum() summed_df = summed_df.add(df1, fill_value=0) summed_df[variable] = summed_df[variable] / len(respondents) test = 0 if phasic: cmark = TCanvas("c", "c", 
1200, 800) cmark.cd() tonichist.Draw() cmark.Update() cmark.SaveAs('./out/TonicEda_' + evt + '_respondents.png') cmark.SaveAs('./out/rootfiles/TonicEda_' + evt + '_respondents.root') tonichist.SaveAs('./out/rootfiles/TonicEdaHist_' + evt + '_respondents.root') print 'tonic mean of sequence: ' + str(evt) + str( tonichist.GetMean()) cmark.cd() phasichist.Draw() cmark.Update() cmark.SaveAs('./out/PhasicEda_' + evt + '_respondents.png') cmark.SaveAs('./out/rootfiles/PhasicEda_' + evt + '_respondents.root') phasichist.SaveAs('./out/rootfiles/PhasicEdaHist_' + evt + '_respondents.root') print 'phasic mean of sequence: ' + str(evt) + str( phasichist.GetMean()) rawedahist.Draw() cmark.Update() cmark.SaveAs('./out/RawEda_' + evt + '_respondents.png') cmark.SaveAs('./out/rootfiles/RawEda_' + evt + '_respondents.root') rawedahist.SaveAs('./out/rootfiles/RawEdaHist_' + evt + '_respondents.root') print 'raw mean of sequence: ' + str(evt) + str( phasichist.GetMean()) if definitions.sequence_splitting.has_key(evt): try: nx = len(definitions.sequence_names[evt]) meanphasic = TH1F("meanphasic", "mean phasic EDA per sequence", nx, 0, nx) meaneda = TH1F("meaneda", "mean EDA per sequence", nx, 0, nx) for i in range(1, nx + 1): meanphasic.GetXaxis().SetBinLabel( i, definitions.sequence_names[evt][i - 1]) meaneda.GetXaxis().SetBinLabel( i, definitions.sequence_names[evt][i - 1]) for irange in range( 1, len(definitions.sequence_splitting[evt])): min = definitions.sequence_splitting[evt][irange - 1] max = definitions.sequence_splitting[evt][irange] meanphasic.Fill( definitions.sequence_names[evt][irange - 1], phasichist.Integral( phasichist.GetXaxis().FindBin(min), phasichist.GetXaxis().FindBin(max)) / float(max - min)) meaneda.Fill( definitions.sequence_names[evt][irange - 1], rawedahist.Integral( rawedahist.GetXaxis().FindBin(min), rawedahist.GetXaxis().FindBin(max)) / float(max - min)) except IndexError, e: test = 0 cmark.cd() meanphasic.Draw() 
meanphasic.SaveAs('./out/rootfiles/PhasicEdaHist_splitMean_' + evt + '.root') cmark.Update() cmark.SaveAs('./out/PhasicEda_splitMean_' + evt + '.png') cmark.SaveAs('./out/rootfiles/PhasicEda_splitMean_' + evt + '.root') meaneda.Draw() cmark.Update() cmark.SaveAs('./out/RawEda_splitMean_' + evt + '.png') cmark.SaveAs('./out/rootfiles/RawEda_splitMean_' + evt + '.root') meaneda.SaveAs('./out/rootfiles/RawEdaHist_splitMean_' + evt + '.root') cmark.Close() #del tonichist,phasichist,rawedahist ax.legend([x.decode('latin') for x in respondents]) fig = ax.get_figure() fig.set_size_inches(40, 10, forward=True) fig.savefig('./out/' + variable + evt + '_respondents.png') #h = Hist(10, 0, 1, name="some name", title="some title") #ax_sum = sns.regplot(x=summed_df.index.values, y=summed_df[variable].values, x_bins=summed_df.index.b(), fit_reg=None) #from https://stackoverflow.com/questions/23709403/plotting-profile-hitstograms-in-python plt.close() #if variable==definitions.eda_data: #3 sec bins fig1, ax_sum = plt.subplots(nrows=1) functions.Profile(x=summed_df.index.values, y=summed_df[variable].values, nbins=int(summed_df.index.max() / 3), xmin=summed_df.index.min(), xmax=summed_df.index.max(), ax=ax_sum, variable=variable) #fig = ax_sum.get_figure() fig1.set_size_inches(40, 10, forward=True) fig1.savefig('./out/' + variable + evt + '_summed_3secbins.png') #summed_df[variable] = summed_df[variable].mean() #ax_mean = summed_df.plot(cmap=sns.cubehelix_palette(as_cmap=True), title=evt, legend=True) #fig = ax_mean.get_figure() #fig.savefig('./out/'+variable + evt + '_average.png') plt.close() # 10 sec bins fig1, ax_sum = plt.subplots(nrows=1) functions.Profile(x=summed_df.index.values, y=summed_df[variable].values, nbins=int(summed_df.index.max() / 10), xmin=summed_df.index.min(), xmax=summed_df.index.max(), ax=ax_sum, variable=variable) # fig = ax_sum.get_figure() fig1.set_size_inches(40, 10, forward=True) fig1.savefig('./out/' + variable + evt + '_summed_10secbins.png') 
plt.close() fig1, ax_sum = plt.subplots(nrows=1) functions.Profile(x=summed_df.index.values, y=summed_df[variable].values, nbins=int(summed_df.index.max()), xmin=summed_df.index.min(), xmax=summed_df.index.max(), ax=ax_sum, variable=variable) # fig = ax_sum.get_figure() fig1.set_size_inches(40, 10, forward=True) fig1.savefig('./out/' + variable + evt + '_summed.png') maxbin = int(summed_df.index.max()) minbin = int(summed_df.index.min()) variablehist = TH1F("variablehist", "variablehist", int(maxbin - minbin), minbin, maxbin) variablehistentries = TH1F("variablehistentries", "variablehistentries", int(maxbin - minbin), minbin, maxbin) for ind in summed_df.index: variablehist.Fill(ind, summed_df[variable][ind]) variablehistentries.Fill(ind) for ibin in range(variablehist.GetNbinsX() + 1): content = variablehist.GetBinContent( ibin) / variablehistentries.GetBinContent( ibin) if variablehistentries.GetBinContent(ibin) > 0 else 0 variablehistentries.SetBinContent(ibin, content) cmark = TCanvas("c", "c", 1200, 800) cmark.cd() variablehist.Draw() cmark.Update() variablehist.SaveAs('./out/' + variable + evt + '_summed.root') cmark.SaveAs('./out/' + variable + evt + '_summedcanvas.root') del variablehist # summed_df[variable] = summed_df[variable].mean() # ax_mean = summed_df.plot(cmap=sns.cubehelix_palette(as_cmap=True), title=evt, legend=True) # fig = ax_mean.get_figure() # fig.savefig('./out/'+variable + evt + '_average.png') plt.close() del fig, fig1
# Duplicate of MaPSA_fitter.__init__ below (the enclosing class header is not
# visible for this copy): registers Gaussian / extended-Gaussian / Erf TF1 fit
# functions in a TMap, creates a TSpectrum, zeroes every noise/signal
# fit-result field, builds the result dict and sets up the output TTree.
# NOTE(review): whitespace-mangled paste — kept verbatim.
def __init__(self): self._f_map = ROOT.TMap() self._gaussian_f1 = TF1('gauss_f1', '[0]*TMath::Gaus(x,[1],[2],1)', 0, 256, 3) self._f_map.Add(ROOT.TObjString("gauss_f1"), self._gaussian_f1) self._gaussian_f1.SetNpx(2560) self._gaussian_f1.SetTitle("Gaussian") self._gaussian_f1.SetMarkerColor(ROOT.kRed) self._gaussian_f1.SetLineColor(ROOT.kRed) self._gaussian_f1_extended = TF1( 'gauss_f1_ext', #'[0]*TMath::Gaus(x,[1],[2],0)', '[0]*TMath::Gaus(x,[1],[2],1)*(1-[3]*1/[2]**4*(x-[1]-[2])*(x-[1]+[2]))', 0, 256, 4) self._gaussian_f1_extended.SetTitle("Extended_Gaussian") self._gaussian_f1_extended.SetNpx(2560) self._gaussian_f1_extended.SetMarkerColor(ROOT.kGreen) self._gaussian_f1_extended.SetLineColor(ROOT.kGreen) self._f_map.Add(ROOT.TObjString("gauss_f1_ext"), self._gaussian_f1_extended) self._erf_f1 = TF1('Erf_f1', '[0]*.5*(1+TMath::Erf(([1]-x)/[2]))', 0, 256, 3) self._erf_f1.SetNpx(2560) self._erf_f1.SetTitle("Erf") self._erf_f1.SetMarkerColor(ROOT.kOrange) self._erf_f1.SetLineColor(ROOT.kOrange) self._f_map.Add(ROOT.TObjString("Erf_f1"), self._erf_f1) self._s = TSpectrum() self._threshold = 2 self._initial_expt_signal = 0.0025 self._run_no = ROOT.std.string() self._Noise_Norm = 0 self._Noise_Mean = 0 self._Noise_Sigma = 0 self._Noise_Chisqrndf = 0 self._Noise_Fit_Error = 0 self._Found_Signal = 0 self._Signal_Norm = 0 self._Signal_Mean = 0 self._Signal_Sigma = 0 self._Signal_Chisqrndf = 0 self._Signal_Fit_Error = 0 self._Err_Noise_Norm = 0 self._Err_Noise_Mean = 0 self._Err_Noise_Sigma = 0 self._Err_Signal_Norm = 0 self._Err_Signal_Mean = 0 self._Err_Signal_Sigma = 0 self._Channel_No = 0 self._Result_Dict = [] self._build_dict() self._tree = TTree('datatree', 'datatree') self._f_map.Add(ROOT.TObjString("tree"), self._tree) self._treevars = {} self._initialize_ttree()
# MaPSA_fitter: fits a Gaussian noise model to a channel histogram
# (Find_signal), searches the normalized fit residuals for a signal edge and
# fits it with an Erf (Find_signal_in_res / _fit_erf), then records the fit
# results in a key/value list and an output TTree.
# NOTE(review): in _fit_gauss the chi-square/NDF is read from self._erf_f1 —
# this looks like a copy-paste from _fit_erf and presumably should use
# self._gaussian_f1; confirm before relying on Noise_Chisqrndf.
# NOTE(review): in Find_signal, tmp_sigma = 2.3548 * (bin2 - bin1) multiplies
# by the FWHM factor instead of dividing, and works in bin indices rather
# than axis units — verify intent.
# NOTE(review): Python 2 prints; whitespace-mangled paste with statements
# split across physical lines (e.g. "upper_lim =" / "if") — kept verbatim,
# with no comments inserted at those broken boundaries.
class MaPSA_fitter: def __init__(self): self._f_map = ROOT.TMap() self._gaussian_f1 = TF1('gauss_f1', '[0]*TMath::Gaus(x,[1],[2],1)', 0, 256, 3) self._f_map.Add(ROOT.TObjString("gauss_f1"), self._gaussian_f1) self._gaussian_f1.SetNpx(2560) self._gaussian_f1.SetTitle("Gaussian") self._gaussian_f1.SetMarkerColor(ROOT.kRed) self._gaussian_f1.SetLineColor(ROOT.kRed) self._gaussian_f1_extended = TF1( 'gauss_f1_ext', #'[0]*TMath::Gaus(x,[1],[2],0)', '[0]*TMath::Gaus(x,[1],[2],1)*(1-[3]*1/[2]**4*(x-[1]-[2])*(x-[1]+[2]))', 0, 256, 4) self._gaussian_f1_extended.SetTitle("Extended_Gaussian") self._gaussian_f1_extended.SetNpx(2560) self._gaussian_f1_extended.SetMarkerColor(ROOT.kGreen) self._gaussian_f1_extended.SetLineColor(ROOT.kGreen) self._f_map.Add(ROOT.TObjString("gauss_f1_ext"), self._gaussian_f1_extended) self._erf_f1 = TF1('Erf_f1', '[0]*.5*(1+TMath::Erf(([1]-x)/[2]))', 0, 256, 3) self._erf_f1.SetNpx(2560) self._erf_f1.SetTitle("Erf") self._erf_f1.SetMarkerColor(ROOT.kOrange) self._erf_f1.SetLineColor(ROOT.kOrange) self._f_map.Add(ROOT.TObjString("Erf_f1"), self._erf_f1) self._s = TSpectrum() self._threshold = 2 self._initial_expt_signal = 0.0025 self._run_no = ROOT.std.string() self._Noise_Norm = 0 self._Noise_Mean = 0 self._Noise_Sigma = 0 self._Noise_Chisqrndf = 0 self._Noise_Fit_Error = 0 self._Found_Signal = 0 self._Signal_Norm = 0 self._Signal_Mean = 0 self._Signal_Sigma = 0 self._Signal_Chisqrndf = 0 self._Signal_Fit_Error = 0 self._Err_Noise_Norm = 0 self._Err_Noise_Mean = 0 self._Err_Noise_Sigma = 0 self._Err_Signal_Norm = 0 self._Err_Signal_Mean = 0 self._Err_Signal_Sigma = 0 self._Channel_No = 0 self._Result_Dict = [] self._build_dict() self._tree = TTree('datatree', 'datatree') self._f_map.Add(ROOT.TObjString("tree"), self._tree) self._treevars = {} self._initialize_ttree() def Print_Result_Dict(self): for k, v in self._Result_Dict: print k, v def Return_Result_Dict(self): return self._Result_Dict def Find_signal_in_res(self, h, h1, minimum): upper_lim = 
h.GetBinCenter(h.GetNbinsX()) h.SetAxisRange(minimum, upper_lim, "X") maximum = h.GetMaximum() maximum_x = h.GetBinCenter(h.GetMaximumBin()) tmpbin = h.FindLastBinAbove(abs(h.GetMaximum()) / 2) bin1 = tmpbin if (tmpbin > 0) else 0 half_max = h.GetBinCenter(bin1) bin2 = h.FindLastBinAbove(h.GetMaximum() / 4) q_max = h.GetBinCenter(bin2) sigma = abs(q_max - half_max) * 2 tmp_range = [maximum_x + self._threshold, upper_lim] if (half_max - maximum_x - self._threshold > 0): print "Signal peak found" self._Found_Signal = 1 tmp = self._fit_erf(h1, tmp_range, self._initial_expt_signal, half_max, sigma) self._Signal_Norm = tmp[0] self._Signal_Mean = tmp[1] self._Signal_Sigma = tmp[2] self._Err_Signal_Norm = tmp[3] self._Err_Signal_Mean = tmp[4] self._Err_Signal_Sigma = tmp[5] self._Signal_Chisqrndf = tmp[6] self._Signal_Fit_Error = tmp[7] def Find_signal(self, h, channel, initial_expt_signal=0.0025, threshold=2): self._reset() self._initial_expt_signal = initial_expt_signal self._threshold = threshold self._Channel_No = channel fwhm = 10 bin1 = h.FindFirstBinAbove(h.GetMaximum() / fwhm) bin2 = h.FindLastBinAbove(h.GetMaximum() / fwhm) tmp_norm = h.GetMaximum() / 2 tmp_mean = h.GetBinCenter(h.GetMaximumBin()) tmp_sigma = 2.3548 * (bin2 - bin1) tmp_range = [h.GetBinCenter(bin1) - .5, h.GetBinCenter(bin2) + .5] results = self._fit_gauss(h, tmp_range, tmp_norm, tmp_mean, tmp_sigma) self._Noise_Norm = results[0] self._Noise_Mean = results[1] self._Noise_Sigma = results[2] self._Err_Noise_Norm = results[3] self._Err_Noise_Mean = results[4] self._Err_Noise_Sigma = results[5] self._Noise_Chisqrndf = results[6] self._Noise_Fit_Error = results[7] if results[7] == 0: #self._Noise_Chisqrndf=results[6] resid_hist = self._residual_hist(h, self._gaussian_f1, 1) resid_hist1 = self._residual_hist(h, self._gaussian_f1, 0) #resid_hist=self._residual_hist(h,self._gaussian_f1_extended,1) self.Find_signal_in_res( resid_hist, resid_hist1, self._Err_Noise_Mean + self._Err_Noise_Sigma) if 
self._Found_Signal > 0: self.Write_signal(resid_hist1, resid_hist, self._gaussian_f1, self._erf_f1, channel) self._build_dict() self._fill_tree() def Set_run_no(self, run_no): self._run_no.replace(0, ROOT.std.string.npos, str(run_no)) def Make_dirs(self): #print ROOT.gDirectory.GetPathStatic() ROOT.gDirectory.mkdir("Signal") def Write_signal(self, resid, resid_norm, gaussian_tf1, erf_tf1, channel): ROOT.gDirectory.cd("Signal") pixel = str(channel).zfill(3) resid.Write(pixel + "_resid") resid_norm.Write(pixel + "_resid_norm") #gaussian_tf1.Write(pixel+"_resid_gaussian_tf1") #tmperf=erf_tf1.Clone() #tmperf.Write(pixel+"_resid_erf_tf1") ROOT.gDirectory.cd("..") def Write_tree(self): self._tree.Write('tree', ROOT.TObject.kOverwrite) self._f_map.DeleteAll() def _initialize_ttree(self): for k, v in self._Result_Dict: self._treevars[k] = array.array('f', [0]) for key in self._treevars.keys(): self._tree.Branch(key, self._treevars[key], key + "[1]/f") self._tree.Branch('FILENAME', self._run_no) def _reset(self): self._Noise_Norm = 0 self._Noise_Mean = 0 self._Noise_Sigma = 0 self._Noise_Chisqrndf = 0 self._Noise_Fit_Error = 0 self._Found_Signal = 0 self._Signal_Norm = 0 self._Signal_Mean = 0 self._Signal_Sigma = 0 self._Signal_Chisqrndf = 0 self._Signal_Fit_Error = 0 self._Err_Noise_Norm = 0 self._Err_Noise_Mean = 0 self._Err_Noise_Sigma = 0 self._Err_Signal_Norm = 0 self._Err_Signal_Mean = 0 self._Err_Signal_Sigma = 0 self._Channel_No = 0 def _build_dict(self): _Keys = [ 'Noise_Norm', 'Noise_Mean', 'Noise_Sigma', 'Noise_Chisqrndf', 'Noise_Fit_Error', 'Err_Noise_Norm', 'Err_Noise_Mean', 'Err_Noise_Sigma', 'Signal_Norm', 'Signal_Mean', 'Signal_Sigma', 'Signal_Chisqrndf', 'Signal_Fit_Error', 'Err_Signal_Norm', 'Err_Signal_Mean', 'Err_Signal_Sigma', 'Found_Signal', 'Channel_No' ] _Values = [ self._Noise_Norm, self._Noise_Mean, self._Noise_Sigma, self._Noise_Chisqrndf, self._Noise_Fit_Error, self._Err_Noise_Norm, self._Err_Noise_Mean, self._Err_Noise_Sigma, self._Signal_Norm, 
self._Signal_Mean, self._Signal_Sigma, self._Signal_Chisqrndf, self._Signal_Fit_Error, self._Err_Signal_Norm, self._Err_Signal_Mean, self._Err_Signal_Sigma, self._Found_Signal, self._Channel_No ] self._Result_Dict = zip(_Keys, _Values) def _fill_tree(self): for k, v in self._Result_Dict: self._treevars[k][0] = v self._tree.Fill() def _get_peaks(self, h, lrange=[0, 100], sigma=1, opt="goff", thres=0.01, niter=10): self._s.SetDeconIterations(niter) h.GetXaxis().SetRangeUser(lrange[0], lrange[1]) npeaks = self._s.Search(h, sigma, opt, thres) bufX, bufY = self._s.GetPositionX(), self._s.GetPositionY() pos = [] for i in range(self._s.GetNPeaks()): pos.append([bufX[i], bufY[i]]) print pos pos.sort() return npeaks, pos def _fit_gauss(self, h, lrange=[0, 100], norm=1, mean=1, sigma=1): self._gaussian_f1.SetParameters(norm, mean, sigma) #self._gaussian_f1_extended.SetParameters(norm,mean,sigma) self._gaussian_f1.SetRange(lrange[0], lrange[1]) #self._gaussian_f1_extended.SetRange(lrange[0],lrange[1]) #fit_ptr=h.Fit(self._gaussian_f1,'rWL0q+','',lrange[0],lrange[1]) fit_ptr = h.Fit(self._gaussian_f1, 'r0q', '', lrange[0], lrange[1]) fit_error = ROOT.gMinuit.GetStatus() #h.Fit(self._gaussian_f1_extended,'r0q','',lrange[0],lrange[1]) norm = self._gaussian_f1.GetParameter(0) mean = self._gaussian_f1.GetParameter(1) sigma = self._gaussian_f1.GetParameter(2) err_norm = self._gaussian_f1.GetParError(0) err_mean = self._gaussian_f1.GetParError(1) err_sigma = self._gaussian_f1.GetParError(2) chisquare_ndf = 0 if (self._erf_f1.GetChisquare() > 0 and self._erf_f1.GetNDF() > 0): chisquare_ndf = self._erf_f1.GetChisquare() / self._erf_f1.GetNDF() return norm, mean, sigma, err_norm, err_mean, err_sigma, chisquare_ndf, fit_error def _fit_erf(self, h, lrange=[0, 100], norm=1, mean=1, sigma=1): self._erf_f1.SetParameters(norm, mean, sigma) self._erf_f1.SetRange(lrange[0], lrange[1]) #h.Fit(self._erf_f1,'rWL0q+','',lrange[0],lrange[1]) h.Fit(self._erf_f1, 'r0q+', '', lrange[0], lrange[1]) 
fit_error = ROOT.gMinuit.GetStatus() norm = self._erf_f1.GetParameter(0) mean = self._erf_f1.GetParameter(1) sigma = self._erf_f1.GetParameter(2) err_norm = self._erf_f1.GetParError(0) err_mean = self._erf_f1.GetParError(1) err_sigma = self._erf_f1.GetParError(2) chisquare_ndf = 0 if (self._erf_f1.GetChisquare() > 0 and self._erf_f1.GetNDF() > 0): chisquare_ndf = self._erf_f1.GetChisquare() / self._erf_f1.GetNDF() return norm, mean, sigma, err_norm, err_mean, err_sigma, chisquare_ndf, fit_error def _residual_hist(self, h, fitfunc, knormalize): resid_hist = h.Clone() resid_hist.Reset() resid_hist.SetName(fitfunc.GetTitle()) if (knormalize == 0): resid_hist.SetTitle(str("Residuals_" + fitfunc.GetTitle())) else: resid_hist.SetTitle(str("Residuals_" + fitfunc.GetTitle())) resid_hist.SetMarkerColor(fitfunc.GetMarkerColor()) resid_hist.SetLineColor(fitfunc.GetLineColor()) tmp_arr = np.zeros((h.GetSize(), ), dtype=np.double) buffer = array.array('f', tmp_arr) #print str(buffer) #buffer=[] if (knormalize == 0): for i in range(0, h.GetSize()): res = h.GetBinContent(i) - fitfunc.Eval(h.GetBinCenter(i)) res_err = h.GetBinError(i) resid_hist.SetBinContent(i, res) resid_hist.SetBinError(i, res_err) if (knormalize == 1): for i in range(0, h.GetSize()): res = h.GetBinContent(i) - fitfunc.Eval(h.GetBinCenter(i)) res_err = h.GetBinError(i) if (res_err > 0): resid_hist.SetBinContent(i, res / res_err) resid_hist.SetBinError(i, res_err) return resid_hist