def showDecay():
    tree1 = TTree()
    tree1.ReadFile('/data/repos/Mic4Test_KC705/Software/Analysis/data/ENC/ENC_Chip5Col12_scan1.dat',
                   'idX/i:vL/F:vH:A:D/i:R:W')
    tree1.ReadFile('/data/repos/Mic4Test_KC705/Software/Analysis/data/ENC/ENC_Chip5Col12_scan2_mod.dat')

    # p1 = TProfile('p1','p1;#DeltaU [V];Prob',50,0.12,0.2)
    # tree1.Draw("W:(vH-vL)>>p1","","profE")
    tree1.Draw("W*0.0001:(vH-vL)>>p1", "", "profs")
    p1 = gPad.GetPrimitive('p1')
    p1.GetYaxis().SetTitle('<Width> [us]')
    # p1.GetYaxis().SetTitle('<Rising time> [us]')
    p1.GetXaxis().SetTitle('#Delta U [V]')

    # fun1 = TF1('fun1','0.5*(1+TMath::Erf((x-[0])/(TMath::Sqrt(2)*[1])))',0.05,0.3)
    # fun1.SetParameter(0,0.155)
    # fun1.SetParameter(1,0.005)
    #
    # p1.Fit(fun1)
    # fun1a = p1.GetFunction('fun1')
    # fun1a.SetLineColor(2)
    #
    # p1.Draw("Esame")
    #
    # v0 = fun1a.GetParameter(0)
    # e0 = fun1a.GetParError(0)
    # v1 = fun1a.GetParameter(1)
    # e1 = fun1a.GetParError(1)
    #
    # print v0, v1
    #
    # fUnit = 1000.
    # lt = TLatex()
    # lt.DrawLatexNDC(0.185,0.89,'#mu = {0:.1f} #pm {1:.1f} mV'.format(v0*fUnit, e0*fUnit))
    # lt.DrawLatexNDC(0.185,0.84,'#sigma = {0:.1f} #pm {1:.1f} mV'.format(v1*fUnit, e1*fUnit))
    #
    # print 'TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1)
    # fun2 = TF1('gaus1','TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1))
    # fun2.SetLineColor(4)
    # fun2.SetLineStyle(2)
    # fun2.Draw('same')
    #
    # lg = TLegend(0.7,0.4, 0.95, 0.5)
    # lg.SetFillStyle(0)
    # lg.AddEntry(p1,'Measurement','p')
    # lg.AddEntry(fun1a,'Fit','l')
    # lg.AddEntry(fun2,'Gaus','l')
    # lg.Draw()

    waitRootCmdX()
def test2():
    add_fit_menu()

    t150 = TTree()
    t150.ReadFile('Jan05a_150mV.dat')
    t150.ReadFile('Jan05a_50mV.dat')
    # t150.ReadFile('Jan05a_100mV.dat')
    t150.ReadFile('tt2.dat')
    t150.ReadFile('Jan05a_250mV.dat')
    t150.Draw('A', 'ch==12')

    waitRootCmdX()
def compareX():
    # fl = ['Jan05a_100mV.dat', 'Jan08a_100mV_r30p0us.dat', 'Jan08a_100mV_r40p0us.dat', 'Jan08a_100mV_r50p0us.dat']
    fl = ['data/fpgaLin/Jan22a_C2_100mV_f{0:d}.dat'.format(x) for x in [100, 200, 500, 1000]]
    fs = [TTree() for f in fl]

    opt = ''
    chx = TTree()
    for f in fl:
        chx.ReadFile(f)
    chx.Draw('A', 'ch==12', 'axis')
    opt = 'same'

    for i in range(len(fl)):
        fs[i].ReadFile(fl[i])
        fs[i].SetFillStyle(0)
        fs[i].SetLineColor(i + 1)
        fs[i].Draw('A', 'ch==12', opt)

    waitRootCmdX()
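# Illustrative sketch (not part of the original script): when the files compared in
# compareX() have very different event counts, the overlay is easier to read if each
# spectrum is drawn with unit area.  The 'A' and 'ch' branches follow compareX(); the
# histogram names 'hA<i>' and the default binning are made up for this example.
def compareXNormalized(fl, ch=12, nbins=100, xlo=0., xhi=1.):
    from ROOT import TTree, gDirectory
    hists = []
    for i, f in enumerate(fl):
        t = TTree()
        t.ReadFile(f)
        t.Draw('A>>hA{0:d}({1:d},{2},{3})'.format(i, nbins, xlo, xhi),
               'ch=={0:d}'.format(ch), 'goff')       # fill a named histogram without drawing
        h = gDirectory.Get('hA{0:d}'.format(i))
        h.SetLineColor(i + 1)
        h.SetDirectory(0)                            # keep the histogram alive after the tree is dropped
        hists.append(h)
    drawn = []
    for i, h in enumerate(hists):
        drawn.append(h.DrawNormalized('' if i == 0 else 'same'))   # unit-area overlay
    return hists, drawn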
def test1(fname='tt2.dat'):
    add_fit_menu()

    t1 = TTree()
    t1.ReadFile(fname)
    t1.Show(0)

    for i in range(18):
        c1.cd(i + 1)
        lt.DrawLatexNDC(0.2, 0.4, 'Ch={0:d}'.format(i))
        t1.Draw('A', 'ch=={0:d}'.format(i))
        # t1.Draw('A>>h1','ch=={0:d}'.format(i),'goff')
        # h1 = gDirectory.Get('h1')
        # h1.SetName('ch'+str(i))
        #
        # c1.cd(i)
        # h1.Draw()
        # if i>1: break

    c1.cd()
    waitRootCmdX()
def inspectCh(ds, chs):
    t = TTree()
    t.SetFillColor(0)
    t.SetFillStyle(0)
    for d in ds:
        t.ReadFile(d[1])

    cv1 = TCanvas()
    cv1.Divide(2, 4)

    icv = 1
    for ch in chs:
        cv1.cd(icv)
        t.Draw('A', 'ch=={0:d}'.format(ch))
        hx = gPad.GetPrimitive('htemp')
        hx.GetXaxis().SetTitle('U_{Out} [V]')
        hx.SetName('h{0:d}'.format(ch))
        lt.DrawLatexNDC(0.7, 0.9, "Ch {0:d}".format(ch))
        icv += 1

    cv1.cd(0)
    waitRootCmdX()
tree = TTree("tree_name", "tree title") nevts = 100 Nmax = 10 nParticles = array( 'i', [ 0 ] ) pt = array( 'd', Nmax*[ 0. ] ) tree.Branch( 'nParticles', nParticles, 'nParticles/I' ) tree.Branch( 'pt', pt, 'pt[nParticles]/D' ) for i in xrange(nevts): nParticles[0] = int(gRandom.Uniform()*10) for j in range(nParticles[0]): pt[j] = gRandom.Gaus(20,2) tree.Fill() tree.Draw("pt[0] >> h(100,0,10)") hist = gDirectory.Get("h") print gDirectory.GetName() print gDirectory.ls() print hist print type(hist) file.Write() file.Close() def main(): print "\n>>> Hello, world!" writeTree()
def showENC():
    fname1 = '/data/repos/Mic4Test_KC705/Software/Analysis/data/ENC/ENC_Chip5Col12_scan1.dat'

    tree1 = TTree()
    tree1.ReadFile(fname1, 'idX/i:vL/F:vH:A:D/i:R:W')
    tree1.ReadFile('/data/repos/Mic4Test_KC705/Software/Analysis/data/ENC/ENC_Chip5Col12_scan2_mod.dat')
    tree1.Show(500)

    p1 = TProfile('p1', 'p1;#DeltaU [V];Prob', 50, 0.12, 0.2)
    tree1.Draw("D:(vH-vL)>>p1", "", "profE")

    ### convert the profile to a TGraphErrors
    g1 = TGraphErrors()
    for i in range(p1.GetNbinsX() + 2):
        N = p1.GetBinEntries(i)
        if N > 0:
            print i, N, p1.GetXaxis().GetBinCenter(i), p1.GetBinContent(i), p1.GetBinError(i)
            n = g1.GetN()
            g1.SetPoint(n, p1.GetXaxis().GetBinCenter(i), p1.GetBinContent(i))
            g1.SetPointError(n, 0, p1.GetBinError(i))

    # g1.SetMarkerColor(3)
    # g1.SetLineColor(3)

    p1.Draw("axis")
    g1.Draw('Psame')

    fun1 = TF1('fun1', '0.5*(1+TMath::Erf((x-[0])/(TMath::Sqrt(2)*[1])))', 0.05, 0.3)
    fun1.SetParameter(0, 0.155)
    fun1.SetParameter(1, 0.005)

    g1.Fit(fun1)
    fun1a = g1.GetFunction('fun1')
    # p1.Fit(fun1)
    # fun1a = p1.GetFunction('fun1')
    fun1a.SetLineColor(2)
    # p1.Draw("Esame")

    v0 = fun1a.GetParameter(0)
    e0 = fun1a.GetParError(0)
    v1 = fun1a.GetParameter(1)
    e1 = fun1a.GetParError(1)

    print v0, v1

    fUnit = 1000.
    lt = TLatex()
    lt.DrawLatexNDC(0.185, 0.89, '#mu = {0:.1f} #pm {1:.1f} mV'.format(v0 * fUnit, e0 * fUnit))
    lt.DrawLatexNDC(0.185, 0.84, '#sigma = {0:.1f} #pm {1:.1f} mV'.format(v1 * fUnit, e1 * fUnit))

    print 'TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1)
    fun2 = TF1('gaus1', 'TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1))
    fun2.SetLineColor(4)
    fun2.SetLineStyle(2)
    fun2.Draw('same')

    lg = TLegend(0.7, 0.4, 0.95, 0.5)
    lg.SetFillStyle(0)
    lg.AddEntry(p1, 'Measurement', 'p')
    lg.AddEntry(fun1a, 'Fit', 'l')
    lg.AddEntry(fun2, 'Gaus', 'l')
    lg.Draw()

    waitRootCmdX()
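# Illustrative sketch (new code, not from the original script): the threshold scan in
# showENC() is fit with the error-function S-curve 0.5*(1+Erf((x-mu)/(sqrt(2)*sigma))),
# where mu is the 50% firing point and sigma the noise width.  This toy check generates an
# S-curve with known mu/sigma and fits it back to confirm the parameterization; all names
# and numbers here are invented for the demonstration.
def checkSCurveFit(mu=0.155, sigma=0.005, nPoints=40, nTrials=200):
    from ROOT import TGraphErrors, TF1, TMath, gRandom
    from math import sqrt
    g = TGraphErrors()
    for i in range(nPoints):
        x = 0.12 + 0.08 * i / (nPoints - 1)
        pTrue = 0.5 * (1 + TMath.Erf((x - mu) / (sqrt(2) * sigma)))
        k = sum(1 for _ in range(nTrials) if gRandom.Uniform() < pTrue)   # binomial firing count
        p = float(k) / nTrials
        g.SetPoint(i, x, p)
        g.SetPointError(i, 0, max(sqrt(p * (1 - p) / nTrials), 1. / nTrials))
    fun = TF1('sCurve', '0.5*(1+TMath::Erf((x-[0])/(TMath::Sqrt(2)*[1])))', 0.12, 0.2)
    fun.SetParameters(mu, sigma)
    g.Fit(fun, 'Q')
    print 'fitted mu    =', fun.GetParameter(0), '+/-', fun.GetParError(0)
    print 'fitted sigma =', fun.GetParameter(1), '+/-', fun.GetParError(1)
    return g, fun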
class Run(Analysis): """ Run class containing all the information for a single run. """ NTelPlanes = 4 def __init__(self, number=None, testcampaign=None, load_tree=True, verbose=None): """ :param number: if None is provided it creates a dummy run :param testcampaign: if None is provided ... :param load_tree: load the ROOT TTree :param verbose: turn on more output """ # Basics super(Run, self).__init__(testcampaign, verbose=verbose, pickle_dir='Run') self.Number = number # Directories / Test Campaign self.IrradiationFile = join(self.Dir, self.MainConfig.get('MISC', 'irradiation file')) # Configuration & Root Files self.Config = self.load_run_config() self.RootFileDir = self.load_rootfile_dirname() self.RootFilePath = self.load_rootfile_path() # Run Info self.InfoFile = join(self.TCDir, 'run_log.json') self.Info = self.load_run_info() self.RootFile = None self.Tree = TTree() self.TreeName = self.Config.get('BASIC', 'treename') self.DUTs = [self.dut(i + 1, self.Info) for i in range(self.get_n_diamonds())] if self.Number is not None else None # Settings self.Plane = Plane() self.TriggerPlanes = self.load_trigger_planes() # General Information self.Flux = self.get_flux() self.Type = self.get_type() # Times self.LogStart = self.load_log_start() self.LogEnd = self.load_log_stop() self.Duration = self.LogEnd - self.LogStart self.Converter = Converter(self) if self.set_run(number, load_tree): # tree info self.TimeOffset = None self.Time = self.load_time_vec() self.StartEvent = 0 self.NEvents = int(self.Tree.GetEntries()) self.EndEvent = self.NEvents - 1 self.StartTime = self.get_time_at_event(self.StartEvent) self.EndTime = self.get_time_at_event(self.EndEvent) self.TotalTime = self.load_total_time() self.TotalMinutes = self.TotalTime / 60000. self.Duration = timedelta(seconds=self.TotalTime) self.LogEnd = self.LogStart + self.Duration # overwrite if we know exact duration self.NPlanes = self.load_n_planes() self.TInit = time() - self.InitTime def __str__(self): return f'{self.__class__.__name__} {self.Number}{self.evt_str} ({self.TCString})' def __repr__(self): return self.__str__() def __call__(self, number, load_tree=False): self.set_run(number, load_tree) return self def __gt__(self, other): return self.Number > (other.Number if isinstance(other, Run) else other) @property def evt_str(self): return f' with {make_ev_str(self.Info["events"])} ev' if 'events' in self.Info else f' with {make_ev_str(self.NEvents)} ev' if self.Tree.Hash() else '' def set_run(self, number, load_tree): if number is None: return False if number < 0 and type(number) is not int: critical('incorrect run number') self.Number = number self.load_run_info() self.Flux = self.get_flux() # check for conversion if load_tree: self.Converter.convert_run() self.load_rootfile() else: return False if not self.rootfile_is_valid(): self.Converter.convert_run() self.load_rootfile() return True def get_type(self): return self.Config.get('BASIC', 'type') if self.Number is not None else None def set_estimate(self, n=None): self.Tree.SetEstimate(choose(n, -1)) def is_volt_scan(self): return any(name in self.Info['runtype'] for name in ['voltage', 'hv']) # ---------------------------------------- # region INIT @property def dut(self): return DUT def load_rootfile(self, prnt=True): self.info('Loading information for rootfile: {file}'.format(file=basename(self.RootFilePath)), endl=False, prnt=prnt) self.RootFile = TFile(self.RootFilePath) self.Tree = self.RootFile.Get(self.TreeName) return self.Tree def load_run_config(self): base_file_name = 
join(get_base_dir(), 'config', self.TCString, 'RunConfig.ini') if not file_exists(base_file_name): critical('RunConfig.ini does not exist for {0}! Please create it in config/{0}!'.format(self.TCString)) parser = Config(base_file_name) # first read the main config file with general information for all splits if parser.has_section('SPLIT') and self.Number is not None: split_runs = [0] + loads(parser.get('SPLIT', 'runs')) + [inf] config_nr = next(i for i in range(1, len(split_runs)) if split_runs[i - 1] <= self.Number < split_runs[i]) parser.read(join(get_base_dir(), 'config', self.TCString, 'RunConfig{nr}.ini'.format(nr=config_nr))) # add the content of the split config return parser @staticmethod def make_root_filename(run): return f'TrackedRun{run:0>3}.root' def make_root_subdir(self): return join('root', 'pads' if self.get_type() == 'pad' else self.get_type()) def load_rootfile_path(self, run=None): run = choose(run, self.Number) return None if run is None else join(self.RootFileDir, self.make_root_filename(run)) def load_rootfile_dirname(self): return ensure_dir(join(self.TCDir, self.make_root_subdir())) if self.Number is not None else None def load_trigger_planes(self): return array(self.Config.get_list('BASIC', 'trigger planes', [1, 2])) def get_n_diamonds(self, run_number=None): run_info = self.load_run_info(run_number) return len([key for key in run_info if key.startswith('dia') and key[-1].isdigit()]) def load_dut_numbers(self): return [i + 1 for i in range(len([key for key in self.Info.keys() if key.startswith('dia') and key[-1].isdigit()]))] def load_dut_type(self): dut_type = self.Config.get('BASIC', 'type') if self.Number is not None else None if dut_type not in ['pixel', 'pad', None]: critical("The DUT type {0} has to be either 'pixel' or 'pad'".format(dut_type)) return dut_type def load_default_info(self): with open(join(self.Dir, 'Runinfos', 'defaultInfo.json')) as f: return load(f) def load_run_info_file(self): if not file_exists(self.InfoFile): critical('Run Log File: "{f}" does not exist!'.format(f=self.InfoFile)) with open(self.InfoFile) as f: return load(f) def load_run_info(self, run_number=None): data = self.load_run_info_file() run_number = self.Number if run_number is None else run_number if run_number is not None: run_info = data.get(str(run_number)) if run_info is None: # abort if the run is still not found critical('Run {} not found in json run log file!'.format(run_number)) self.Info = run_info self.Info['masked pixels'] = [0] * 4 self.translate_diamond_names() return run_info else: self.Info = self.load_default_info() return self.Info def load_dut_names(self): return [self.Info['dia{nr}'.format(nr=i)] for i in range(1, self.get_n_diamonds() + 1)] def load_biases(self): return [int(self.Info['dia{nr}hv'.format(nr=i)]) for i in range(1, self.get_n_diamonds() + 1)] def load_log_start(self): return conv_log_time(self.Info['starttime0']) def load_log_stop(self): return conv_log_time(self.Info['endtime']) def load_total_time(self): return (self.Time[-1] - self.Time[0]) / 1000 def load_n_planes(self): if self.has_branch('cluster_col'): self.Tree.Draw('@cluster_col.size()', '', 'goff', 1) return int(self.Tree.GetV1()[0]) else: return 4 def load_time_vec(self): t = get_time_vec(self.Tree) t0 = datetime.fromtimestamp(t[0] / 1000) if t[0] < 1e12 else None self.TimeOffset = None if t0 is None or t0.year > 2000 and t0.day == self.LogStart.day else t[0] - time_stamp(self.LogStart) * 1000 return t if self.TimeOffset is None else t - self.TimeOffset def 
load_plane_efficiency(self, plane): return self.load_plane_efficiencies()[plane - 1] def load_plane_efficiencies(self): return [ufloat(e, .03) for e in self.Config.get_list('BASIC', 'plane efficiencies', default=[.95, .95])] # endregion INIT # ---------------------------------------- # ---------------------------------------- # region MASK def load_mask_file_path(self): mask_dir = self.MainConfig.get('MAIN', 'maskfile directory') if self.MainConfig.has_option('MAIN', 'maskfile directory') else join(self.DataDir, self.TCDir, 'masks') if not dir_exists(mask_dir): warning('Mask file directory does not exist ({})!'.format(mask_dir)) return join(mask_dir, basename(self.Info['maskfile'])) def load_mask(self, plane=None): mask_file = self.load_mask_file_path() if basename(mask_file).lower() in ['no mask', 'none', 'none!', ''] or self.Number is None: return try: data = genfromtxt(mask_file, [('id', 'U10'), ('pl', 'i'), ('x', 'i'), ('y', 'i')]) if 'cornBot' not in data['id']: warning('Invalid mask file: "{}". Not taking any mask!'.format(mask_file)) mask = [[data[where((data['pl'] == pl) & (data['id'] == n))][0][i] for n in ['cornBot', 'cornTop'] for i in [2, 3]] for pl in sorted(set(data['pl']))] mask = [[max(1, m[0]), max(1, m[1]), min(self.Plane.NCols - 2, m[2]), min(self.Plane.NRows - 2, m[3])] for m in mask] # outer pixels are ignored return mask if plane is None else mask[plane - 1] if plane - 1 < len(mask) else None except Exception as err: warning(err) warning('Could not read mask file... not taking any mask!') def get_mask_dim(self, plane=1, mm=True): return Plane.get_mask_dim(self.load_mask(plane), mm) def get_mask_dims(self, mm=True): return array([self.get_mask_dim(pl, mm) for pl in [1, 2]]) def get_unmasked_area(self, plane): return None if self.Number is None else Plane.get_area(self.load_mask(plane)) def find_for_in_comment(self): for name in ['for1', 'for2']: if name not in self.Info: for cmt in self.Info['comments'].split('\r\n'): cmt = cmt.replace(':', '') cmt = cmt.split(' ') if str(cmt[0].lower()) == name: self.Info[name] = int(cmt[1]) return 'for1' in self.Info # endregion MASK # ---------------------------------------- # ---------------------------------------- # region HELPERS def translate_diamond_names(self): for key, value in [(key, value) for key, value in self.Info.items() if key.startswith('dia') and key[-1].isdigit()]: self.Info[key] = self.translate_dia(value) def register_new_dut(self): if input('Do you want to add a new diamond? 
[y,n] ').lower() in ['y', 'yes']: dut_type = int(input('Enter the DUT type (1 for pCVD, 2 for scCVD, 3 for silicon): ')) - 1 dut_name = input('Enter the name of the DUT (no "_"): ') alias = input(f'Enter the alias (no "_", for default {dut_name.lower()} press enter): ') self.add_alias(alias, dut_name, dut_type) self.add_dut_info(dut_name) return True else: return False @staticmethod def add_alias(alias, dut_name, dut_type): alias_file = join(Dir, 'config', 'DiamondAliases.ini') with open(alias_file, 'r+') as f: lines = [line.strip(' \n') for line in f.readlines()] i0 = lines.index(['# pCVD', '# scCVD', '# Silicon'][dut_type]) i = next(i for i, line in enumerate(lines[i0:], i0) if line.strip() == '') lines.insert(i, f'{(alias if alias else dut_name).lower()} = {dut_name}') f.seek(0) f.writelines([f'{line}\n' for line in lines]) info(f'added entry: {(alias if alias else dut_name).lower()} = {dut_name} in {alias_file}') def add_dut_info(self, dut_name): dia_info_file = join(Dir, 'Runinfos', 'dia_info.json') data = load_json(dia_info_file) if dut_name in data: return warning('The entered DUT name already exists!') tc = get_input(f'Enter the beam test [YYYYMM]', self.TCString) data[dut_name] = {'irradiation': {tc: get_input(f'Enter the irradiation for {tc}', '0')}, 'boardnumber': {tc: get_input(f'Enter the board number for {tc}')}, 'thickness': get_input('Enter the thickness'), 'size': get_input('Enter the lateral size ([x, y])'), 'manufacturer': get_input('Enter the manufacturer')} with open(dia_info_file, 'w') as f: dump(data, f, indent=2) info(f'added {dut_name} to {dia_info_file}') def translate_dia(self, dia): name, suf = dia.split('_')[0].lower(), '_'.join(dia.split('_')[1:]) if name not in Config(join(self.Dir, 'config', 'DiamondAliases.ini')).options('ALIASES'): warning(f'{dia} was not found in config/DiamondAliases.ini!') if not self.register_new_dut(): critical(f'unknown diamond {dia}') parser = Config(join(self.Dir, 'config', 'DiamondAliases.ini')) return '_'.join([parser.get('ALIASES', name)] + ([suf] if suf else [])) def reload_run_config(self, run_number): self.Number = run_number self.Config = self.load_run_config() self.Info = self.load_run_info() self.RootFileDir = self.load_rootfile_dirname() self.RootFilePath = self.load_rootfile_path() return self.Config def rootfile_is_valid(self, file_path=None): tfile = self.RootFile if file_path is None else TFile(file_path) ttree = self.Tree if file_path is None else tfile.Get(self.TreeName) is_valid = not tfile.IsZombie() and tfile.ClassName() == 'TFile' and ttree and ttree.ClassName() == 'TTree' if not is_valid: warning('Invalid TFile or TTree! 
Deleting file {}'.format(tfile.GetName())) remove_file(tfile.GetName()) return is_valid def calculate_plane_flux(self, plane=1, corr=True): """estimate the flux [kHz/cm²] through a trigger plane based on Poisson statistics.""" rate, eff, area = self.Info[f'for{plane}'], self.load_plane_efficiency(plane), self.get_unmasked_area(plane) return -log(1 - rate / Plane.Frequency) * Plane.Frequency / area / 1000 / (eff if corr else ufloat(1, .05)) # count zero hits of Poisson def find_n_events(self, n, cut, start=0): evt_numbers = self.get_tree_vec(var='Entry$', cut=cut, nentries=self.NEvents, firstentry=start) return int(evt_numbers[:n][-1] + 1 - start) def get_max_run(self): return int(max(self.load_run_info_file(), key=int)) # endregion HELPERS # ---------------------------------------- # ---------------------------------------- # region GET def get_flux(self, plane=None, corr=True): if self.Number is None: return if not self.find_for_in_comment(): # warning('no plane rates in the data...') return self.Info['measuredflux'] / (mean(self.load_plane_efficiencies()) if corr else 1) return self.get_mean_flux(corr) if plane is None else self.calculate_plane_flux(plane, corr) def get_mean_flux(self, corr=True): return mean([self.get_flux(pl, corr) for pl in [1, 2]]) def get_time(self): return ufloat(time_stamp(self.LogStart + self.Duration / 2), self.Duration.seconds / 2) def get_channel_name(self, channel): self.Tree.GetEntry() return self.Tree.sensor_name[channel] def get_time_at_event(self, event): """ For negative event numbers it will return the time stamp at the startevent. """ return self.Time[min(event, self.EndEvent)] / 1000. def get_event_at_time(self, seconds, rel=True): """ Returns the event nunmber at time dt from beginning of the run. Accuracy: +- 1 Event """ if seconds - (0 if rel else self.StartTime) >= self.TotalTime or seconds == -1: # return time of last event if input is too large return self.NEvents - 1 return where(self.Time <= 1000 * (seconds + (self.StartTime if rel else 0)))[0][-1] def get_tree_vec(self, var, cut='', dtype=None, nentries=None, firstentry=0): return get_tree_vec(self.Tree, var, cut, dtype, nentries, firstentry) def get_tree_tuple(self): return (self.Tree, self.RootFile) if self.Tree is not None else False def get_time_vec(self): return self.Time if hasattr(self, 'Time') else None def get_bias_strings(self): return [str(b) for b in self.load_biases()] @save_pickle('HR', suf_args=0) def get_high_rate_run(self, high=True): from src.run_selection import RunSelector return int(RunSelector(testcampaign=self.TCString).get_high_rate_run(self.Number, high)) def get_low_rate_run(self): return self.get_high_rate_run(high=False) # endregion GET # ---------------------------------------- # ---------------------------------------- # region SHOW def show_info(self): print('Run information for', self) for key, value in sorted(self.Info.items()): print(f'{key:<13}: {value}') def has_branch(self, name): return bool(self.Tree.GetBranch(name)) def info(self, msg, endl=True, blank_lines=0, prnt=True): return info(msg, endl, prnt=self.Verbose and prnt, blank_lines=blank_lines) def add_to_info(self, t, txt='Done', prnt=True): return add_to_info(t, txt, prnt=self.Verbose and prnt)
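# Illustrative sketch of the zero-suppressed Poisson estimate used in calculate_plane_flux()
# above: the fraction of readout cycles with at least one hit is rate/frequency, so the mean
# number of particles per cycle is -ln(1 - rate/frequency), and the flux in kHz/cm^2 is that
# mean times the cycle frequency, divided by the unmasked area (and optionally the plane
# efficiency).  The standalone helper and the numbers in the usage comment are made up for
# demonstration only.
def plane_flux(rate_hz, frequency_hz, area_cm2, efficiency=1.):
    """Flux in kHz/cm^2, following the formula in calculate_plane_flux()."""
    from math import log
    return -log(1 - rate_hz / frequency_hz) * frequency_hz / area_cm2 / 1000. / efficiency

# usage sketch: a 2.5 MHz cycle, 30 kHz scaler rate, 0.3 cm^2 unmasked area, 95% efficiency
#   plane_flux(3e4, 2.5e6, 0.3, 0.95)   # about 106 kHz/cm^2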
def showENC(self):
    tree1 = TTree()
    header = 'idX/i:vL/F:vH:A:D/i:R:W'
    first = True
    for f in self.dataFiles:
        if first:
            tree1.ReadFile(f, header)
            first = False
        else:
            tree1.ReadFile(f)

    p1 = TProfile('p1', 'p1;#DeltaU [V];Prob', self.bins[0],
                  tree1.GetMinimum('vH-vL') * 0.8,
                  tree1.GetMaximum('vH-vL') * 1.2)
    tree1.Draw("D:(vH-vL)>>p1", "", "profE")

    ### convert the profile to a TGraphErrors
    g1 = TGraphErrors()
    for i in range(p1.GetNbinsX() + 2):
        N = p1.GetBinEntries(i)
        if N > 0:
            print i, N, p1.GetXaxis().GetBinCenter(i), p1.GetBinContent(i), p1.GetBinError(i)
            n = g1.GetN()
            g1.SetPoint(n, p1.GetXaxis().GetBinCenter(i), p1.GetBinContent(i))
            g1.SetPointError(n, 0, p1.GetBinError(i))

    p1.Draw("axis")
    g1.Draw('Psame')

    fun1 = TF1('fun1', '0.5*(1+TMath::Erf((x-[0])/(TMath::Sqrt(2)*[1])))', 0.05, 0.3)
    fun1.SetParameter(0, 0.155)
    fun1.SetParameter(1, 0.005)

    g1.Fit(fun1)
    fun1a = g1.GetFunction('fun1')
    fun1a.SetLineColor(2)

    v0 = fun1a.GetParameter(0)
    e0 = fun1a.GetParError(0)
    v1 = fun1a.GetParameter(1)
    e1 = fun1a.GetParError(1)

    print v0, v1

    fUnit = 1000.
    self.lt.DrawLatexNDC(0.185, 0.89, '#mu = {0:.1f} #pm {1:.1f} mV'.format(v0 * fUnit, e0 * fUnit))
    self.lt.DrawLatexNDC(0.185, 0.84, '#sigma = {0:.1f} #pm {1:.1f} mV'.format(v1 * fUnit, e1 * fUnit))
    if self.Info:
        self.lt.DrawLatexNDC(0.185, 0.6, self.Info)

    print 'TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1)
    fun2 = TF1('gaus1', 'TMath::Gaus(x,{0:.5f},{1:.5f})'.format(v0, v1))
    fun2.SetLineColor(4)
    fun2.SetLineStyle(2)
    fun2.Draw('same')

    lg = TLegend(0.7, 0.4, 0.95, 0.5)
    lg.SetFillStyle(0)
    lg.AddEntry(p1, 'Measurement', 'p')
    lg.AddEntry(fun1a, 'Fit', 'l')
    lg.AddEntry(fun2, 'Gaus', 'l')
    lg.Draw()

    waitRootCmdX()
from ROOT import TTree, TRandom3, TCanvas
from array import array

mass_tree = TTree("mass_tree", "Tree containing invariant mass points")
random_generator = TRandom3()

maxNevents = 1000
mass = array("f", [0.])
mass_tree.Branch('mass', mass, 'mass/F')

for i in range(maxNevents):
    mass[0] = random_generator.BreitWigner(125., 0.004)
    mass_tree.Fill()
    print mass

c1 = TCanvas('c1', 'Tree Data Structure')
mass_tree.Draw("mass", "mass < 140. && mass > 110.")
c1.SaveAs("smear_mass.gif")
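# Illustrative sketch (not in the original script): the generated mass spectrum above can be
# fit back with a Breit-Wigner to recover the input mean (125) and width (0.004).  The
# histogram name 'hmass' and its binning are chosen here only for the example.
from ROOT import TF1, gDirectory

mass_tree.Draw("mass>>hmass(200,124.9,125.1)", "", "goff")
hmass = gDirectory.Get("hmass")
bw = TF1("bw", "[0]*TMath::BreitWigner(x,[1],[2])", 124.9, 125.1)
bw.SetParameters(hmass.Integral("width"), 125., 0.004)   # amplitude, mean, width as start values
hmass.Fit(bw, "Q")
print "fitted mean  =", bw.GetParameter(1)
print "fitted width =", bw.GetParameter(2)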
# events.Fill()
for i in range(h_zvm.GetNbinsX()):
    uncM[0] = h_zvm.GetXaxis().GetBinCenter(i + 1)
    for j in range(h_zvm.GetNbinsY()):
        uncVZ[0] = h_zvm.GetYaxis().GetBinCenter(j + 1)
        for k in range(int(h_zvm.GetBinContent(i + 1, j + 1))):
            events.Fill()

events.Print()
outrootfile.Write()
# outrootfile.Close()

# events = inFile.Get("cut")
# events.Print()
events.Draw("uncVZ:{0}>>hnew(100,0,0.1,100,-60,60)".format(massVar), "", "colz")
gDirectory.Get("hnew").SetTitle("vertexing data")
gDirectory.Get("hnew").GetXaxis().SetTitle("mass [GeV]")
gDirectory.Get("hnew").GetYaxis().SetTitle("vertex z [mm]")
c.Print(remainder[0] + ".pdf")

gStyle.SetOptStat(1111)
effFile = remainder[2]
# acceptanceFile = TFile(remainder[2])
# tailsFile = TFile(remainder[3])
# radfracFile = TFile(remainder[4])

# fitfunc = TF1("fitfunc","[0]*exp( ((x-[1])<[3])*(-0.5*(x-[1])^2/[2]^2) + ((x-[1])>=[3])*(-0.5*[3]^2/[2]^2-(x-[1]-[3])/[4]))",-50,50)
# fitfunc.SetParName(0,"Amplitude")
# fitfunc.SetParName(1,"Mean")
# fitfunc.SetParName(2,"Sigma")
if (line or noline) and not one_run:
    data += ":runnumber"
elif vsbin:
    data += ":labels"
elif one_run:
    data += ":luminum"

canv = TCanvas("PlotCalib", "plotCalib", 0, 0, 1600, 800)

if opt2d:
    hhh = TH2D("hhh", "hhh", modmax - modmin + 1, modmin - 0.5, modmax + 0.5,
               chanmax - chanmin + 1, chanmin - 0.5, chanmax + 0.5)

if vsbin:
    tree.Draw(data, cut_cond, "goff", 15)
else:
    tree.Draw(data, cut_cond, "goff")

if (not opt2d and tree.GetSelectedRows() <= 1) or tree.GetSelectedRows() <= 0:
    print "Not enough points to plot"
    sys.exit(2)

if vsbin:
    if tree.GetSelectedRows() >= 15:
        print "Maximum number of bins is 15"

if line or noline:
    gr = TGraph(tree.GetSelectedRows(), tree.GetV2(), tree.GetV1())
    gr.SetTitle(modulename + " " + " Channel " + str(chan_n) + " " + gainvalue + titsuff)
    gr.SetMarkerStyle(20)
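# Illustrative note (new code): the tree.GetV1()/GetV2() buffers used above are only valid up
# to tree.GetSelectedRows(), and by default ROOT keeps roughly the first 1e6 selected values;
# calling SetEstimate first makes the buffers hold every selected row.  The helper below is a
# minimal sketch using the same names as the snippet above.
def drawToGraph(tree, data, cut_cond):
    from ROOT import TGraph
    tree.SetEstimate(tree.GetEntries() + 1)           # make sure GetV1/GetV2 cover all rows
    n = tree.Draw(data, cut_cond, "goff")             # returns the number of selected rows
    return TGraph(n, tree.GetV2(), tree.GetV1())      # x from the 2nd expression, y from the 1st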
def main(argv): startT = time.clock() inDir, outDir = ".", "." inFile, inFileName, outFileName = TFile(), "", "" gatTree = TTree() dsNum, subNum, runNum, theCut, inFileName = -1, -1, -1, "", "" saveLAT, savePacket, saveWave = False, False, False if len(argv) == 0: return for i, opt in enumerate(argv): if opt == "-f": # dsNum, subNum, inDir, outDir = int(argv[i+1]), int(argv[i+2]), str(argv[i+3]), str(argv[i+4]) inDir, inFileName, outDir = str(argv[i + 1]), str( argv[i + 2]), str(argv[i + 3]) # inFileName = "waveSkimDS{}_{}.root".format(dsNum, subNum) if opt == "-r": inDir, inFileName, outDir = str(argv[i + 1]), str( argv[i + 2]), str(argv[i + 3]) # dsNum, runNum, inDir, outDir = int(argv[i+1]), int(argv[i+2]), str(argv[i+3]), str(argv[i+4]) # inFileName = "waveSkimDS{}_run{}.root".format(dsNum, runNum) # inFileName = inDir + inFile print("Scanning File: {}".format(inFileName)) inFile = TFile("%s/%s" % (inDir, inFileName)) gatTree = inFile.Get("skimTree") theCut = inFile.Get("theCut").GetTitle() # Make files smaller for tests # theCut += " && sumEHL > 236 && sumEHL < 240 && mHL==2 && trapENFCal < 5" # Select only pulsers # theCut += " && EventDC1Bits > 0" print "Using cut:\n", theCut gatTree.Draw(">>elist", theCut, "entrylist") elist = gDirectory.Get("elist") gatTree.SetEntryList(elist) nList = elist.GetN() print "Found", gatTree.GetEntries(), "input entries." print "Found", nList, "entries passing cuts." # Gimmicky but works... this bypasses creating the branches... gatTree.GetEntry(0) # Mess of various branches channel = std.vector("int")() trapENFCal = std.vector("double")() trapENM = std.vector("double")() # Create map of branches to put into dataframe # This map is only for branches that we want to keep! keepMapBase = { 'trapENFCal': gatTree.trapENFCal, 'trapENM': gatTree.trapENM, "channel": gatTree.channel, "run": gatTree.run, "mHL": gatTree.mHL } # Combine dictionaries, if keepMapLAT is empty it won't add any branches keepMap = dict(keepMapBase) # keepMap.update(keepMapLAT) dataList = [] print 'Writing to: ', '%s/proc%s.h5' % (outDir, inFileName.split('.')[0]) iList, removeNBeg, removeNEnd = -1, 500, 500 # Loop over events while True: iList += 1 if iList >= nList: break # if iList >= 5000: break entry = gatTree.GetEntryNumber(iList) gatTree.LoadTree(entry) gatTree.GetEntry(entry) nChans = gatTree.channel.size() numPass = gatTree.Draw("channel", theCut, "GOFF", 1, iList) chans = gatTree.GetV1() chanList = list(set(int(chans[n]) for n in xrange(numPass))) hitList = (iH for iH in xrange(nChans) if gatTree.channel.at(iH) in chanList ) # a 'generator expression' for iH in hitList: dataMap = {} wf = gatTree.MGTWaveforms.at(iH) signal = wl.processWaveform(wf, removeNBeg, removeNEnd) wave = np.array(signal.GetWaveRaw(), dtype=np.int16) for key, branch in keepMap.items(): # Save branches that aren't vector<Template> (so far only run and mHL) if key == 'run' or key == 'mHL': dataMap[key] = int(branch) elif key == 'channel': dataMap[key] = int(branch.at(iH)) else: dataMap[key] = float(branch.at(iH)) dataMap['waveform'] = wave.tolist() dataList.append(dataMap) if iList % 5000 == 0 and iList != 0: print "%d / %d entries saved (%.2f %% done), time: %s" % ( iList, nList, 100 * (float(iList) / nList), time.strftime('%X %x %Z')) df = pd.DataFrame.from_dict(dataList) print(df.head()) print(df.info()) print(np.unique(df.channel)) print(np.unique(df.mHL)) print(np.unique(df.run)) # Suppress stupid warning warnings.filterwarnings(action="ignore", module="pandas", message="^\nyour performance") # Chunk 
write like a sucker chunksize = 50000 start = 0 end = chunksize - 1 i = 0 # for i in len(df): while end < df.shape[0]: # chunk = df.iloc[(i*chunksize):min((i+1)*chunksize,len(df))] chunk = df.iloc[start:end] try: chunk.to_hdf('{}/proc{}_{}.h5'.format(outDir, inFileName.split('.')[0], i), key='skimTree', data_columns=[ 'trapENFCal', 'trapENM', 'channel', 'mHL', 'waveform' ], format='fixed', mode='w', complevel=9) except (Exception) as e: print e print chunk print chunk.info() start += chunksize end += chunksize i += 1 # df.to_hdf('%s/proc%s.h5' % (outDir,inFileName.split('.')[0]), key="skimTree", data_columns=['trapENFCal', 'trapENM','channel','mHL','waveform'], format='fixed', mode='w', complevel=9) stopT = time.clock() print("Stopped:", time.strftime('%X %x %Z'), "\nProcess time (min):", (stopT - startT) / 60) print(float(nList) / ((stopT - startT) / 60.), "entries per minute.")
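# Illustrative sketch (not part of the original script): the per-chunk HDF5 files written
# above can be stitched back into one DataFrame with pandas.  The 'proc<base>_<i>.h5' pattern
# and the 'skimTree' key follow the writer above; glob and pandas are assumed available.
def loadChunks(outDir, baseName):
    import glob
    import pandas as pd
    files = sorted(glob.glob('{}/proc{}_*.h5'.format(outDir, baseName)),
                   key=lambda f: int(f.rsplit('_', 1)[-1].split('.')[0]))   # numeric chunk order
    frames = [pd.read_hdf(f, key='skimTree') for f in files]                # one DataFrame per chunk
    return pd.concat(frames, ignore_index=True)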
outdir = dirname.rstrip("/") try: os.makedirs(outdir) except: pass ofname = outdir + "/" + fname.rstrip(".txt") + ".root" #output file and tree outfile = TFile(ofname, "RECREATE") tree = TTree("tree", "tree of " + fname) outfile.cd() #create tree from text file desc = "MUEFFECTIVE:EFFICIENCY" #this gives names and types of variables tree.ReadFile(ifname, desc) # delimiter = space #tree.ReadFile(ifname,desc,",") # delimiter = comma c1 = TCanvas("c1", "c1", 500, 500) tree.Draw( "EFFICIENCY:MUEFFECTIVE>>MvsE") # draws MuE vs Eff and saves to histogram c1.SaveAs("MvsE.root") #save output tree tree.Write() outfile.Close() #print message print "Created tree in root file: %s" % ofname
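# Illustrative sketch: a self-contained demo of the TTree::ReadFile branch descriptor used
# above.  Column names are separated by ':' with an optional '/type' suffix (I int, F float,
# D double, ...); untyped columns are read as floats, as in the descriptor above.  The file
# name, values and branch names below are made up for the example.
def readFileDemo(txt="demo_readfile.txt"):
    from ROOT import TTree
    with open(txt, "w") as f:
        f.write("1 0.50 0.95\n")
        f.write("2 0.75 0.97\n")
        f.write("3 1.00 0.99\n")
    t = TTree("tdemo", "ReadFile demo")
    n = t.ReadFile(txt, "run/I:MUEFFECTIVE/F:EFFICIENCY")   # returns the number of lines read
    print "read %d rows, branches:" % n, [b.GetName() for b in t.GetListOfBranches()]
    t.Scan()
    return t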
def main(argv): print("=======================================") print("LAT started:",time.strftime('%X %x %Z')) startT = time.clock() # gROOT.ProcessLine("gErrorIgnoreLevel = 3001;") # suppress ROOT error messages global batMode intMode, batMode, rangeMode, fileMode, gatMode, singleMode, pathMode, cutMode = False, False, False, False, False, False, False, False dontUseTCuts = False dsNum, subNum, runNum, plotNum = -1, -1, -1, 1 pathToInput, pathToOutput, manualInput, manualOutput, customPar = ".", ".", "", "", "" if len(argv)==0: return for i,opt in enumerate(argv): if opt == "-r": rangeMode, dsNum, subNum = True, int(argv[i+1]), int(argv[i+2]) print("Scanning DS-%d sub-range %d" % (dsNum, subNum)) if opt == "-p": pathMode, manualInput, manualOutput = True, argv[i+1], argv[i+2] print("Manually set input/output files:\nInput: %s\nOutput: %s" % (manualInput, manualOutput)) if opt == "-d": pathToInput, pathToOutput = argv[i+1], argv[i+2] print("Custom paths: Input %s, Output %s" % (pathToInput,pathToOutput)) if opt == "-f": fileMode, dsNum, runNum = True, int(argv[i+1]), int(argv[i+2]) print("Scanning DS-%d, run %d" % (dsNum, runNum)) if opt == "-g": gatMode, runNum = True, int(argv[i+1]) print("GATDataSet mode. Scanning run %d" % (runNum)) if opt == "-s": singleMode, pathToInput = True, argv[i+1] print("Single file mode. Scanning {}".format(pathToInput)) if opt == "-i": intMode, plotNum = True, int(argv[i+1]) print("Interactive mode selected. Use \"p\" for previous and \"q\" to exit.") if opt == "-x": dontUseTCuts = True print("DC TCuts deactivated. Retaining all events ...") if opt == "-c": cutMode, customPar = True, str(argv[i+1]) print("Using custom cut parameter: {}".format(customPar)) if opt == "-b": batMode = True import matplotlib # if os.environ.get('DISPLAY','') == '': # print('No display found. Using non-interactive Agg backend') matplotlib.use('Agg') print("Batch mode selected. 
A new file will be created.") import matplotlib.pyplot as plt from matplotlib import gridspec import matplotlib.ticker as mtick plt.style.use('pltTalks.mplstyle') from matplotlib.colors import LogNorm, Normalize # File I/O inFile, outFile, bltFile = TFile(), TFile(), TFile() gatTree, bltTree, out = TTree(), TTree(), TTree() theCut, inPath, outPath = "", "", "" # Set input and output files if rangeMode: inPath = "%s/waveSkimDS%d_%d.root" % (pathToInput, dsNum, subNum) outPath = "%s/latSkimDS%d_%d.root" % (pathToOutput, dsNum, subNum) if fileMode: inPath = "%s/waveSkimDS%d_run%d.root" % (pathToInput, dsNum, runNum) outPath = "%s/latSkimDS%d_run%d.root" % (pathToOutput, dsNum, runNum) if pathMode: inPath, outPath = manualInput, manualOutput if gatMode: ds = GATDataSet() gatPath = ds.GetPathToRun(runNum,GATDataSet.kGatified) bltPath = ds.GetPathToRun(runNum,GATDataSet.kBuilt) outPath = "%s/lat_run%d.root" % (pathToOutput, runNum) if pathMode and gatMode: outPath = manualOutput # Initialize trees if rangeMode or fileMode or pathMode: inFile = TFile(inPath) gatTree = inFile.Get("skimTree") print(gatTree.GetEntries(),"entries in input tree.") elif gatMode: inFile = TFile(gatPath) bltFile = TFile(bltPath) gatTree = inFile.Get("mjdTree") bltTree = bltFile.Get("MGTree") gatTree.AddFriend(bltTree) if singleMode: inFile = TFile(pathToInput) gatTree = inFile.Get("skimTree") # apply cut to tree if (rangeMode or fileMode or pathMode) and not dontUseTCuts: try: theCut = inFile.Get("theCut").GetTitle() except ReferenceError: theCut = "" if cutMode: # theCut += customPar # theCut = "(channel==672 || channel==674) && mH==2" # sync chan: 672, extp chan: 674 # theCut += " && fitSlo < 10" # theCut = "trapENFCal > 1 && trapENFCal < 10 && riseNoise > 2" theCut = "trapENFCal > 20 && trapENFCal < 100 && riseNoise > 2" print("WARNING: Custom cut in use! : ",theCut) gatTree.Draw(">>elist", theCut, "entrylist") elist = gDirectory.Get("elist") gatTree.SetEntryList(elist) nList = elist.GetN() print("Using cut:\n",theCut) print("Found",gatTree.GetEntries(),"input entries.") print("Found",nList,"entries passing cuts.") # Output: In batch mode (-b) only, create an output file+tree & append new branches. 
if batMode and not intMode: outFile = TFile(outPath, "RECREATE") print("Attempting tree copy to",outPath) out = gatTree.CopyTree("") out.Write() print("Wrote",out.GetEntries(),"entries.") cutUsed = TNamed("theCut",theCut) cutUsed.Write() waveS1, waveS2 = std.vector("double")(), std.vector("double")() waveS3, waveS4, waveS5 = std.vector("double")(), std.vector("double")(), std.vector("double")() bcMax, bcMin = std.vector("double")(), std.vector("double")() bandMax, bandTime = std.vector("double")(), std.vector("double")() den10, den50, den90 = std.vector("double")(), std.vector("double")(), std.vector("double")() oppie = std.vector("double")() fitMu, fitAmp, fitSlo = std.vector("double")(), std.vector("double")(), std.vector("double")() fitTau, fitBL = std.vector("double")(), std.vector("double")() matchMax, matchWidth, matchTime = std.vector("double")(), std.vector("double")(), std.vector("double")() pol0, pol1, pol2, pol3 = std.vector("double")(), std.vector("double")(), std.vector("double")(), std.vector("double")() fails, fitChi2, fitLL = std.vector("int")(), std.vector("double")(), std.vector("double")() riseNoise = std.vector("double")() t0_SLE, t0_ALE, lat, latF = std.vector("double")(), std.vector("double")(), std.vector("double")(), std.vector("double")() latAF, latFC, latAFC = std.vector("double")(), std.vector("double")(), std.vector("double")() nMS = std.vector("int")() tE50, latE50, wfStd = std.vector("double")(), std.vector("double")(), std.vector("double")() wfAvgBL, wfRMSBL = std.vector("double")(), std.vector("double")() fitErr = std.vector("int")() # It's not possible to put the "out.Branch" call into a class initializer (waveLibs::latBranch). You suck, ROOT. b1, b2 = out.Branch("waveS1",waveS1), out.Branch("waveS2",waveS2) b3, b4, b5 = out.Branch("waveS3",waveS3), out.Branch("waveS4",waveS4), out.Branch("waveS5",waveS5) b7, b8 = out.Branch("bcMax",bcMax), out.Branch("bcMin",bcMin) b9, b10 = out.Branch("bandMax",bandMax), out.Branch("bandTime",bandTime) b11, b12, b13 = out.Branch("den10",den10), out.Branch("den50",den50), out.Branch("den90",den90) b14 = out.Branch("oppie",oppie) b15, b16, b17 = out.Branch("fitMu", fitMu), out.Branch("fitAmp", fitAmp), out.Branch("fitSlo", fitSlo) b18, b19 = out.Branch("fitTau",fitTau), out.Branch("fitBL",fitBL) b20, b21, b22 = out.Branch("matchMax", matchMax), out.Branch("matchWidth", matchWidth), out.Branch("matchTime", matchTime) b23, b24, b25, b26 = out.Branch("pol0", pol0), out.Branch("pol1", pol1), out.Branch("pol2", pol2), out.Branch("pol3", pol3) b27, b28, b29 = out.Branch("fails",fails), out.Branch("fitChi2",fitChi2), out.Branch("fitLL",fitLL) b30 = out.Branch("riseNoise",riseNoise) b31, b32, b33, b34 = out.Branch("t0_SLE",t0_SLE), out.Branch("t0_ALE",t0_ALE), out.Branch("lat",lat), out.Branch("latF",latF) b35, b36, b37 = out.Branch("latAF",latAF), out.Branch("latFC",latFC), out.Branch("latAFC",latAFC) b38 = out.Branch("nMS",nMS) b39, b40, b41 = out.Branch("tE50", tE50), out.Branch("latE50", latE50), out.Branch("wfStd", wfStd) b42, b43 = out.Branch("wfAvgBL", wfAvgBL), out.Branch("wfRMSBL", wfRMSBL) b44 = out.Branch("fitErr",fitErr) # make a dictionary that can be iterated over (avoids code repetition in the loop) brDict = { "waveS1":[waveS1, b1], "waveS2":[waveS2, b2], "waveS3":[waveS3, b3], "waveS4":[waveS4, b4], "waveS5":[waveS5, b5], "bcMax":[bcMax, b7], "bcMin":[bcMin, b8], "bandMax":[bandMax, b9], "bandTime":[bandTime, b10], "den10":[den10, b11], "den50":[den50, b12], "den90":[den90, b13], "oppie":[oppie, b14], 
"fitMu":[fitMu, b15], "fitAmp":[fitAmp, b16], "fitSlo":[fitSlo, b17], "fitTau":[fitTau, b18], "fitBL":[fitBL,b19], "matchMax":[matchMax, b20], "matchWidth":[matchWidth, b21], "matchTime":[matchTime, b22], "pol0":[pol0, b23], "pol1":[pol1, b24], "pol2":[pol2, b25], "pol3":[pol3, b26], "fails":[fails,b27], "fitChi2":[fitChi2,b28], "fitLL":[fitLL,b29], "riseNoise":[riseNoise,b30], "t0_SLE":[t0_SLE,b31], "t0_ALE":[t0_ALE,b32], "lat":[lat,b33], "latF":[latF,b34], "latAF":[latAF,b35], "latFC":[latFC,b36], "latAFC":[latAFC,b37], "nMS":[nMS,b38], "tE50":[tE50,b39], "latE50":[latE50,b40], "wfStd":[wfStd,b41], "wfAvgBL":[wfAvgBL,b42], "wfRMSBL":[wfRMSBL,b43], "fitErr":[fitErr,b44] } # Make a figure (-i option: select different plots) # fig = plt.figure(figsize=(12,9), facecolor='w') fig = plt.figure() if plotNum==0 or plotNum==7 or plotNum==8: p0 = plt.subplot(111) # 0-raw waveform, 7-new trap filters elif plotNum==1 or plotNum==2: p0 = plt.subplot(211) # 1-wavelet, 2-time points, bandpass filters, tail slope p1 = plt.subplot(212) elif plotNum==3: p0 = plt.subplot2grid((2,5), (0,0), colspan=3) # oppie / freq-domain matched filter p1 = plt.subplot2grid((2,5), (0,3), colspan=2) p2 = plt.subplot2grid((2,5), (1,0), colspan=3) elif plotNum==4: p0 = plt.subplot(111) # time-domain matched filter elif plotNum==5: p0 = plt.subplot(111) # bandpass / bandTime elif plotNum==6: p0 = plt.subplot2grid((6,10), (0,0), colspan=10, rowspan=3) # waveform fit p1 = plt.subplot2grid((6,10), (3,0), colspan=10, rowspan=1) # residual p2 = plt.subplot2grid((6,10), (4,0), colspan=2, rowspan=2) # traces p3 = plt.subplot2grid((6,10), (4,2), colspan=2, rowspan=2) p4 = plt.subplot2grid((6,10), (4,4), colspan=2, rowspan=2) p5 = plt.subplot2grid((6,10), (4,6), colspan=2, rowspan=2) p6 = plt.subplot2grid((6,10), (4,8), colspan=2, rowspan=2) elif plotNum==9: p0 = plt.subplot2grid((5,1), (0,0)) # 9- wpt on wf fit residual p1 = plt.subplot2grid((5,1), (1,0), rowspan=2) p2 = plt.subplot2grid((5,1), (3,0), rowspan=2) if not batMode: plt.show(block=False) # Load a fast signal template - used w/ the freq-domain matched filter # print("Generating signal template ...") tSamp, tR, tZ, tAmp, tST, tSlo = 5000, 0, 15, 100, 2500, 10 # tOrig, tOrigTS = wl.MakeSiggenWaveform(tSamp,tR,tZ,tAmp,tST,tSlo) # Damn you to hell, PDSF templateFile = np.load("%s/data/lat_template.npz" % os.environ['LATDIR']) if dsNum==2 or dsNum==6: templateFile = np.load("%s/data/lat_ds2template.npz" % os.environ['LATDIR']) tOrig, tOrigTS = templateFile['arr_0'], templateFile['arr_1'] # Load stuff from DS1 forced acq. runs npzfile = np.load("%s/data/fft_forcedAcqDS1.npz" % os.environ['LATDIR']) noise_asd, noise_xFreq, avgPwrSpec, xPwrSpec, data_forceAcq, data_fft = npzfile['arr_0'],npzfile['arr_1'],npzfile['arr_2'],npzfile['arr_3'],npzfile['arr_4'],npzfile['arr_5'] # Loop over events print("Starting event loop ...") iList = -1 while True: iList += 1 if intMode==True and iList != 0: value = input() if value=='q': break # quit if value=='p': iList -= 2 # go to previous if (value.isdigit()): iList = int(value) # go to entry number elif intMode==False and batMode==False: plt.pause(0.00001) # rapid-draw mode if iList >= nList: break # bail out, goose! entry = gatTree.GetEntryNumber(iList); gatTree.LoadTree(entry) gatTree.GetEntry(entry) nChans = gatTree.channel.size() event = MGTEvent() if gatMode: event = bltTree.event # Reset all branch vectors # NOTE: The events sometimes contain 'straggler' hits that do not pass the # given TCut. 
This line sets ALL the new parameters to -88888 by default. # If you see this value in a plot, then you must be including hits that # passed the cut in wave-skim but did not pass the (different?) cut in LAT. for key in brDict: brDict[key][0].assign(nChans,-88888) brDict["fails"][0].assign(nChans,0) # set error code to 'true' by default errorCode = [0,0,0,0] # Loop over hits passing cuts numPass = gatTree.Draw("channel",theCut,"GOFF",1,iList) chans = gatTree.GetV1() chanList = list(set(int(chans[n]) for n in range(numPass))) hitList = (iH for iH in range(nChans) if gatTree.channel.at(iH) in chanList) # a 'generator expression' for iH in hitList: # ------------------------------------------------------------------------ # Waveform processing # load data run = gatTree.run chan = gatTree.channel.at(iH) dataENFCal = gatTree.trapENFCal.at(iH) dataENM = gatTree.trapENM.at(iH) dataTSMax = gatTree.trapENMSample.at(iH)*10. - 4000 wf = MGTWaveform() iEvent = 0 if gatMode: wf = event.GetWaveform(iH) iEvent = entry else: wf = gatTree.MGTWaveforms.at(iH) iEvent = gatTree.iEvent # print("%d: run %d chan %d trapENFCal %.2f" % (iList, run, chan, dataENFCal)) # be absolutely sure you're matching the right waveform to this hit if wf.GetID() != chan: print("ERROR -- Vector matching failed. iList %d run %d iEvent %d" % (iList,run,iEvent)) return # Let's start the show - grab a waveform. # Remove first 4 samples when we have multisampling # Remove last 2 samples to get rid of the ADC spike at the end of all wf's. truncLo, truncHi = 0, 2 if dsNum==6 or dsNum==2: truncLo = 4 signal = wl.processWaveform(wf,truncLo,truncHi) data = signal.GetWaveRaw() data_blSub = signal.GetWaveBLSub() dataTS = signal.GetTS() dataBL,dataNoise = signal.GetBaseNoise() # wavelet packet transform wp = pywt.WaveletPacket(data_blSub, 'db2', 'symmetric', maxlevel=4) nodes = wp.get_level(4, order='freq') wpCoeff = np.array([n.data for n in nodes],'d') wpCoeff = abs(wpCoeff) # wavelet parameters # First get length of wavelet on the time axis, the scale axis will always be the same # due to the number of levels in the wavelet wpLength = len(wpCoeff[1,:]) waveS1[iH] = np.sum(wpCoeff[0:1,1:wpLength//4+1]) # python3 : floor division (//) returns an int waveS2[iH] = np.sum(wpCoeff[0:1,wpLength//4+1:wpLength//2+1]) waveS3[iH] = np.sum(wpCoeff[0:1,wpLength//2+1:3*wpLength//4+1]) waveS4[iH] = np.sum(wpCoeff[0:1,3*wpLength//4+1:-1]) waveS5[iH] = np.sum(wpCoeff[2:-1,1:-1]) S6 = np.sum(wpCoeff[2:9,1:wpLength//4+1]) S7 = np.sum(wpCoeff[2:9,wpLength//4+1:wpLength//2+1]) S8 = np.sum(wpCoeff[2:9,wpLength//2+1:3*wpLength//4+1]) S9 = np.sum(wpCoeff[2:9,3*wpLength//4+1:-1]) S10 = np.sum(wpCoeff[9:,1:wpLength//4+1]) S11 = np.sum(wpCoeff[9:,wpLength//4+1:wpLength//2+1]) S12 = np.sum(wpCoeff[9:,wpLength//2+1:3*wpLength//4+1]) S13 = np.sum(wpCoeff[9:,3*wpLength//4+1:-1]) sumList = [S6, S7, S8, S9, S10, S11, S12, S13] bcMax[iH] = np.max(sumList) bcMin[iH] = 1. if np.min(sumList) < 1 else np.min(sumList) # reconstruct waveform w/ only lowest frequency. 
new_wp = pywt.WaveletPacket(data=None, wavelet='db2', mode='symmetric') new_wp['aaa'] = wp['aaa'].data data_wlDenoised = new_wp.reconstruct(update=False) # resize in a smart way diff = len(data_wlDenoised) - len(data_blSub) if diff > 0: data_wlDenoised = data_wlDenoised[diff:] # waveform high/lowpass filters - parameters are a little arbitrary B1,A1 = butter(2, [1e5/(1e8/2),1e6/(1e8/2)], btype='bandpass') data_bPass = lfilter(B1, A1, data_blSub) # used in the multisite tagger B2, A2 = butter(1, 0.08) data_filt = filtfilt(B2, A2, data_blSub) data_filtDeriv = wl.wfDerivative(data_filt) filtAmp = np.amax(data_filtDeriv) # scale the max to match the amplitude data_filtDeriv = data_filtDeriv * (dataENM / filtAmp) B3, A3 = butter(2,1e6/(1e8/2), btype='lowpass') data_lPass = lfilter(B3, A3, data_blSub) idx = np.where((dataTS > dataTS[0]+100) & (dataTS < dataTS[-1]-100)) windowingOffset = dataTS[idx][0] - dataTS[0] bandMax[iH] = np.amax(data_bPass[idx]) bandTime[iH] = dataTS[ np.argmax(data_bPass[idx])] - windowingOffset # timepoints of low-pass waveforms tpc = MGWFTimePointCalculator(); tpc.AddPoint(.2) tpc.AddPoint(.5) tpc.AddPoint(.9) mgtLowPass = wl.MGTWFFromNpArray(data_lPass) tpc.FindTimePoints(mgtLowPass) den10[iH] = tpc.GetFromStartRiseTime(0)*10 den50[iH] = tpc.GetFromStartRiseTime(1)*10 den90[iH] = tpc.GetFromStartRiseTime(2)*10 # ================ xgauss waveform fitting ================ amp, mu, sig, tau, bl = dataENM, dataTSMax, 600., -72000., dataBL floats = np.asarray([amp, mu, sig, tau, bl]) temp = xgModelWF(dataTS, floats) if not batMode: MakeTracesGlobal() # get the noise of the denoised wf denoisedNoise,_,_ = wl.baselineParameters(data_wlDenoised) # NOTE: fit is to wavelet-denoised data, BECAUSE there are no HF components in the model, # AND we'll still calculate fitChi2 w/r/t the data, not the denoised data. # datas = [dataTS, data, dataNoise] # fit data datas = [dataTS, data_wlDenoised + dataBL, denoisedNoise] # fit wavelet-denoised data w/ Bl added back in # Set bounds - A,mu,sig,tau,bl. # bnd = ((None,None),(None,None),(None,None),(None,None),(None,None)) # often gets caught at sig=0 bnd = ((None,None),(None,None),(2.,None),(-72001.,-71999.),(None,None)) # gets caught much less often. # L-BGFS-B with numerical gradient. start = time.clock() result = op.minimize(lnLike, floats, args=datas, method="L-BFGS-B", options=None, bounds=bnd) fitSpeed = time.clock() - start fitErr[iH] = 0 if not result["success"]: # print("fit fail: ", result["message"]) fitErr[iH] = 1 errorCode[0] = 1 amp, mu, sig, tau, bl = result["x"] # save parameters fitMu[iH], fitAmp[iH], fitSlo[iH], fitTau[iH], fitBL[iH] = mu, amp, sig, tau, bl floats = np.asarray([amp, mu, sig, tau, bl]) fit = xgModelWF(dataTS, floats) # print("%d/%d iH %d e %-10.2f fs %-8.2f f %d" % (iList, nList, iH, dataENFCal, fitSlo[iH], fitErr[iH])) # log-likelihood of this fit fitLL[iH] = result["fun"] # chi-square of this fit # Textbook is (observed - expected)^2 / expected, # but we'll follow MGWFCalculateChiSquare.cc and do (observed - expected)^2 / NDF. # NOTE: we're doing the chi2 against the DATA, though the FIT is to the DENOISED DATA. fitChi2[iH] = np.sum(np.square(data-fit)) / (len(data)-1)/dataNoise # get wavelet coeff's for rising edge only. 
normalize to bcMin # view this w/ plot 1 # find the window of rising edge fit_blSub = fit - bl fitMaxTime = dataTS[np.argmax(fit_blSub)] fitStartTime = dataTS[0] idx = np.where(fit_blSub < 0.1) if len(dataTS[idx] > 0): fitStartTime = dataTS[idx][-1] fitRiseTime50 = (fitMaxTime + fitStartTime)/2. # bcMin is 32 samples long in the x-direction. # if we make the window half as wide, it'll have the same # of coeff's as bcMin. # this is still 'cheating' since we're not summing over the same rows. numXRows = wpCoeff.shape[1] wpCtrRise = int((fitRiseTime50 - dataTS[0]) / (dataTS[-1] - dataTS[0]) * numXRows) wpLoRise = wpCtrRise - 8 if wpLoRise < 0: wpLoRise = 0 wpHiRise = wpCtrRise + 8 if wpHiRise > numXRows: wpHiRise = numXRows # sum all HF wavelet components for this edge. riseNoise[iH] = np.sum(wpCoeff[2:-1,wpLoRise:wpHiRise]) / bcMin[iH] # print("%d %d %d %d e %-5.2f bmax %-6.2f bmin %-6.2f mu %-5.2f a %-5.2f s %-5.2f bl %-5.2f rn %.2f" % (run,iList,iH,chan,dataENFCal,bcMax[iH],bcMin[iH],fitMu[iH],fitAmp[iH],fitSlo[iH],fitBL[iH],riseNoise[iH])) # ========================================================= # optimal matched filter (freq. domain) # we use the pysiggen fast template (not the fit result) to keep this independent of the wf fitter. # pull in the template, shift it, and make sure it's the same length as the data guessTS = tOrigTS - 15000. idx = np.where((guessTS > -5) & (guessTS < dataTS[-1])) guessTS, guess = guessTS[idx], tOrig[idx] if len(guess)!=len(data): if len(guess)>len(data): guess, guessTS = guess[0:len(data)], guessTS[0:len(data)] else: guess = np.pad(guess, (0,len(data)-len(guess)), 'edge') guessTS = np.pad(guessTS, (0,len(data)-len(guessTS)), 'edge') data_fft = np.fft.fft(data_blSub) # can also try taking fft of the low-pass data temp_fft = np.fft.fft(guess) datafreq = np.fft.fftfreq(data.size) * 1e8 power_vec = np.interp(datafreq, noise_xFreq, noise_asd) # load power spectra from file # Apply the filter optimal = data_fft * temp_fft.conjugate() / power_vec optimal_time = 2 * np.fft.ifft(optimal) # Normalize the output df = np.abs(datafreq[1] - datafreq[0]) # freq. bin size sigmasq = 2 * (temp_fft * temp_fft.conjugate() / power_vec).sum() * df sigma = np.sqrt(np.abs(sigmasq)) SNR = abs(optimal_time) / (sigma) oppie[iH] = np.amax(SNR) # time-domain matched filter. use the baseline-subtracted wf as data, and fit_blSub too. # make a longer best-fit waveform s/t it can be shifted L/R. matchTS = np.append(dataTS, np.arange(dataTS[-1], dataTS[-1] + 20000, 10)) # add 2000 samples match = xgModelWF(matchTS, [amp, mu+10000., sig, tau, bl]) # shift mu accordingly match = match[::-1] - bl # time flip and subtract off bl # line up the max of the 'match' (flipped wf) with the max of the best-fit wf # this kills the 1-1 matching between matchTS and dataTS (each TS has some offset) matchMaxTime = matchTS[np.argmax(match)] matchTS = matchTS + (fitMaxTime - matchMaxTime) # resize match, matchTS to have same # samples as data, dataTS. 
# this is the only case we really care about # ("too early" and "too late" also happen, but the shift is larger than the trigger walk, making it unphysical) if matchTS[0] <= dataTS[0] and matchTS[-1] >= dataTS[-1]: idx = np.where((matchTS >= dataTS[0]) & (matchTS <= dataTS[-1])) match, matchTS = match[idx], matchTS[idx] sizeDiff = len(dataTS)-len(matchTS) if sizeDiff < 0: match, matchTS = match[:sizeDiff], matchTS[:sizeDiff] elif sizeDiff > 0: match = np.hstack((match, np.zeros(sizeDiff))) matchTS = np.hstack((matchTS, dataTS[-1*sizeDiff:])) if len(match) != len(data): print("FIXME: match filter array manip is still broken.") # compute match filter parameters matchMax[iH], matchWidth[iH], matchTime[iH] = -888, -888, -888 if len(match)==len(data): smoothMF = gaussian_filter(match * data_blSub, sigma=5.) matchMax[iH] = np.amax(smoothMF) matchTime[iH] = matchTS[ np.argmax(smoothMF) ] idx = np.where(smoothMF > matchMax[iH]/2.) if len(matchTS[idx]>1): matchWidth[iH] = matchTS[idx][-1] - matchTS[idx][0] # Fit tail slope to polynomial. Guard against fit fails idx = np.where(dataTS >= fitMaxTime) tail, tailTS = data[idx], dataTS[idx] popt1,popt2 = 0,0 try: popt1,_ = op.curve_fit(wl.tailModelPol, tailTS, tail) pol0[iH], pol1[iH], pol2[iH], pol3[iH] = popt1[0], popt1[1], popt1[2], popt1[3] except: # print("curve_fit tailModelPol failed, run %i event %i channel %i" % (run, iList, chan)) errorCode[2] = 1 pass # ========================================================= # new trap filters. # params: t0_SLE, t0_ALE, lat, latF, latAF, latFC, latAFC # calculate trapezoids # standard trapezoid - prone to walking, less sensitive to noise. use to find energy eTrap = wl.trapFilter(data_blSub, 400, 250, 7200.) eTrapTS = np.arange(0, len(eTrap)*10., 10) eTrapInterp = interpolate.interp1d(eTrapTS, eTrap) # short trapezoid - triggers more quickly, sensitive to noise. use to find t0 sTrap = wl.trapFilter(data_blSub, 100, 150, 7200.) sTrapTS = np.arange(0, len(sTrap)*10., 10) # asymmetric trapezoid - used to find the t0 only aTrap = wl.asymTrapFilter(data_blSub, 4, 10, 200, True) # (0.04us, 0.1us, 2.0us) aTrapTS = np.arange(0, len(aTrap)*10., 10) # find leading edges (t0 times) # limit the range from 0 to 10us, and use an ADC threshold of 1.0 as suggested by DCR t0_SLE[iH],_ = wl.walkBackT0(sTrap, eTrapTS[-1]+7000-4000-2000, 1., 0, 1000) # (in ns) finds leading edge from short trap t0_ALE[iH],_ = wl.walkBackT0(aTrap, eTrapTS[-1]+7000-4000-2000, 1., 0, 1000) # (in ns) finds leading edge from asymmetric trap # standard energy trapezoid w/ a baseline padded waveform data_pad = np.pad(data_blSub,(200,0),'symmetric') pTrap = wl.trapFilter(data_pad, 400, 250, 7200.) pTrapTS = np.linspace(0, len(pTrap)*10, len(pTrap)) pTrapInterp = interpolate.interp1d(pTrapTS, pTrap) # calculate energy parameters # standard amplitude. basically trapEM, but w/o NL correction if the input WF doesn't have it. 
lat[iH] = np.amax(eTrap) # Calculate DCR suggested amplitude, using the 50% to the left and right of the maximum point t0_F50,t0fail1 = wl.walkBackT0(pTrap, thresh=lat[iH]*0.5, rmin=0, rmax=len(pTrap)-1) t0_B50,t0fail2 = wl.walkBackT0(pTrap, thresh=lat[iH]*0.5, rmin=0, rmax=len(pTrap)-1, forward=True) t0_E50 = (t0_F50 + t0_B50)/2.0 #TODO -- if it's necessary due to the trigger walk, we could potentially add a way to recursively increase the threshold until a timepoint is found, however it will still always fail for most noise events if not t0fail1 or not t0fail2: latE50[iH] = 0 # Set amplitude to 0 if one of the evaluations failed else: latE50[iH] = pTrapInterp(t0_E50) # Maybe I should call this latDCR50 to confuse people tE50[iH] = t0_B50 - t0_F50 # Save the difference between the middle points, can be used as a cut later # standard amplitude with t0 from the shorter traps # If either fixed pickoff time (t0) is < 0, use the first sample as the amplitude (energy). latF[iH] = eTrapInterp( np.amax([t0_SLE[iH]-7000+4000+2000, 0.]) ) # This should be ~trapEF latAF[iH] = eTrapInterp( np.amax([t0_ALE[iH]-7000+4000+2000, 0.]) ) # amplitude from padded trapezoid, with t0 from short traps and a correction function # function is under development. currently: f() = exp(p0 + p1*E), p0 ~ 7.8, p1 ~ -0.45 and -0.66 # functional walk back distance is *either* the minimum of the function value, or 5500 (standard value) # t0_corr = -7000+6000+2000 # no correction t0_corr = -7000+6000+2000 - np.amin([np.exp(7.8 - 0.45*lat[iH]),1000.]) t0A_corr = -7000+6000+2000 - np.amin([np.exp(7.8 - 0.66*lat[iH]),1000.]) latFC[iH] = pTrapInterp( np.amax([t0_SLE[iH] + t0_corr, 0.]) ) latAFC[iH] = pTrapInterp( np.amax([t0_ALE[iH] + t0A_corr, 0.]) ) # ========================================================= # the genius multisite event tagger - plot 8 # decide a threshold dIdx = np.argmax(data_filtDeriv) dMax = data_filtDeriv[dIdx] dRMS,_,_ = wl.baselineParameters(data_filtDeriv) # msThresh = np.amax([dMax * .2, dRMS * 5.]) # msThresh = dMax * .15 msThresh = 50. # I don't know. this seems like a good value # run peak detect algorithm maxtab,_ = wl.peakdet(data_filtDeriv, msThresh) # profit msList = [] for iMax in range(len(maxtab)): idx = int(maxtab[iMax][0]) val = maxtab[iMax][1] msList.append(dataTS[idx]) # print("%d idx %d TS %d val %.2f thresh %.2f" % (iList, idx, dataTS[idx], val, msThresh)) nMS[iH] = len(maxtab) # ========================================================= # wfStd analysis wfAvgBL[iH] = dataBL wfRMSBL[iH] = dataNoise wfStd[iH] = np.std(data[5:-5]) # ------------------------------------------------------------------------ # End waveform processing. # Calculate error code fails[iH] = 0 for i,j in enumerate(errorCode): if j==1: fails[iH] += int(j)<<i # print("fails:",fails[iH]) # Make plots! if batMode: continue if plotNum==0: # raw data p0.cla() p0.plot(dataTS,data,'b') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f" % (run,iList,chan,dataENFCal)) p0.set_xlabel("Time (ns)", ha='right', x=1.) p0.set_ylabel("Voltage (ADC)", ha='right', y=1.) 
if plotNum==1: # wavelet plot p0.cla() p0.margins(x=0) p0.plot(dataTS,data_blSub,color='blue',label='data (%.2f keV)' % dataENFCal) p0.plot(dataTS,data_wlDenoised,color='cyan',label='denoised',alpha=0.7) p0.axvline(fitRiseTime50,color='green',label='fit 50%',linewidth=2) p0.plot(dataTS,fit_blSub,color='red',label='bestfit',linewidth=2) # p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f flo %.0f fhi %.0f fhi-flo %.0f" % (run,iList,chan,dataENFCal,fitStartTime,fitMaxTime,fitMaxTime-fitStartTime)) p0.legend(loc='best') p0.set_xlabel("Time (ns)", ha='right', x=1.) p0.set_ylabel("Voltage (ADC)", ha='right', y=1.) p1.cla() p1.imshow(wpCoeff, interpolation='nearest', aspect="auto", origin="lower",extent=[0, 1, 0, len(wpCoeff)],cmap='viridis') p1.axvline(float(wpLoRise)/numXRows,color='orange',linewidth=2) p1.axvline(float(wpHiRise)/numXRows,color='orange',linewidth=2) # p1.set_title("waveS5 %.2f bcMax %.2f bcMin %.2f riseNoise %.2f" % (waveS5[iH], bcMax[iH], bcMin[iH], riseNoise[iH])) # p1.set_xlabel("Time (%wf)", ha='right', x=1.) p1.set_ylabel("WPT Coefficients", ha='right', y=1.) if plotNum==2: # time points, bandpass filters, tail slope p0.cla() p0.plot(dataTS,data,color='blue',label='data') p0.axvline(den10[iH],color='black',label='lpTP') p0.axvline(den50[iH],color='black') p0.axvline(den90[iH],color='black') p0.plot(dataTS,fit,color='magenta',label='bestfit') if errorCode[2]!=1: p0.plot(tailTS, wl.tailModelPol(tailTS, *popt1), color='orange',linewidth=2, label='tailPol') p0.legend(loc='best') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f" % (run,iEvent,chan,dataENFCal)) p1.cla() p1.plot(dataTS,data_lPass,color='blue',label='lowpass') p1.plot(dataTS,data_filtDeriv,color='green',label='filtDeriv') p1.plot(dataTS,data_filt,color='black',label='filtfilt') p1.plot(dataTS,data_bPass,color='red',label='bpass') p1.axvline(bandTime[iH],color='orange',label='bandTime') p1.legend(loc='best') if plotNum==3: # freq-domain matched filter p0.cla() p0.plot(dataTS,data,'b') p0.plot(dataTS,temp,'r') p0.plot(dataTS,fit,color='cyan') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f" % (run,iEvent,chan,dataENFCal)) data_asd, data_xFreq = plt.psd(data, Fs=1e8, NFFT=2048, pad_to=2048, visible=False) temp_asd, temp_xFreq = plt.psd(temp, Fs=1e8, NFFT=2048, pad_to=2048, visible=False) p1.cla() p1.loglog(data_xFreq, np.sqrt(data_asd), 'b') p1.loglog(noise_xFreq, np.sqrt(noise_asd), 'g') p1.loglog(temp_xFreq, np.sqrt(temp_asd), 'r') p1.set_xlabel('Frequency (Hz)') p1.set_ylabel('ASD') p1.grid('on') p2.cla() p2.plot(dataTS, SNR) p2.set_title('oppie %.1f' % (oppie[iH])) p2.set_xlabel('Offset time (s)') p2.set_ylabel('SNR') if plotNum==4: # time domain match filter plot p0.cla() p0.plot(dataTS,data_blSub,color='blue',label='data',alpha=0.7) p0.plot(dataTS,fit_blSub,color='red',label='bestfit',linewidth=3) p0.axvline(matchTime[iH],color='orange',label='matchTime',linewidth=2) p0.plot(matchTS,smoothMF,color='magenta',label='smoothMF',linewidth=3) p0.plot(matchTS,match,color='cyan',label='match',linewidth=3) p0.set_xlabel('Time (s)') p0.set_ylabel('Voltage (arb)') p0.legend(loc='best') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f matchMax %.2f matchTime %.2f matchWidth %.2f" % (run,iEvent,chan,dataENFCal,matchMax[iH],matchTime[iH],matchWidth[iH])) if plotNum==5: # bandTime plot p0.cla() p0.plot(dataTS,data_blSub,color='blue',label='data',alpha=0.7) p0.plot(dataTS,data_lPass,color='magenta',label='lowpass',linewidth=4) p0.plot(dataTS,data_bPass,color='red',label='bpass',linewidth=4) 
p0.axvline(bandTime[iH],color='orange',label='bandTime',linewidth=4) p0.legend(loc='best') p0.set_xlabel('Time (ns)') p0.set_ylabel('ADC (arb)') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f" % (run,iEvent,chan,dataENFCal)) if plotNum==6: # waveform fit plot p0.cla() p0.plot(dataTS,data,color='blue',label='data') # p0.plot(dataTS,data_wlDenoised,color='cyan',label='wlDenoised',alpha=0.5) p0.plot(dataTS,temp,color='orange',label='xgauss guess') p0.plot(dataTS,fit,color='red',label='xgauss fit') p0.set_title("Run %d evt %d chan %d trapENFCal %.1f trapENM %.1f deltaBL %.1f\n amp %.2f mu %.2f sig %.2f tau %.2f chi2 %.2f spd %.3f" % (run,iList,chan,dataENFCal,dataENM,dataBL-bl,amp,mu,sig,tau,fitChi2[iH],fitSpeed)) p0.legend(loc='best') p1.cla() p1.plot(dataTS,data-fit,color='blue',label='residual') p1.legend(loc='best') p2.cla() p2.plot(ampTr[1:],label='amp',color='red') p2.legend(loc='best') p3.cla() p3.plot(muTr[1:],label='mu',color='green') p3.legend(loc='best') p4.cla() p4.plot(sigTr[1:],label='sig',color='blue') p4.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e')) p4.legend(loc='best') p5.cla() p5.plot(tauTr[1:],label='tau',color='black') p5.legend(loc='best') p6.cla() p6.plot(blTr[1:],label='bl',color='magenta') p6.legend(loc='best') print(gatTree.fitSlo.at(iH), sig) if plotNum==7: # new traps plot p0.cla() p0.plot(dataTS, data_blSub, color='blue', label='data') p0.plot(sTrapTS, sTrap, color='red', label='sTrap') p0.axvline(t0_SLE[iH], color='red') p0.plot(aTrapTS, aTrap, color='orange', label='aTrap') p0.axvline(t0_ALE[iH], color='orange') p0.plot(eTrapTS, eTrap, color='green', label='eTrap') p0.axhline(lat[iH],color='green') p0.plot(pTrapTS, pTrap, color='magenta', label='pTrap') p0.axhline(latAFC[iH], color='magenta') p0.axhline(latE50[iH], color='cyan') p0.set_title("trapENFCal %.2f trapENM %.2f || latEM %.2f latEF %.2f latEAF %.2f latEFC %.2f latEAFC %.2f latE50 %.2f" % (dataENFCal,dataENM,lat[iH],latF[iH],latAF[iH],latFC[iH],latAFC[iH], latE50[iH])) p0.legend(loc='best') if plotNum==8: # multisite tag plot p0.cla() p0.plot(dataTS, data_blSub, color='blue', label='data') p0.plot(dataTS, data_filtDeriv, color='red', label='filtDeriv') for mse in msList: p0.axvline(mse, color='green') p0.axhline(msThresh,color='red') p0.legend() if plotNum==9: # wavelet vs wf fit residual plot # wavelet packet transform on wf fit residual fitResid = data-fit wpRes = pywt.WaveletPacket(fitResid, 'db2', 'symmetric', maxlevel=4) nodesRes = wpRes.get_level(4, order='freq') wpCoeffRes = np.array([n.data for n in nodesRes], 'd') wpCoeffRes = abs(wpCoeffRes) R6 = np.sum(wpCoeffRes[2:9,1:wpLength//4+1]) R7 = np.sum(wpCoeffRes[2:9,wpLength//4+1:wpLength//2+1]) R8 = np.sum(wpCoeffRes[2:9,wpLength//2+1:3*wpLength//4+1]) R9 = np.sum(wpCoeffRes[2:9,3*wpLength//4+1:-1]) R10 = np.sum(wpCoeffRes[9:,1:wpLength//4+1]) R11 = np.sum(wpCoeffRes[9:,wpLength//4+1:wpLength//2+1]) R12 = np.sum(wpCoeffRes[9:,wpLength//2+1:3*wpLength//4+1]) R13 = np.sum(wpCoeffRes[9:,3*wpLength//4+1:-1]) RsumList = [R6, R7, R8, R9, R10, R11, R12, R13] bcMinRes = 1. 
if np.min(RsumList) < 1 else np.min(RsumList) riseNoiseRes = np.sum(wpCoeffRes[2:-1,wpLoRise:wpHiRise]) / bcMinRes rnCut = 1.1762 + 0.00116 * np.log(1 + np.exp((dataENFCal-7.312)/0.341)) p0.cla() p0.margins(x=0) p0.plot(dataTS,data_blSub,color='blue',label='data') # p0.plot(dataTS,data_wlDenoised,color='cyan',label='denoised',alpha=0.7) # p0.axvline(fitRiseTime50,color='green',label='fit 50%',linewidth=2) p0.plot(dataTS,fit_blSub,color='red',label='bestfit',linewidth=2) # p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f flo %.0f fhi %.0f fhi-flo %.0f" % (run,iList,chan,dataENFCal,fitStartTime,fitMaxTime,fitMaxTime-fitStartTime)) # p0.legend(loc='best') p0.set_title("Run %d Entry %d Channel %d ENFCal %.2f flo %.0f fhi %.0f fhi-flo %.0f approxFitE %.2f" % (run,iList,chan,dataENFCal,fitStartTime,fitMaxTime,fitMaxTime-fitStartTime,fitAmp[iH]*0.4)) p1.cla() p1.plot(dataTS,fitResid,color='blue') p2.cla() p2.set_title("riseNoise %.2f rnCut %.2f riseNoiseRes %.2f bcMinRes %.2f bcMin %.2f max %.2f" % (riseNoise[iH],rnCut,riseNoiseRes,bcMinRes,bcMin[iH],wpCoeffRes.max())) p2.imshow(wpCoeffRes, interpolation='nearest', aspect="auto", origin="lower",extent=[0, 1, 0, len(wpCoeff)],cmap='viridis') plt.tight_layout() plt.pause(0.000001) # ------------------------------------------------------------------------ # End loop over hits, fill branches if batMode: for key in brDict: brDict[key][1].Fill() if iList%5000 == 0 and iList!=0: out.Write("",TObject.kOverwrite) print("%d / %d entries saved (%.2f %% done), time: %s" % (iList,nList,100*(float(iList)/nList),time.strftime('%X %x %Z'))) # End loop over events if batMode and not intMode: out.Write("",TObject.kOverwrite) print("Wrote",out.GetBranch("channel").GetEntries(),"entries in the copied tree,") print("and wrote",b1.GetEntries(),"entries in the new branches.") stopT = time.clock() print("Stopped:",time.strftime('%X %x %Z'),"\nProcess time (min):",(stopT - startT)/60) print(float(nList)/((stopT-startT)/60.),"entries per minute.")
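# The riseNoiseRes comparison above uses a softplus-shaped, energy-dependent threshold,
# rnCut = 1.1762 + 0.00116*log(1 + exp((E-7.312)/0.341)). A small sketch with the same constants,
# evaluated through numpy.logaddexp (a swap-in for readability/stability, not the original code)
# so the exponential cannot overflow a float64, which np.exp would do near E ~ 250 keV.
import numpy as np

def rn_cut(enf_cal):
    """Energy-dependent riseNoise cut; logaddexp(0, x) == log(1 + exp(x)) but stays finite."""
    x = (np.asarray(enf_cal, dtype=float) - 7.312) / 0.341
    return 1.1762 + 0.00116 * np.logaddexp(0., x)

print(rn_cut([1., 10., 238.6, 2614.5]))   # ~1.18 at low energy, then roughly linear growth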
'''RObjs['headerText'] = drawTitle(title) #Create pad to hold all plots. body = TPad("MainPad", "My main pad", 0, 0, 1, 0.9) body.Draw() halfSize = int(len(numReadsList)/2) body.Divide(halfSize,halfSize) i=1 for reads in numReadsList: body.cd(i) RObjs["graph%i" %i] = drawNext(reads) i+=1 ''' nReads = 6 # concurrent-read setting used in both the cut and the graph title constraints = "ConcurrentReads==%i" % nReads numPoints = allData.Draw("VelocityMBps:BufferSize", constraints, "goff") v2 = allData.GetV2() # BufferSize (second draw expression) v1 = allData.GetV1() # VelocityMBps (first draw expression) print("not broken") graph = TGraph(numPoints, v2, v1) # x = BufferSize, y = VelocityMBps print("did we break yet?") graph.SetTitle("%i Concurrent Reads;Buffer Size;Read Velocity (MB/s)" % nReads) graph.Draw("ap") BG.SaveAs("%s.png" % data_file.split('.output')[0])
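# The 'goff' Draw above hands back pointers into TTree's internal buffers via GetV1()/GetV2();
# those buffers are reused by the next Draw() and hold at most fEstimate rows (roughly 1e6 by
# default), so it is safer to copy them before building the graph. A minimal sketch on a toy
# ntuple (branch names VelocityMBps/BufferSize/ConcurrentReads copied from the snippet above;
# the toy values are made up):
from array import array
from ROOT import TNtuple, TGraph, gRandom

nt = TNtuple("nt", "toy throughput scan", "VelocityMBps:BufferSize:ConcurrentReads")
for i in range(200):
    nt.Fill(gRandom.Gaus(500, 50), 2 ** (10 + i % 8), 6)

n = nt.Draw("VelocityMBps:BufferSize", "ConcurrentReads==6", "goff")
y = array('d', [nt.GetV1()[i] for i in range(n)])   # first expression  -> V1 (velocity)
x = array('d', [nt.GetV2()[i] for i in range(n)])   # second expression -> V2 (buffer size)
g = TGraph(n, x, y)
g.SetTitle("6 Concurrent Reads;Buffer Size;Read Velocity (MB/s)")
g.Draw("ap")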
def test3(): ds = [(50, 'Jan05a_50mV.dat'), (100, 'Jan05a_100mV.dat'), (150, 'Jan05a_150mV.dat'), (250, 'Jan05a_250mV.dat'), (400, 'Jan05a_400mV.dat')] chs = [12, 8, 1, 4, 6, 9, 13,15] inspectCh(ds,chs) return # pars = array('d',[0]*3) # errs = array('d',[0]*3) # print(max(chs)) # return nch = max(chs)+1 g1s = [None]*nch g2s = [None]*nch g1M = -1 g2M = -1 for d in ds: tr = TTree() tr.ReadFile(d[1]) for ch in chs: gDirectory.DeleteAll('h1*') tr.Draw('A>>h1','ch=={0:d}'.format(ch),'goff') h1 = gDirectory.Get('h1') h1.Fit('gaus') fun1 = h1.GetFunction('gaus') fun1.SetLineColor(4) errs = fun1.GetParErrors() pars = fun1.GetParameters() # print(d[0],'--'*20) # print(pars[0],errs[0]) # print(pars[1],errs[1]) # print(pars[2],errs[2]) if g1s[ch] is None: g1s[ch] = TGraphErrors() g2s[ch] = TGraphErrors() g1 = g1s[ch] g2 = g2s[ch] n = g1.GetN() g1.SetPoint(n, d[0]*7, pars[1]) g1.SetPointError(n, 0, errs[1]) g2.SetPoint(n, d[0]*7, pars[2]*d[0]*7/pars[1]) g2.SetPointError(n, 0, errs[2]*d[0]*7/pars[1]) if pars[1]+errs[1]>g1M: g1M = pars[1]+errs[1] t = (pars[2]+errs[2])*d[0]*7/pars[1] if t>g2M: g2M = t opt = 'A' h1x = None for g1 in g1s: if g1 is None: continue g1.Draw('PL PMC PLC'+opt) opt = ' same' if h1x is None: h1x = g1.GetHistogram() h1x.GetYaxis().SetRangeUser(0, g1M*1.1) h1x.GetYaxis().SetTitle("U_{Out} [V]") h1x.GetXaxis().SetTitle("N_{Sig} [e^{-}]") waitRootCmdX() opt = 'A' h2x = None for g2 in g2s: if g2 is None: continue g2.Draw('PL PMC PLC'+opt) opt = ' same' if h2x is None: h2x = g2.GetHistogram() h2x.GetYaxis().SetRangeUser(0, g2M*1.1) h2x.GetYaxis().SetTitle("ENC [e^{-}]") h2x.GetXaxis().SetTitle("N_{Sig} [e^{-}]") waitRootCmdX()
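# In test3() the second graph is built as pars[2]*d[0]*7/pars[1], i.e. the fitted output width is
# translated back into input electrons through the measured gain (mean output / injected charge).
# A tiny standalone sketch of that conversion; the 7 e-/mV injection factor is taken from the
# d[0]*7 expression above and should be treated as an assumption about the test-pulse setup.
def enc_electrons(mean_out, sigma_out, n_sig):
    """Equivalent noise charge: gain = mean_out/n_sig [V per electron], ENC = sigma_out/gain."""
    gain = mean_out / float(n_sig)
    return sigma_out / gain

# e.g. a 100 mV test pulse ~ 700 injected electrons under the assumed 7 e-/mV conversion
print(enc_electrons(mean_out=0.42, sigma_out=0.012, n_sig=100 * 7))   # -> 20.0 electrons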
def MakeHistsToFit2(file, f_out_str): E_arr = array('d', []) E_arr_err = array('d', []) effic_arr = array('d', []) effic_arr_err = array('d', []) c1 = TCanvas("c1", "c1", 1600, 900) c1.SetGridx(1) c1.SetGridy(1) #Find which energy files exist, create file list and energy array f_wogamma_list = [] curr_E_val = 0.1 while curr_E_val <= 3.0: normfile = REL_DIR + "tree_pi+pi-p_nofit_" + str(curr_E_val) + ".root" if (os.path.isfile(normfile)): E_arr.append(curr_E_val) E_arr_err.append(0) f_wogamma_list.append(TFile.Open(normfile, 'read')) # print "File found!!!" # print "Was looking for file: " + normfile # print "Current E val: " + str(curr_E_val) # else: # print "File not found!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # print "Was looking for file: " + normfile # print "Current E val: " + str(curr_E_val) curr_E_val += 0.05 #Get normalization factor norm_arr = array('d', []) for i in range(0, len(f_wogamma_list)): my_tr = TTree() my_tr = f_wogamma_list[i].Get("pi+pi-p_nofit_Tree") norm_arr.append(my_tr.GetEntries()) # print "Energy: " + str(E_arr[i]) # print "Normalization: " + str(norm_arr[i]) #Create and fit histograms in larger file f = TFile.Open(file, 'read') my_tr = TTree() my_tr = f.Get("gamma_pi+pi-p_nofit_Tree") h_DeltaPhi_list = [] h_DeltaTheta_list = [] h_EGammaPostCuts_list = [] for i in range(0, len(E_arr)): h_DeltaPhi_curr = TH1F() my_tr.Draw("DeltaPhi>>h_DeltaPhi_curr(1000,-180.,180.)", "") h_DeltaPhi_curr = gPad.GetPrimitive("h_DeltaPhi_curr") h_DeltaPhi_curr.SetName("h_DeltaPhi_" + str(E_arr[i])) h_DeltaPhi_list.append(h_DeltaPhi_curr) h_DeltaTheta_curr = TH1F() my_tr.Draw("DeltaTheta>>h_DeltaTheta_curr(1000,-25.,10.)", "") h_DeltaTheta_curr = gPad.GetPrimitive("h_DeltaTheta_curr") h_DeltaTheta_curr.SetName("h_DeltaTheta_" + str(E_arr[i])) h_DeltaTheta_list.append(h_DeltaTheta_curr) h_EGammaPostCuts_curr = TH1F() E_cut_str = "abs(ThrownE-" + str(E_arr[i]) + ")<0.0001" print "Cut string: " + E_cut_str my_tr.Draw("FoundE>>h_EGammaPostCuts_curr(1000,0,4)", "abs(DeltaPhi)<2.&&abs(DeltaTheta)<0.5&&" + E_cut_str) h_EGammaPostCuts_curr = gPad.GetPrimitive("h_EGammaPostCuts_curr") h_EGammaPostCuts_curr.SetName("h_EGammaPostCuts_" + str(E_arr[i])) h_EGammaPostCuts_list.append(h_EGammaPostCuts_curr) my_gaus_fit = TF1("my_gaus_fit", "gausn") my_gaus_fit.SetParLimits(0, 0, 30000) my_gaus_fit.SetParameter(1, E_arr[i]) my_gaus_fit.SetParLimits(1, E_arr[i] - 0.5, E_arr[i] + 0.1) my_gaus_fit.SetParLimits(2, 0.02, 0.5) my_gaus_fit.SetNpx(1000) h_EGammaPostCuts_curr.Fit(my_gaus_fit, "Q", "", E_arr[i] - 0.3, E_arr[i] + 0.2) c1.SaveAs(".plots/ReactionFilterFit_pipip_" + str(E_arr[i]) + ".png") effic = my_gaus_fit.GetParameter( 0) / h_EGammaPostCuts_curr.GetBinWidth(0) / norm_arr[i] effic_err = my_gaus_fit.GetParError( 0) / h_EGammaPostCuts_curr.GetBinWidth(0) / norm_arr[i] if (E_arr[i] < 0.2): effic = h_EGammaPostCuts_curr.GetEntries() / norm_arr[i] effic_err = sqrt(h_EGammaPostCuts_curr.GetEntries()) / norm_arr[i] if (h_EGammaPostCuts_curr.GetEntries() < 100.): effic = -1 effic_err = 0 print "File Entries: " + str(my_tr.GetEntries()) print "Histogram Entries: " + str(h_EGammaPostCuts_curr.GetEntries()) print "Gaussian yield: " + str( my_gaus_fit.GetParameter(0) / h_EGammaPostCuts_curr.GetBinWidth(0)) print "Normalization: " + str(norm_arr[i]) print "Efficiency: " + str(effic) effic_arr.append(effic) effic_arr_err.append(effic_err) gr_gauscore_effic = TGraphErrors(len(E_arr), E_arr, effic_arr, E_arr_err, effic_arr_err) gr_gauscore_effic.SetName("gr_gauscore_effic") f_out = TFile(f_out_str, "RECREATE") 
gr_gauscore_effic.Write() for i in range(0, len(E_arr)): h_DeltaPhi_list[i].Write() for i in range(0, len(E_arr)): h_DeltaTheta_list[i].Write() for i in range(0, len(E_arr)): h_EGammaPostCuts_list[i].Write() f_out.Close() # curr_E_val = 0.1 # curr_E_val = 2.0 # while curr_E_val <= 3.: # while curr_E_val <= 2.2: # print "Curr E thing " + str(curr_E_val) # normfile = REL_DIR+"tree_pi+pi-p_nofit_"+str(curr_E_val)+".root" # my_tr2 = TTree() # my_tr2 = f_wogamma_list[i].Get("pi+pi-p_nofit_Tree") # NormalizationFactor = my_tr2.GetEntries() # curr_E_val+=0.05 return
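# MakeHistsToFit2 above turns the 'gausn' fit into an efficiency: 'gausn' is ROOT's
# area-normalized Gaussian, so parameter 0 is the peak area in (x-units x counts) and
# p0/binWidth is the event yield, which is then divided by the external normalization.
# A short sketch of that bookkeeping on a toy histogram; n_generated stands in for norm_arr[i].
from ROOT import TH1F, TF1, gRandom

h_toy = TH1F("h_toy", "toy photon peak", 1000, 0., 4.)
for _ in range(5000):
    h_toy.Fill(gRandom.Gaus(1.5, 0.05))

fit = TF1("fit_gausn", "gausn", 1.2, 1.8)       # p0 = area, p1 = mean, p2 = sigma
h_toy.Fit(fit, "QR")

n_generated = 10000.
yield_fit = fit.GetParameter(0) / h_toy.GetBinWidth(1)
yield_err = fit.GetParError(0) / h_toy.GetBinWidth(1)
print("yield = %.0f +/- %.0f, efficiency = %.3f" % (yield_fit, yield_err, yield_fit / n_generated))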
def MakeHistsToFit(file, MIN_E, MAX_E, E_STEP, f_out_str): E_arr = array('d', []) c1 = TCanvas("c1", "c1", 1600, 900) c1.SetGridx(1) c1.SetGridy(1) norm_arr = array('d', []) curr_E_val = MIN_E while curr_E_val <= MAX_E + 0.0001: E_arr.append(curr_E_val) curr_E_val += E_STEP #Create and fit histograms in larger file f = TFile.Open(file, 'read') my_tr = TTree() my_tr = f.Get(WITH_GAMMA_TREENAME) h_DeltaPhi_list = [] #List of deltaPhi histograms to save h_DeltaTheta_list = [] #List of deltaTheta histograms to save h_EGammaPreCuts_list = [] #List of E_gamma histograms to save h_EGammaPostCuts_list = [] #List of E_gamma histograms to save h_EThrown_list = [] #List of E_gamma histograms to save num_tree_entries = float(my_tr.GetEntries()) num_branch_entries = my_tr.Draw("ThrownE", "") norm_scale_factor = num_branch_entries / num_tree_entries # print "Num tree entries: " + str(num_tree_entries) # print "Num branch entries: " + str(num_branch_entries) print "Need to scale by: " + str(norm_scale_factor) for i in range(0, len(E_arr)): E_cut_str = "" if (NORMALIZE_EXTERNAL): E_cut_str = "abs(ThrownE-" + str(E_arr[i]) + ")<0.0001" if (not NORMALIZE_EXTERNAL): E_cut_str = str(E_arr[i] - E_STEP / 2.) + "<ThrownE&&ThrownE<" + str( E_arr[i] + E_STEP / 2.) + "&&ThrownTheta<9&&ThrownTheta>4" print "Cut string: " + E_cut_str #Make DeltaPhi and DeltaTheta histograms h_DeltaPhi_curr = TH1F() my_tr.Draw("DeltaPhi>>h_DeltaPhi_curr(1000,-180.,180.)", E_cut_str) h_DeltaPhi_curr = gPad.GetPrimitive("h_DeltaPhi_curr") if (NORMALIZE_EXTERNAL): h_DeltaPhi_curr.SetNameTitle("h_DeltaPhi_" + str(E_arr[i]), "h_DeltaPhi_" + str(E_arr[i])) if (not NORMALIZE_EXTERNAL): h_DeltaPhi_curr.SetNameTitle("h_DeltaPhi_" + E_cut_str, "h_DeltaPhi_" + E_cut_str) h_DeltaPhi_list.append(h_DeltaPhi_curr) print "h_DeltaPhi_curr hist entries:" + str( h_DeltaPhi_curr.GetEntries()) h_DeltaTheta_curr = TH1F() my_tr.Draw("DeltaTheta>>h_DeltaTheta_curr(1000,-25.,10.)", E_cut_str) h_DeltaTheta_curr = gPad.GetPrimitive("h_DeltaTheta_curr") if (NORMALIZE_EXTERNAL): h_DeltaTheta_curr.SetNameTitle("h_DeltaTheta_" + str(E_arr[i]), "h_DeltaTheta_" + str(E_arr[i])) if (not NORMALIZE_EXTERNAL): h_DeltaTheta_curr.SetNameTitle("h_DeltaTheta_" + E_cut_str, "h_DeltaTheta_" + E_cut_str) h_DeltaTheta_list.append(h_DeltaTheta_curr) print "h_DeltaTheta_curr hist entries:" + str( h_DeltaTheta_curr.GetEntries()) h_EGammaPreCuts_curr = TH1F() my_tr.Draw( "FoundE>>h_EGammaPreCuts_curr(1000,0," + str(MAX_E + 1) + ")", E_cut_str) h_EGammaPreCuts_curr = gPad.GetPrimitive("h_EGammaPreCuts_curr") if (NORMALIZE_EXTERNAL): h_EGammaPreCuts_curr.SetNameTitle( "h_EGammaPreCuts_" + str(E_arr[i]), "h_EGammaPreCuts_" + str(E_arr[i])) if (not NORMALIZE_EXTERNAL): h_EGammaPreCuts_curr.SetNameTitle("h_EGammaPreCuts_" + E_cut_str, "h_EGammaPreCuts_" + E_cut_str) h_EGammaPreCuts_list.append(h_EGammaPreCuts_curr) print "h_EGammaPreCuts_curr hist entries:" + str( h_EGammaPreCuts_curr.GetEntries()) h_EGammaPostCuts_curr = TH1F() my_tr.Draw( "FoundE>>h_EGammaPostCuts_curr(1000,0," + str(MAX_E + 1) + ")", "abs(DeltaPhi)<" + DELTA_PHI_CUT + "&&abs(DeltaTheta)<" + DELTA_THETA_CUT + "&&" + E_cut_str) h_EGammaPostCuts_curr = gPad.GetPrimitive("h_EGammaPostCuts_curr") if (NORMALIZE_EXTERNAL): h_EGammaPostCuts_curr.SetNameTitle( "h_EGammaPostCuts_" + str(E_arr[i]), "h_EGammaPostCuts_" + str(E_arr[i])) if (not NORMALIZE_EXTERNAL): h_EGammaPostCuts_curr.SetNameTitle("h_EGammaPostCuts_" + E_cut_str, "h_EGammaPostCuts_" + E_cut_str) h_EGammaPostCuts_curr.SetTitle("E_{#gamma} thrown = " + 
str(E_arr[i])) h_EGammaPostCuts_curr.GetXaxis().SetTitle("E_{#gamma}") h_EGammaPostCuts_curr.GetYaxis().SetTitle("Yield") h_EGammaPostCuts_list.append(h_EGammaPostCuts_curr) print "h_EGammaPostCuts_curr hist entries:" + str( h_EGammaPostCuts_curr.GetEntries()) h_EThrown_curr = TH1F() # my_tr.Draw("ThrownE>>h_EThrown_curr(1000,0,10)",E_cut_str) my_tr.Draw("MCWeight>>h_EThrown_curr(1000,-10,10)", E_cut_str) h_EThrown_curr = gPad.GetPrimitive("h_EThrown_curr") if (NORMALIZE_EXTERNAL): h_EThrown_curr.SetNameTitle("h_EThrown_" + str(E_arr[i]), "h_EThrown_" + str(E_arr[i])) if (not NORMALIZE_EXTERNAL): h_EThrown_curr.SetNameTitle("h_EThrown_" + E_cut_str, "h_EThrown_" + E_cut_str) h_EThrown_curr.SetTitle("E_{#gamma} thrown = " + str(E_arr[i])) h_EThrown_curr.GetXaxis().SetTitle("E_{#gamma}") h_EThrown_curr.GetYaxis().SetTitle("Yield") h_EThrown_list.append(h_EThrown_curr) print "h_EThrown_curr hist entries:" + str(h_EThrown_curr.GetEntries()) #Save to file f_out = TFile(f_out_str, "RECREATE") # gr_gauscore_effic.Write() for i in range(0, len(E_arr)): h_EGammaPostCuts_list[i].Write() for i in range(0, len(E_arr)): h_DeltaPhi_list[i].Write() for i in range(0, len(E_arr)): h_DeltaTheta_list[i].Write() for i in range(0, len(E_arr)): h_EGammaPreCuts_list[i].Write() for i in range(0, len(E_arr)): h_EThrown_list[i].Write() f_out.Close() return norm_scale_factor
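# MakeHistsToFit builds its per-point selection as a string: an exact ThrownE match when the
# normalization comes from the external no-gamma trees, otherwise a window of width E_STEP plus
# the thrown-angle cut. A tiny standalone sketch of that string builder (theta window copied
# from the cut above; the function name is illustrative, not part of the script).
def energy_bin_cut(e_center, e_step, normalize_external):
    if normalize_external:
        return "abs(ThrownE-" + str(e_center) + ")<0.0001"
    lo, hi = e_center - e_step / 2., e_center + e_step / 2.
    return str(lo) + "<ThrownE&&ThrownE<" + str(hi) + "&&ThrownTheta<9&&ThrownTheta>4"

print(energy_bin_cut(1.5, 0.05, False))   # e.g. 1.475<ThrownE&&ThrownE<1.525&&ThrownTheta<9&&ThrownTheta>4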
cx = 1600 cy = 800 #if label is not None: # cy = int(1.05*cy) canv = TCanvas("PlotCalib", "plotCalib", 0, 0, cx, cy) if opt2d: hhh = TH2D("hhh", "hhh", modmax - modmin + 1, modmin - 0.5, modmax + 0.5, chanmax - chanmin + 1, chanmin - 0.5, chanmax + 0.5) if not many and not opt2d: print("Plotting", data) print("With cut", cut_cond) if vsbin: tree.Draw(data, cut_cond, "goff", 15) else: tree.Draw(data, cut_cond, "goff") if tree.GetSelectedRows() <= 0: print("Not enough points to plot") sys.exit(2) if vsbin: if tree.GetSelectedRows() >= 15: print("Maximum number of bins is 15") if line or noline: if many: first = True color = 1
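# The 2-D option above books one (module, channel) bin per integer index by shifting the axis
# edges by 0.5, so Fill(mod, chan, value) lands each pair exactly on a bin center. A minimal
# sketch of that booking; the ranges and values below are illustrative, not from the script.
from ROOT import TH2D

modmin, modmax, chanmin, chanmax = 0, 11, 0, 15

h2 = TH2D("hcalib", "calibration;module;channel",
          modmax - modmin + 1, modmin - 0.5, modmax + 0.5,
          chanmax - chanmin + 1, chanmin - 0.5, chanmax + 0.5)

for mod, chan, val in [(0, 3, 1.02), (5, 7, 0.98), (11, 15, 1.11)]:
    h2.Fill(mod, chan, val)   # weight = the per-channel value being mapped

h2.Draw("colz")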
histogram.SetXTitle( "Invariant mass of #mu^{+}#mu^{-} pair (GeV/c^{2}) #rightarrow") histogram.SetYTitle(ytitle) histogram.GetYaxis().SetTitleOffset(1.4) histogram.GetYaxis().CenterTitle() histogram.GetXaxis().CenterTitle() # master_canvas.append(canvas) #histograms.append(histogram) #master_canvas[i].cd() canvas.cd() tree.Draw( "fRecPair_ParentMass>>histogram", cut[i], "e") #need to define the cut parameter which will be on cos theta histogram.SetDirectory(0) #tail_func.FixParameter(1,tail_func_parammeters[1]) #tail_func.FixParameter(2,tail_func_parammeters[2]) #tail_func.FixParameter(3,tail_func_parammeters[3]) # par 3 = jpsi n and par 9 = psi n #sig_func.FixParameter() #histogram.Fit(ft.tail_func,"NQR") #histogram.Fit(ft.sig_func,"NQR+") #ft.comb_func.SetParameters(ft.sig_func.GetParameter(0),ft.sig_func.GetParameter(1),ft.sig_func.GetParameter(2),ft.sig_func.GetParameter(3),ft.tail_func.GetParameter(0),ft.tail_func.GetParameter(1),ft.tail_func.GetParameter(6),ft.tail_func.GetParameter(3)) # for the comb function: n1 = par 3, n2 = par 9, a1 = par 2, a2 = par 10
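# The commented-out block above stitches a signal shape and a tail shape into one combined
# function before fitting the dimuon mass peak. A generic, hedged sketch of that pattern using
# ROOT's formula parameter offsets (gaus(0) takes parameters 0-2, pol0(3) parameter 3); the real
# ft.sig_func / ft.tail_func are more elaborate, so this only illustrates the seeding step.
from ROOT import TF1, TH1F, gRandom

lo, hi = 2.5, 3.5                                # illustrative fit window in GeV/c^2
h_mass = TH1F("h_mass", "toy #mu^{+}#mu^{-} mass;m [GeV/c^{2}];events", 100, lo, hi)
for _ in range(20000):
    h_mass.Fill(gRandom.Gaus(3.097, 0.04) if gRandom.Uniform() < 0.3 else gRandom.Uniform(lo, hi))

sig = TF1("sig", "gaus", 3.0, 3.2)
bkg = TF1("bkg", "pol0", lo, hi)
h_mass.Fit(sig, "NQR")                           # pre-fit the peak region only
h_mass.Fit(bkg, "NQR+")                          # pre-fit the flat background

comb = TF1("comb", "gaus(0) + pol0(3)", lo, hi)  # pars 0-2 = signal, par 3 = background
comb.SetParameters(sig.GetParameter(0), sig.GetParameter(1), sig.GetParameter(2),
                   bkg.GetParameter(0))
h_mass.Fit(comb, "R")                            # combined fit over the full window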
def main(argv): """ Matplotlib animation tutorial: http://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/ Requires ffmpeg. (brew install ffmpeg) """ chans = [1176, 672, 648, 660] chan = chans[0] dummyTree = ROOT.TChain("skimTree") # dummyTree.Add("/global/homes/w/wisecg/project/cal/lat/latSkimDS5_run2549*.root") # dummyTree.Add("/global/homes/w/wisecg/project/cal/lat/latSkimDS5_run2550*.root") dummyTree.Add( "/global/homes/w/wisecg/project/bkg/waves/waveSkimDS6_*.root") print("Found %d entries." % (dummyTree.GetEntries())) theCut = "trapENFCal > 2000 && trapENFCal < 2200 && channel==%d && avse>-1" % chan gatTree = TTree() gatTree = dummyTree.CopyTree(theCut) outFile = "../plots/movie-5c-%d.mp4" % chan # Print cut and events passing cut print("Using cut:\n", theCut, "\n") gatTree.Draw(">>elist", theCut, "entrylist") elist = gDirectory.Get("elist") gatTree.SetEntryList(elist) nList = elist.GetN() print("Found", nList, "entries passing cuts.") nWFLimit = 1000 if nList > 1000 else nList # First set up the figure, the axis, and the plot element we want to animate fig = plt.figure(figsize=(20, 10), facecolor='w') fig.set_size_inches(20, 10, True) a1 = plt.subplot(111) a1.set_xlabel("Time (ns)") a1.set_ylabel("ADC") p1, = a1.plot(np.ones(1), np.ones(1), color='blue') # initialization function: plot the background of each frame def init(): p1.set_data([], []) return p1, # animation function. This is called sequentially (it's the loop over events.) def animate(iList): entry = gatTree.GetEntryNumber(iList) gatTree.LoadTree(entry) gatTree.GetEntry(entry) nChans = gatTree.channel.size() numPass = gatTree.Draw("channel", theCut, "GOFF", 1, iList) chans = gatTree.GetV1() chanList = list(set(int(chans[n]) for n in range(numPass))) # Loop over hits passing cuts hitList = (iH for iH in range(nChans) if gatTree.channel.at(iH) in chanList ) # a 'generator expression' for iH in hitList: wf = gatTree.MGTWaveforms.at(iH) iEvent = entry run = gatTree.run chan = gatTree.channel.at(iH) energy = gatTree.trapENFCal.at(iH) dcr = gatTree.dcr99.at(iH) # fs = gatTree.fitSlo.at(iH) # rn = gatTree.riseNoise.at(iH) signal = wl.processWaveform(wf) waveRaw = signal.GetWaveRaw() waveTS = signal.GetTS() baseline = np.sum(waveRaw[:50]) / 50 # fill the figure p1.set_ydata(waveRaw) p1.set_xdata(waveTS) # plt.title("Run %d Ch %d Entry %d trapENFCal %.1f fitSlo %.2f riseNoise %.2f" % (run,chan,iList,energy, fs, rn)) plt.title("Run %d Ch %d Entry %d trapENFCal %.1f dcr99 %.8f" % (run, chan, iList, energy, dcr)) # dynamically scale the axes xmin, xmax = np.amin(waveTS), np.amax(waveTS) # a1.set_xlim([xmin,xmax]) a1.set_xlim([9000, xmax]) # if energy > 1000: # a1.set_ylim(0,1000) # else: ymin, ymax = np.amin(waveRaw), np.amax(waveRaw) # a1.set_ylim([ymin-abs(0.1*ymin),ymax+abs(0.1*ymax)]) a1.set_ylim([ymax - abs(0.35 * ymax), ymax + abs(0.2 * ymax)]) # a1.set_ylim([5000,6000]) # print(progress) # print("%d / %d Run %d nCh %d chan %d trapE %.1f samp %d" % (iList,nList,run,nChans,chan,energy,wf.GetLength())) if iList % 500 == 0 and iList != 0: print("%d / %d entries saved (%.2f %% done)." % (iList, nList, 100 * (float(iList) / nList))) return p1, # call the animator. blit=True means only re-draw the parts that have changed. # anim = animation.FuncAnimation(fig, animate, init_func=init, frames=elist.GetN(), interval=0, blit=True) anim = animation.FuncAnimation(fig, animate, init_func=init, frames=nWFLimit, interval=0, blit=True) # save the animation as an mp4. This requires ffmpeg or mencoder to be # installed. 
# The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system; for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
anim.save(outFile, fps=20)  #, extra_args=['-vcodec', 'libx264'])
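# The movie above is the standard FuncAnimation pattern: init() blanks the line, animate(i)
# loads entry i, resets the data and axis limits, and anim.save() streams the frames to ffmpeg.
# A minimal self-contained version of the same skeleton (no ROOT, toy waveforms, hypothetical
# output file name) that can be used to check the ffmpeg setup before running on real data:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots(figsize=(10, 5))
ax.set_xlabel("Time (ns)")
ax.set_ylabel("ADC")
line, = ax.plot([], [], color='blue')
ts = np.arange(0, 20000, 10.)                   # fake 10 ns sampling

def init():
    line.set_data([], [])
    return line,

def animate(i):
    wf = 100. + 50. * (np.tanh((ts - 10000.) / 500.) + 1.) + np.random.normal(0., 2., ts.size)
    line.set_data(ts, wf)
    ax.set_xlim(ts[0], ts[-1])
    ax.set_ylim(wf.min() - 10., wf.max() + 10.)
    ax.set_title("toy waveform %d" % i)
    return line,

anim = animation.FuncAnimation(fig, animate, init_func=init, frames=50, interval=50, blit=True)
anim.save("toy-waveforms.mp4", fps=20)          # requires ffmpeg on the PATH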