def raw_to_dsp(ds, overwrite=False, nevt=None, test=False, verbose=2, block=8, group=''):
    """
    Run raw_to_dsp on a set of runs.
    [raw file] ---> [dsp_run{}.lh5] (digital signal processing results)

    Parameters
    ----------
    ds : DataSet
        provides the run list (`ds.runs`) and per-run file paths (`ds.paths`).
    overwrite : bool
        if False, runs whose dsp file already exists are skipped.
    nevt : int or None
        unused here; kept for interface compatibility.
    test : bool
        dry run: only print the raw files that would be processed.
    verbose : int
        verbosity level passed to the ProcessingChain.
    block : int
        number of waveforms processed simultaneously.
    group : str
        unused here: every HDF5 group found in the raw file is processed.
    """
    for run in ds.runs:
        # FIX: take the per-run paths from the DataSet. The previous
        # hard-coded debug paths made dsp_file always non-None, so the
        # overwrite check below skipped every run unless overwrite=True.
        raw_file = ds.paths[run]["raw_path"]
        dsp_file = ds.paths[run]["dsp_path"]
        print("raw_file: ", raw_file)
        print("dsp_file: ", dsp_file)

        # skip already-processed runs unless overwriting
        if dsp_file is not None and overwrite is False:
            continue
        if dsp_file is None:
            # declare new file name
            dsp_file = raw_file.replace('raw_', 'dsp_')

        if test:
            print("test mode (dry run), processing raw file:", raw_file)
            continue

        print("Definition of new LH5 version")
        lh5_in = lh5.Store()
        # FIX: use a context manager so the raw file is closed when done
        # (previously the h5py.File was never closed).
        with h5py.File(raw_file, 'r') as f:
            print("File info: ", f.keys())
            # FIX: loop variable renamed — it used to shadow the `group`
            # parameter.
            for grp in f.keys():
                print("Processing: " + raw_file + '/' + grp)
                data = f[grp]['raw']
                wf_in = data['waveform']['values'][()]
                dt = data['waveform']['dt'][0] * unit_parser.parse_unit(
                    data['waveform']['dt'].attrs['units'])

                # Parameters for DCR calculation
                dcr_trap_int = 200
                dcr_trap_flat = 1000
                # NOTE(review): unused — the DCR pickoff below starts at
                # "tp_0+1.5*us" instead of a fixed sample; confirm intent.
                dcr_trap_startSample = 1200

                # Set up processing chain
                proc = ProcessingChain(block_width=block, clock_unit=dt,
                                       verbosity=verbose)
                proc.add_input_buffer("wf", wf_in, dtype='float32')

                # Basic filters: baseline stats, baseline subtraction,
                # pole-zero correction, trapezoid and asymmetric trapezoid
                proc.add_processor(mean_stdev, "wf[0:1000]", "bl", "bl_sig")
                proc.add_processor(np.subtract, "wf", "bl", "wf_blsub")
                proc.add_processor(pole_zero, "wf_blsub", 145*us, "wf_pz")
                proc.add_processor(trap_norm, "wf_pz", 10*us, 5*us, "wf_trap")
                proc.add_processor(asymTrapFilter, "wf_pz", 0.05*us, 2*us, 4*us, "wf_atrap")

                # Timepoint calculation
                proc.add_processor(np.argmax, "wf_blsub", 1, "t_max",
                                   signature='(n),()->()', types=['fi->i'])
                proc.add_processor(time_point_frac, "wf_blsub", 0.95, "t_max", "tp_95")
                proc.add_processor(time_point_frac, "wf_blsub", 0.8, "t_max", "tp_80")
                proc.add_processor(time_point_frac, "wf_blsub", 0.5, "t_max", "tp_50")
                proc.add_processor(time_point_frac, "wf_blsub", 0.2, "t_max", "tp_20")
                proc.add_processor(time_point_frac, "wf_blsub", 0.05, "t_max", "tp_05")
                proc.add_processor(time_point_thresh, "wf_atrap[0:2000]", 0, "tp_0")

                # Energy calculation
                proc.add_processor(np.amax, "wf_trap", 1, "trapEmax",
                                   signature='(n),()->()', types=['fi->f'])
                proc.add_processor(fixed_time_pickoff, "wf_trap",
                                   "tp_0+(5*us+9*us)", "trapEftp")
                proc.add_processor(trap_pickoff, "wf_pz", 1.5*us, 0, "tp_0", "ct_corr")

                # Current calculation
                proc.add_processor(avg_current, "wf_pz", 10, "curr(len(wf_pz)-10, f)")
                proc.add_processor(np.amax, "curr", 1, "curr_amp",
                                   signature='(n),()->()', types=['fi->f'])
                proc.add_processor(np.divide, "curr_amp", "trapEftp", "aoe")

                # DCR calculation: slope using samples 1000 apart, averaging
                # 200 samples, with the start 1.5 us offset from t0.
                # FIX: use the named constants instead of repeating the
                # literals 200 / 1000.
                proc.add_processor(trap_pickoff, "wf_pz", dcr_trap_int,
                                   dcr_trap_flat, "tp_0+1.5*us", "dcr_unnorm")
                proc.add_processor(np.divide, "dcr_unnorm", "trapEftp", "dcr")

                # Tail slope. Basically the same as DCR, except with no PZ correction
                proc.add_processor(linear_fit, "wf_blsub[3000:]", "wf_b", "wf_m")
                proc.add_processor(np.divide, "-wf_b", "wf_m", "tail_rc")

                # ZAC filter energy calculation
                sigma = 10*us
                flat = 1*us
                decay = 160*us
                proc.add_processor(zac_filter, "wf", sigma, flat, decay, "wf_zac(101, f)")
                proc.add_processor(np.amax, "wf_zac", 1, "zacE",
                                   signature='(n),()->()', types=['fi->f'])

                # Set up the LH5 output
                lh5_out = lh5.Table(size=proc._buffer_len)
                lh5_out.add_field("zacE", lh5.Array(proc.get_output_buffer("zacE"), attrs={"units":"ADC"}))
                lh5_out.add_field("trapEmax", lh5.Array(proc.get_output_buffer("trapEmax"), attrs={"units":"ADC"}))
                lh5_out.add_field("trapEftp", lh5.Array(proc.get_output_buffer("trapEftp"), attrs={"units":"ADC"}))
                lh5_out.add_field("ct_corr", lh5.Array(proc.get_output_buffer("ct_corr"), attrs={"units":"ADC*ns"}))
                lh5_out.add_field("bl", lh5.Array(proc.get_output_buffer("bl"), attrs={"units":"ADC"}))
                lh5_out.add_field("bl_sig", lh5.Array(proc.get_output_buffer("bl_sig"), attrs={"units":"ADC"}))
                lh5_out.add_field("A", lh5.Array(proc.get_output_buffer("curr_amp"), attrs={"units":"ADC"}))
                lh5_out.add_field("AoE", lh5.Array(proc.get_output_buffer("aoe"), attrs={"units":"ADC"}))
                lh5_out.add_field("dcr", lh5.Array(proc.get_output_buffer("dcr"), attrs={"units":"ADC"}))
                # NOTE(review): "tp_max" is filled from the "tp_95" buffer —
                # looks like it should be "t_max"; kept as-is pending confirm.
                lh5_out.add_field("tp_max", lh5.Array(proc.get_output_buffer("tp_95", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_95", lh5.Array(proc.get_output_buffer("tp_95", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_80", lh5.Array(proc.get_output_buffer("tp_80", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_50", lh5.Array(proc.get_output_buffer("tp_50", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_20", lh5.Array(proc.get_output_buffer("tp_20", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_05", lh5.Array(proc.get_output_buffer("tp_05", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tp_0", lh5.Array(proc.get_output_buffer("tp_0", unit=us), attrs={"units":"us"}))
                lh5_out.add_field("tail_rc", lh5.Array(proc.get_output_buffer("tail_rc", unit=us), attrs={"units":"us"}))

                print("Processing:\n", proc)
                proc.execute()

                groupname = grp + "/data"
                print("Writing to: " + dsp_file + "/" + groupname)
                lh5_in.write_object(lh5_out, groupname, dsp_file)
# NOTE(review): orphaned fragment — the tail of another raw_to_dsp variant
# whose head is not in this view. It relies on `lh5_out`, `proc`, `group`,
# `out`, `lh5_in`, `lh5` and `us` being defined earlier; presumably a
# leftover duplicate of the function above — TODO confirm and remove.

# Export the remaining timepoint buffers (converted to microseconds).
lh5_out.add_field(
    "tp_80",
    lh5.Array(proc.get_output_buffer("tp_80", unit=us), attrs={"units": "us"}))
lh5_out.add_field(
    "tp_50",
    lh5.Array(proc.get_output_buffer("tp_50", unit=us), attrs={"units": "us"}))
lh5_out.add_field(
    "tp_20",
    lh5.Array(proc.get_output_buffer("tp_20", unit=us), attrs={"units": "us"}))
lh5_out.add_field(
    "tp_05",
    lh5.Array(proc.get_output_buffer("tp_05", unit=us), attrs={"units": "us"}))
lh5_out.add_field(
    "tp_0",
    lh5.Array(proc.get_output_buffer("tp_0", unit=us), attrs={"units": "us"}))
# RC time constant from the tail fit, also in microseconds.
lh5_out.add_field(
    "tail_rc",
    lh5.Array(proc.get_output_buffer("tail_rc", unit=us), attrs={"units": "us"}))

# Run the chain, then write the table to the "<group>/dsp" group of `out`.
proc.execute()
groupname = group[:group.rfind('/') + 1] + "dsp"
print("Writing to: " + out + "/" + groupname)
lh5_in.write_object(lh5_out, groupname, out)
def raw_to_dsp(ds, overwrite=False, nevt=None, test=False, verbose=2, block=8, group='daqdata'):
    """
    Run raw_to_dsp on a set of runs.
    [raw file] ---> [dsp_run{}.lh5] (digital signal processing results)

    Reads the waveform table from the "/ORSIS3302DecoderForEnergy" group of
    each run's raw file and writes a "data" table of reduced quantities.

    Parameters
    ----------
    ds : DataSet
        provides the run list (`ds.runs`) and per-run file paths (`ds.paths`).
    overwrite : bool
        if False, runs whose dsp file already exists are skipped.
    nevt : int or None
        unused here; kept for interface compatibility.
    test : bool
        dry run: only print the raw files that would be processed.
    verbose : int
        verbosity level passed to the ProcessingChain.
    block : int
        number of waveforms processed simultaneously.
    group : str
        unused here; kept for interface compatibility.
    """
    # NOTE(review): this redefines `raw_to_dsp` declared earlier in the file;
    # only the later definition is live — confirm which one is wanted.
    for run in ds.runs:
        raw_file = ds.paths[run]["raw_path"]
        dsp_file = ds.paths[run]["dsp_path"]

        # skip already-processed runs unless overwriting
        if dsp_file is not None and overwrite is False:
            continue
        if dsp_file is None:
            # declare new file name
            dsp_file = raw_file.replace('raw', 'dsp')

        if test:
            print("test mode (dry run), processing raw file:", raw_file)
            continue

        # new LH5 version
        lh5_in = lh5.Store()
        data = lh5_in.read_object("/ORSIS3302DecoderForEnergy", raw_file)
        wf_in = data['waveform']['values'].nda
        dt = data['waveform']['dt'].nda[0] * unit_parser.parse_unit(
            data['waveform']['dt'].attrs['units'])

        # Parameters for DCR calculation
        dcr_trap_int = 200
        dcr_trap_flat = 1000
        dcr_trap_startSample = 1200

        # Set up processing chain
        proc = ProcessingChain(block_width=block, clock_unit=dt,
                               verbosity=verbose)
        proc.add_input_buffer("wf", wf_in, dtype='float32')

        # baseline stats, baseline subtraction, pole-zero correction
        proc.add_processor(mean_stdev, "wf[0:1000]", "bl", "bl_sig")
        proc.add_processor(np.subtract, "wf", "bl", "wf_blsub")
        proc.add_processor(pole_zero, "wf_blsub", 70 * us, "wf_pz")
        # asymmetric-trap and symmetric-trap energies
        proc.add_processor(asymTrapFilter, "wf_pz", 10 * us, 5 * us, 10 * us,
                           "wf_atrap")
        proc.add_processor(np.amax, "wf_atrap", 1, "atrapE",
                           signature='(n),()->()', types=['fi->f'])
        # proc.add_processor(np.divide, "atrapmax", 10*us, "atrapE")
        proc.add_processor(trap_norm, "wf_pz", 10 * us, 5 * us, "wf_trap")
        proc.add_processor(np.amax, "wf_trap", 1, "trapE",
                           signature='(n),()->()', types=['fi->f'])
        # current amplitude and A/E
        proc.add_processor(avg_current, "wf_pz", 10, "curr")
        proc.add_processor(np.amax, "curr", 1, "A_10",
                           signature='(n),()->()', types=['fi->f'])
        proc.add_processor(np.divide, "A_10", "trapE", "AoE")
        # DCR via trapezoid pickoff at a fixed sample
        proc.add_processor(trap_pickoff, "wf_pz", dcr_trap_int, dcr_trap_flat,
                           dcr_trap_startSample, "dcr")

        # Set up the LH5 output
        lh5_out = lh5.Table(size=proc._buffer_len)
        lh5_out.add_field(
            "trapE",
            lh5.Array(proc.get_output_buffer("trapE"), attrs={"units": "ADC"}))
        lh5_out.add_field(
            "bl",
            lh5.Array(proc.get_output_buffer("bl"), attrs={"units": "ADC"}))
        lh5_out.add_field(
            "bl_sig",
            lh5.Array(proc.get_output_buffer("bl_sig"), attrs={"units": "ADC"}))
        lh5_out.add_field(
            "A",
            lh5.Array(proc.get_output_buffer("A_10"), attrs={"units": "ADC"}))
        lh5_out.add_field(
            "AoE",
            lh5.Array(proc.get_output_buffer("AoE"), attrs={"units": "ADC"}))
        lh5_out.add_field(
            "dcr",
            lh5.Array(proc.get_output_buffer("dcr"), attrs={"units": "ADC"}))

        print("Processing:\n", proc)
        proc.execute()

        print("Writing to: ", dsp_file)
        # FIX: `f_lh5` was never defined in this function (NameError at
        # write time); write through the Store opened above instead.
        lh5_in.write_object(lh5_out, "data", dsp_file)
def process_ds(f_grid, f_opt, f_tier1, d_out, efilter):
    """
    Process the windowed raw file 'f_tier1' and create the DSP file 'f_opt'.

    For every detector group in 'f_tier1', evaluates the energy filter named
    by 'efilter' ('trapE', 'zacE', 'cuspE', optionally with a 'corr' suffix
    for charge-trapping correction) at every row of the parameter grid
    loaded from 'f_grid', writing one output column per grid row.

    Parameters
    ----------
    f_grid : str
        HDF file holding the parameter grid (read with pandas).
    f_opt : str
        output DSP (lh5) file; removed first if it already exists.
    f_tier1 : str
        input windowed raw lh5 file, one HDF5 group per detector.
    d_out : str
        directory holding '<base-filter>_results.h5' with the optimized
        parameters (only needed when 'corr' is in efilter).
    efilter : str
        energy filter name, e.g. 'trapE', 'zacE', 'cuspE', 'trapEcorr', ...
    """
    print("Grid file:", f_grid)
    df_grid = pd.read_hdf(f_grid)

    if os.path.exists(f_opt):
        os.remove(f_opt)

    if 'corr' in efilter:
        bfilter = efilter.split('corr')[0]
        # FIX: narrowed the bare `except:` to the exceptions pd.read_hdf
        # actually raises here (missing file -> OSError, missing key ->
        # KeyError); a bare except also swallowed KeyboardInterrupt.
        try:
            df_res = pd.read_hdf(f'{d_out}/{bfilter}_results.h5', key='results')
            print("Extraction of best parameters for", bfilter)
        except (OSError, KeyError):
            print(bfilter, "not optimized")
            return

    # open raw file
    lh5_in = lh5.Store()
    t_start = time.time()
    # FIX: context manager so the raw file is closed when done.
    with h5py.File(f_tier1, 'r') as f:
        for idx, ged in enumerate(f.keys()):
            if idx == 4:
                # rough ETA printed once, after timing the first detectors.
                # NOTE(review): scales per-detector time by the number of
                # grid rows, not the number of detectors — confirm.
                diff = time.time() - t_start
                tot = diff / 5 * len(df_grid) / 60
                tot -= diff / 60
                print(f"Estimated remaining time: {tot:.2f} mins")

            print("Detector:", ged)
            data = f[ged]['raw']
            wf_in = data['waveform']['values'][()]
            dt = data['waveform']['dt'][0] * unit_parser.parse_unit(
                data['waveform']['dt'].attrs['units'])
            bl_in = data['baseline'][()]  # flashcam baseline values

            # Set up DSP processing chain -- very minimal
            block = 8  # waveforms to process simultaneously
            proc = ProcessingChain(block_width=block, clock_unit=dt,
                                   verbosity=False)
            proc.add_input_buffer("wf", wf_in, dtype='float32')
            proc.add_input_buffer("bl", bl_in, dtype='float32')
            wsize = wf_in.shape[1]
            dt0 = data['waveform']['dt'][0] * 0.001  # presumably ns -> us; confirm
            proc.add_processor(np.subtract, "wf", "bl", "wf_blsub")

            # one set of processors (and one output column) per grid row
            for i, row in df_grid.iterrows():
                if 'corr' in efilter:
                    ct_const = row
                if 'trapE' in efilter:
                    if 'corr' in efilter:
                        rise, flat, rc = (float(df_res['rise'][idx]),
                                          float(df_res['flat'][idx]),
                                          float(df_res['rc'][idx]))
                    else:
                        rise, flat, rc = row
                    proc.add_processor(pole_zero, "wf_blsub", rc*us, "wf_pz")
                    proc.add_processor(trap_norm, "wf_pz", rise*us, flat*us,
                                       f"wf_trap_{i}")
                    proc.add_processor(asymTrapFilter, "wf_pz", 0.05*us, 4*us,
                                       4*us, "wf_atrap")
                    proc.add_processor(time_point_thresh, "wf_pz", 0, "tp_0")
                    proc.add_processor(np.amax, f"wf_trap_{i}", 1, f"trapE_{i}",
                                       signature='(n),()->()', types=['fi->f'])
                    proc.add_processor(fixed_time_pickoff, f"wf_trap_{i}",
                                       f"tp_0+({rise*us}+{flat*us})",
                                       f"trapEftp_{i}")
                if 'zacE' in efilter:
                    if 'corr' in efilter:
                        sigma, flat, decay = (float(df_res['sigma'][idx]),
                                              float(df_res['flat'][idx]),
                                              float(df_res['decay'][idx]))
                    else:
                        sigma, flat, decay = row
                    proc.add_processor(
                        zac_filter(wsize, sigma/dt0, flat/dt0, decay/dt0),
                        "wf", f"wf_zac_{i}(101, f)")
                    proc.add_processor(np.amax, f"wf_zac_{i}", 1, f"zacE_{i}",
                                       signature='(n),()->()', types=['fi->f'])
                if 'cuspE' in efilter:
                    if 'corr' in efilter:
                        sigma, flat, decay = (float(df_res['sigma'][idx]),
                                              float(df_res['flat'][idx]),
                                              float(df_res['decay'][idx]))
                    else:
                        sigma, flat, decay = row
                    proc.add_processor(
                        cusp_filter(wsize, sigma/dt0, flat/dt0, decay/dt0),
                        "wf_blsub", f"wf_cusp_{i}(101, f)")
                    proc.add_processor(np.amax, f"wf_cusp_{i}", 1, f"cuspE_{i}",
                                       signature='(n),()->()', types=['fi->f'])
                if 'corr' in efilter:
                    # charge-trapping correction: pickoff, scale, add to the
                    # base filter's energy
                    proc.add_processor(trap_pickoff, "wf_pz", 1.5*us, 0,
                                       "tp_0", "ct_corr")
                    proc.add_processor(np.multiply, ct_const, "ct_corr",
                                       f"ct_corr_cal_{i}")
                    proc.add_processor(np.add, f"ct_corr_cal_{i}",
                                       f"{bfilter}_{i}", f"{efilter}_{i}")

            # Set up the LH5 output
            lh5_out = lh5.Table(size=proc._buffer_len)
            for i, row in df_grid.iterrows():
                lh5_out.add_field(
                    f"{efilter}_{i}",
                    lh5.Array(proc.get_output_buffer(f"{efilter}_{i}"),
                              attrs={"units": "ADC"}))

            print("Processing:\n", proc)
            proc.execute()

            groupname = ged + "/data"
            print("Writing to: " + f_opt + "/" + groupname)
            lh5_in.write_object(lh5_out, groupname, f_opt)
            print("")

    # FIX: dropped two unused `lh5_in.ls(...)` assignments (one hard-coded
    # to detector 'g024') whose results were never read.
    diff = time.time() - t_start
    print(f"Time to process: {diff:.2f} s")
# RC constant. Linear fit of log of falling tail. proc.add_processor(np.log, "wf_blsub", "tail_log") proc.add_processor(linear_fit, "tail_log", "tail_b", "tail_m") proc.add_processor(np.divide, -1, "tail_m", "tail_rc") # Get tail_rc output buffer tail_rc = proc.get_output_buffer("tail_rc", unit=us) # Process and create histogram rc_hist, bins = np.histogram([], n_bins, rc_range) for start_row in range(0, tot_n_rows, args.chunk): if args.verbose > 0: update_progress(start_row / tot_n_rows) lh5_in, n_rows = lh5_st.read_object(group, args.file, start_row=start_row, obj_buf=lh5_in) proc.execute(0, n_rows) rc_hist += np.histogram(tail_rc, n_bins, rc_range)[0] if args.verbose > 0: update_progress(1) # Get mode of hist and record it rc_const = bins[np.argmax(rc_hist)] if args.verbose > 0: print("Optimal pole-zero constant is", rc_const, "us") rc_const_lib[chan_name] = {'pz_const': "{:.1f}*us".format(rc_const)} with open(args.output, 'w') as f: json.dump(rc_const_lib, f, indent=2, sort_keys=True)