def raw_to_dsp(self, overwrite=False, test=False, proc_list=None, out_dir=None, verbose=False, multiproc=True):
    """Run the DSP on each run's raw file, producing a dsp output file.

    Parameters
    ----------
    overwrite : bool
        If True, reprocess runs whose dsp file already exists.
    test : bool
        Dry run -- print the raw files that would be processed, do nothing.
    proc_list : dict, optional
        Processor list. If None, each run's list is looked up from its
        'build_opt' entry in the config's 'build_options'.
    out_dir : str, optional
        Output directory; defaults to self.dsp_dir.
    verbose : bool
        Forwarded to Processraw.
    multiproc : bool
        Forwarded to Processraw as its multiprocess flag.
    """
    from pygama.dsp.base import Intercom
    from pygama.io.raw import Processraw

    # resolve the output directory once -- it is loop-invariant
    out_dir = self.dsp_dir if out_dir is None else out_dir

    for run in self.runs:
        raw_file = self.paths[run]["raw_path"]
        dsp_file = self.paths[run]["dsp_path"]

        # skip runs that already have a dsp file unless overwriting
        if dsp_file is not None and overwrite is False:
            continue

        if test:
            print("test mode (dry run), processing Tier 1 file:", raw_file)
            continue

        # BUG FIX: resolve the processor list per run (the old code
        # assigned to `proc_list` itself, so every run after the first
        # reused the first run's build_opt configuration)
        conf = self.paths[run]["build_opt"]
        run_proc_list = (self.config['build_options'][conf]['raw_options']
                         if proc_list is None else proc_list)
        proc = Intercom(run_proc_list)

        # BUG FIX: honor the `multiproc` argument (was hard-coded to
        # multiprocess=False, silently ignoring the parameter)
        Processraw(raw_file, proc,
                   output_dir=out_dir, overwrite=overwrite,
                   verbose=verbose, multiprocess=multiproc,
                   nevt=np.inf, ioff=0,
                   chunk=self.config["chunksize"])
def process_ds(rise_times):
    """Scan a list of trapezoid rise times over DS3 and determine the
    parameters that minimize the FWHM of the peak (fitting to the
    peakshape function).  One tier-2 table per rise time is appended
    to the optimization output file.
    """
    from pygama.dsp.base import Intercom
    from pygama.io.tier1 import ProcessTier1
    import pygama.io.decoders.digitizers as pgd

    ds = DataSet(3, md="runDB.json")
    first_run = ds.runs[0]

    # temporary I/O locations
    data_dir = os.path.expanduser('~') + "/Data/cage"
    t1_file = f"{data_dir}/cage_ds3_t1.h5"
    t2_file = f"{data_dir}/cage_ds3_t2.h5"
    opt_file = f"{data_dir}/cage_ds3_optimize.h5"

    # start from a clean optimization output file
    if os.path.exists(opt_file):
        os.remove(opt_file)

    # sanity-check the windowed input file
    windowed = pd.read_hdf(t1_file)
    nevt = len(windowed)

    rc_decay = 72

    for idx, rise in enumerate(rise_times):

        # minimal custom tier 1 processor list for this rise time
        energy_trap = {"wfout": "wf_etrap", "wfin": "wf_blsub",
                       "rise": rise, "flat": 2.5, "decay": rc_decay}
        current_trap = {"wfout": "wf_atrap", "wfin": "wf_blsub",
                        "rise": 0.04, "flat": 0.1, "fall": 2}
        proc_list = {
            "clk": 100e6,
            "fit_bl": {"ihi": 500, "order": 1},
            "blsub": {},
            "trap": [energy_trap, current_trap],
            "get_max": [{"wfin": "wf_etrap"}, {"wfin": "wf_atrap"}],
            "ftp": {}
        }
        proc = Intercom(proc_list)

        # point the decoder at the windowed dataframe
        dig = pgd.SIS3302Decoder
        dig.decoder_name = "df_windowed"
        dig.class_name = None

        ProcessTier1(t1_file, proc,
                     output_dir=data_dir, overwrite=True, verbose=False,
                     multiprocess=True, nevt=np.inf, ioff=0,
                     chunk=ds.config["chunksize"], run=first_run,
                     t2_file=t2_file, digitizers=[dig])

        # append this rise time's results to the main output file
        pd.read_hdf(t2_file).to_hdf(opt_file, f"opt_{idx}")
def tier1(ds, sub, overwrite=False, nevt=None, ioff=None, multiproc=None, verbose=False, test=False):
    """Run ProcessTier1 on a set of runs.

    [t1_run{}.h5] ---> [t2_run{}.h5]  (tier 2 file: DSP results, no waveforms)

    The processor list can come from:
    - a json configuration file (recommended)
    - Intercom(default_list=True)
    - manual additions via Intercom::add
    """
    from pygama.dsp.base import Intercom
    from pygama.io.tier1 import ProcessTier1

    for run in ds.runs:

        in_file = ds.paths[run]["t1_path"]
        out_file = ds.paths[run]["t2_path"]

        # don't clobber an existing tier 2 file unless asked to
        if out_file is not None and overwrite is False:
            continue

        print("In/Out", in_file, out_file)

        # dry run: just report what would be processed
        if test:
            print("test mode (dry run), processing Tier 1 file:", in_file)
            continue

        # build the processor list from this run's build options
        build_conf = ds.paths[run]["build_opt"]
        intercom = Intercom(ds.config["build_options"][build_conf]["tier1_options"])

        ProcessTier1(in_file, intercom,
                     ftype=ds.ftype, output_dir=ds.tier2_dir,
                     overwrite=overwrite, verbose=verbose,
                     multiprocess=multiproc, nevt=nevt, ioff=ioff,
                     chunk=ds.config["chunksize"])
def process_ds(ds, f_grid, f_opt, f_tier1, f_tier2):
    """Run ProcessTier1 once per parameter-grid row and determine the
    trapezoid parameters that minimize the FWHM of the peak (fitting
    to the peakshape function).

    NOTE: I don't think we need to multiprocess this, since that's
    already being done in ProcessTier1

    Parameters
    ----------
    ds : DataSet
        Used for its run list and config ("chunksize").
    f_grid : str
        HDF5 file holding the (rise, flat, rc) parameter grid.
    f_opt : str
        Output HDF5 file; one table per grid row, keyed "opt_{i}".
        Removed first if it already exists.
    f_tier1 : str
        Windowed tier 1 input file.
    f_tier2 : str
        Scratch tier 2 file, overwritten for every grid row.
    """
    from pygama.dsp.base import Intercom
    from pygama.io.tier1 import ProcessTier1
    import pygama.io.decoders.digitizers as pgd

    df_grid = pd.read_hdf(f_grid)

    # start from a clean output file
    if os.path.exists(f_opt):
        os.remove(f_opt)

    t_start = time.time()
    for i, row in df_grid.iterrows():

        # estimate remaining time in scan.
        # BUG FIX: at the top of iteration i == 4, only 4 rows (0..3)
        # have completed, so divide the elapsed time by 4, not 5, to
        # get the per-row average.
        if i == 4:
            diff = time.time() - t_start
            tot = diff / 4 * len(df_grid) / 60
            tot -= diff / 60
            print(f"Estimated remaining time: {tot:.2f} mins")

        # NOTE(review): assumes the grid columns are ordered
        # (rise, flat, rc) -- confirm against the grid writer
        rise, flat, rc = row
        print(f"Row {i}/{len(df_grid)}, rise {rise} flat {flat} rc {rc}")

        # custom tier 1 processor list -- very minimal
        proc_list = {
            "clk": 100e6,
            "fit_bl": {"ihi": 500, "order": 1},
            "blsub": {},
            "trap": [
                {"wfout": "wf_etrap", "wfin": "wf_blsub",
                 "rise": rise, "flat": flat, "decay": rc},
                {"wfout": "wf_atrap", "wfin": "wf_blsub",
                 "rise": 0.04, "flat": 0.1, "fall": 2}  # could vary these too
            ],
            "get_max": [{"wfin": "wf_etrap"}, {"wfin": "wf_atrap"}],
            "ftp": {}
        }
        proc = Intercom(proc_list)

        # NOTE(review): this sets class attributes on the decoder
        # class itself, not an instance -- kept as-is since
        # ProcessTier1 is handed the class object
        dig = pgd.SIS3302Decoder
        dig.decoder_name = "df_windowed"
        dig.class_name = None

        # write the scratch tier 2 file next to f_tier2
        out_dir = "/".join(f_tier2.split("/")[:-1])

        # process silently
        ProcessTier1(f_tier1, proc,
                     output_dir=out_dir, overwrite=True, verbose=False,
                     multiprocess=True, nevt=np.inf, ioff=0,
                     chunk=ds.config["chunksize"], run=ds.runs[0],
                     t2_file=f_tier2, digitizers=[dig])

        # load the temporary file and append to the main output file
        df_key = f"opt_{i}"
        t2df = pd.read_hdf(f_tier2)
        t2df.to_hdf(f_opt, df_key)