Code Example #1
    def __init__(self,
                 files_in,
                 lh5_group,
                 dsp_config=None,
                 database=None,
                 n_drawn=1,
                 x_unit='ns',
                 x_lim=None,
                 waveforms='waveform',
                 wf_styles=None,
                 lines=None,
                 legend=None,
                 legend_opts=None,
                 norm=None,
                 align=None,
                 selection=None,
                 buffer_len=128,
                 block_width=8,
                 verbosity=1):
        """Constructor for WaveformBrowser:
        - files_in: name of file or list of file names to browse. Wildcards are allowed
        - lh5_group: name of LH5 group in file to browse
        - dsp_config (optional): name of DSP config json file containing transforms available to draw
        - database (optional): dict with database of processing parameters
        - n_drawn (default 1): number of events to draw simultaneously when calling DrawNext
        - x_unit (default ns): unit for x-axis
        - x_lim (default auto): range of x-values, passed as a tuple
        - waveforms (default 'waveform'): name of wf or list of wf names to draw
        - wf_styles (default None): waveform colors and other style parameters to cycle through when drawing waveforms. Can be given as:
            dict of lists: e.g. {'color':['r', 'g', 'b'], 'linestyle':['-', '--', '.']}
            name of predefined style; see matplotlib.style documentation
            None: use current matplotlib style
          If a single style cycle is given, it is used for all waveforms; if a list is given, entries are matched to the waveforms list.
        - lines (default None): name of parameter or list of parameters to draw hlines and vlines for
        - legend (default None): formatting string and values to include in the legend. Can be a list of
          values (one for each waveform in waveforms). Each value can be given as a tuple whose first entry
          is a formatting string and whose subsequent entries are the values to place in it. If a name appears
          inside the {}s of a formatting string, it is assumed to be a parameter from the DSP config file.
          An example is:
          ("{:0.1f} keV", energy)
        - legend_opts (default None): dict containing kwargs for formatting the legend
        - norm (default None): name of parameter (probably energy) to use to normalize WFs; useful when drawing multiple waveforms
        - align (default None): name of time parameter to set as 0 time; useful for aligning multiple waveforms
        - selection (optional): selection of events to draw. Can be either a list of event indices or a NumPy boolean mask (as in pandas).
        - buffer_len (default 128): number of waveforms to keep in memory at a time
        - block_width (default 8): block width for processing chain
        """
        self.verbosity = verbosity

        # data i/o initialization
        self.lh5_st = lh5.Store(keep_open=True)
        if isinstance(files_in, str): files_in = [files_in]

        # Expand wildcards and map out the files
        self.lh5_files = [
            f for f_wc in files_in
            for f in sorted(glob.glob(os.path.expandvars(f_wc)))
        ]
        self.lh5_group = lh5_group
        # file_map is the cumulative length of files up to file n. By doing a left searchsorted, we can get the file for a given wf index
        self.file_map = np.array(
            [self.lh5_st.read_n_rows(lh5_group, f) for f in self.lh5_files],
            'int64')
        np.cumsum(self.file_map, out=self.file_map)

        # Get the input buffer and read the first chunk
        self.lh5_in = self.lh5_st.get_buffer(self.lh5_group, self.lh5_files[0],
                                             buffer_len)
        self.lh5_st.read_object(self.lh5_group,
                                self.lh5_files[0],
                                start_row=0,
                                n_rows=buffer_len,
                                obj_buf=self.lh5_in)
        self.buffer_len = buffer_len
        self.current_file = None
        self.current_chunk = None

        # initialize stuff for iteration
        self.selection = selection
        self.index_it = None
        self.reset()
        self.n_drawn = n_drawn

        # initialize list of objects to draw
        if isinstance(waveforms, str): self.wf_names = [waveforms]
        elif waveforms is None: self.wf_names = []
        else: self.wf_names = list(waveforms)
        self.wf_data = [[] for _ in self.wf_names]

        # wf_styles
        if isinstance(wf_styles, (list, tuple)):
            self.wf_styles = [None for _ in self.wf_data]
            for i, sty in enumerate(wf_styles):
                if isinstance(sty, str):
                    try:
                        self.wf_styles[i] = plt.style.library[sty][
                            'axes.prop_cycle']
                    except KeyError:
                        self.wf_styles[i] = itertools.repeat(None)
                elif sty is None:
                    self.wf_styles[i] = itertools.repeat(None)
                else:
                    self.wf_styles[i] = cycler(**sty)
        else:
            if isinstance(wf_styles, str):
                try:
                    self.wf_styles = plt.style.library[wf_styles][
                        'axes.prop_cycle']
                except KeyError:
                    self.wf_styles = itertools.repeat(None)
            elif wf_styles is None:
                self.wf_styles = itertools.repeat(None)
            else:
                self.wf_styles = cycler(**wf_styles)

        if lines is None: self.line_names = []
        elif isinstance(lines, list): self.line_names = lines
        elif isinstance(lines, tuple): self.line_names = list(lines)
        else: self.line_names = [lines]
        self.line_data = [[] for _ in self.line_names]

        if legend is None: legend = []
        elif not isinstance(legend, list): legend = [legend]

        # Set up the legend format strings and collect input values
        self.legend_input = []
        self.legend_format = []
        for entry in legend:
            legend_input = []
            legend_format = ''
            if not isinstance(entry, tuple):
                entry = (entry, )

            for val in entry:
                if isinstance(val, str):
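                    # string.Formatter().parse yields (literal_text, field_name, format_spec, conversion)
                    # tuples; e.g. "{:0.1f} keV" -> ('', '', '0.1f', None), (' keV', None, None, None).
                    # Rebuild the format string, recording each field name; empty names ('{}')
                    # are filled in later by non-string legend entries.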
                    for st, name, form, cv in string.Formatter().parse(val):
                        legend_format += st
                        if name is not None:
                            legend_format += '{'
                            legend_input.append(name)
                            if form is not None and form != '':
                                legend_format += ':' + form
                            if cv is not None and cv != '':
                                legend_format += '!' + cv
                            legend_format += '}'
                else:
                    # find any {}s to fill from the formatter
                    idxs = [
                        i for i, inp in enumerate(legend_input)
                        if isinstance(inp, str) and inp == ''
                    ]
                    if idxs:  # if we found a {}, it's already in the format string
                        legend_input[idxs[0]] = val
                    else:  # otherwise add to formatter
                        legend_input.append(val)
                        if legend_format != '': legend_format += ', '
                        if isinstance(val, pd.Series):
                            legend_format += val.name + ' = {:.3g}'
                        elif isinstance(val, np.ndarray):
                            legend_format += '{:.3g}'
            self.legend_input.append(legend_input)
            self.legend_format.append(legend_format)

        self.legend_data = [[] for _ in self.legend_input]
        self.legend_kwargs = legend_opts if legend_opts else {}

        self.norm_par = norm
        self.align_par = align

        self.x_unit = units.unit_parser.parse_unit(x_unit)
        self.x_lim = x_lim

        # make processing chain and output buffer
        outputs = self.wf_names + \
                  [name for name in self.line_names if isinstance(name, str)] + \
                  [name for name in self.legend_input  if isinstance(name, str)]
        if isinstance(self.norm_par, str): outputs += [self.norm_par]
        if isinstance(self.align_par, str): outputs += [self.align_par]

        self.proc_chain, self.lh5_out = build_processing_chain(
            self.lh5_in,
            dsp_config,
            db_dict=database,
            outputs=outputs,
            verbosity=self.verbosity,
            block_width=block_width)

        self.fig = None
        self.ax = None
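
For orientation, here is a minimal usage sketch of the constructor above. The import path is an assumption, and the file pattern, LH5 group, config file name, and parameter names ('wf_trap', 'trapEmax') are placeholders; the named-field legend form simply follows the docstring's description, so treat this as an illustration rather than the package's documented example.

# Minimal usage sketch; file pattern, group, config name, and parameter names are placeholders.
from pygama.dsp.WaveformBrowser import WaveformBrowser  # import path is an assumption

browser = WaveformBrowser(
    'raw/run42_*.lh5',                   # wildcards are expanded by the constructor
    'ORSIS3302DecoderForEnergy/raw',     # LH5 group to browse
    dsp_config='dsp_config.json',        # DSP transforms available to draw
    waveforms=['waveform', 'wf_trap'],   # raw waveform plus a DSP transform
    legend='{trapEmax:0.1f} ADC',        # named field -> looked up as a DSP parameter
    n_drawn=5)
# Drawing then proceeds via the browser's draw methods (e.g. DrawNext, per the docstring);
# the exact method names depend on the pygama version.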
Code Example #2
    def run_dsp(dfrow):
        """
        run dsp on the test file, editing the processor list
        alternate idea: generate a long list of processors with different names
        """
        # adjust dsp config dictionary
        rise, flat = dfrow
        # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = f'{tau}*us'
        dsp_config['processors']['wf_trap']['args'][1] = f'{rise}*us'
        dsp_config['processors']['wf_trap']['args'][2] = f'{flat}*us'
        # pprint(dsp_config)

        # run dsp
        pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=0)
        pc.execute()

        # analyze peak
        e_peak = 1460.
        etype = 'trapEmax'
        elo, ehi, epb = 4000, 4500, 3  # the peak moves around a bunch
        energy = tb_out[etype].nda

        # get histogram
        hE, bins, vE = pgh.get_hist(energy, range=(elo, ehi), dx=epb)
        xE = bins[1:]

        # should I center the max at 1460?

        # simple numerical width
        i_max = np.argmax(hE)
        h_max = hE[i_max]
        upr_half = xE[(xE > xE[i_max]) & (hE <= h_max / 2)][0]
        bot_half = xE[(xE < xE[i_max]) & (hE >= h_max / 2)][0]
        fwhm = upr_half - bot_half
        sig = fwhm / 2.355

        # fit to gaussian: amp, mu, sig, bkg
        fit_func = pgf.gauss_bkg
        amp = h_max * fwhm
        bg0 = np.mean(hE[:20])
        x0 = [amp, xE[i_max], sig, bg0]
        xF, xF_cov = pgf.fit_hist(fit_func, hE, bins, var=vE, guess=x0)

        # collect results
        # fit params are [amp, mu, sig, bkg], matching the guess x0 above
        e_fit = xF[1]
        xF_err = np.sqrt(np.diag(xF_cov))
        e_err = xF_err[1]
        # convert the fitted sigma (in ADC) to FWHM, scaled to the 1460 keV peak
        fwhm_fit = xF[2] * 2.355 * 1460. / e_fit
        fwhm_err = xF_err[2] * 2.355 * 1460. / e_fit

        chisq = []
        for i, h in enumerate(hE):
            model = fit_func(xE[i], *xF)
            diff = (model - h)**2 / model
            chisq.append(abs(diff))
        rchisq = sum(chisq) / len(hE)
        fwhm_ovr_mean = fwhm_fit / e_fit

        if show_movie:

            plt.plot(xE,
                     hE,
                     ds='steps',
                     c='b',
                     lw=2,
                     label=f'{etype} {rise}--{flat}')

            # peak shape
            plt.plot(xE,
                     fit_func(xE, *x0),
                     '-',
                     c='orange',
                     alpha=0.5,
                     label='init. guess')
            plt.plot(xE,
                     fit_func(xE, *xF),
                     '-r',
                     alpha=0.8,
                     label='peakshape fit')
            plt.plot(np.nan,
                     np.nan,
                     '-w',
                     label=f'mu={e_fit:.1f}, fwhm={fwhm_fit:.2f}')

            plt.xlabel(etype, ha='right', x=1)
            plt.ylabel('Counts', ha='right', y=1)
            plt.legend(loc=2)

            # show a little movie
            plt.show(block=False)
            plt.pause(0.01)
            plt.cla()

        # return results
        return pd.Series({
            'e_fit': e_fit,
            'fwhm_fit': fwhm_fit,
            'rchisq': rchisq,
            'fwhm_err': fwhm_err,
            'fwhm_ovr_mean': fwhm_ovr_mean
        })
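
Since run_dsp takes one (rise, flat) row and returns a pandas Series of figures of merit, it is naturally driven by DataFrame.apply over a grid of trap settings. A minimal driver sketch of that kind follows; the grid ranges are illustrative only, and it assumes it runs in the same scope as run_dsp, with tb_data, dsp_config, and show_movie already defined as the function above implies.

import itertools

import numpy as np
import pandas as pd

# Hypothetical grid of trapezoid rise/flat times, in microseconds.
rises = np.arange(1, 6, 1)        # 1 .. 5 us
flats = np.arange(0.5, 3, 0.5)    # 0.5 .. 2.5 us
grid = pd.DataFrame(list(itertools.product(rises, flats)), columns=['rise', 'flat'])

# apply() passes each row to run_dsp and collects the returned Series into a DataFrame.
results = pd.concat([grid, grid.apply(run_dsp, axis=1)], axis=1)

# e.g. pick the setting with the smallest fitted FWHM / mean.
best = results.loc[results['fwhm_ovr_mean'].idxmin()]
print(best)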
Code Example #3
def optimize_dcr(dg):
    """
    I don't have an a priori figure of merit for the DCR parameter, until I can
    verify that we're seeing alphas.  So this function should just run processing
    on a CAGE run with known alpha events, and show you the 2d DCR vs. energy.
    
    Once we know we can reliably measure the alpha distribution somehow, then
    perhaps we can try a grid search optimization like the one done in 
    optimize_trap.
    """
    # files to consider.  fixme: right now only works with one file
    sto = lh5.Store()
    lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
    raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']
    f_raw = raw_list.values[0]

    tb_raw = 'ORSIS3302DecoderForEnergy/raw/'
    tb_data = sto.read_object(tb_raw, f_raw)

    cycle = dg.fileDB['cycle'].values[0]
    f_results = f'./temp_{cycle}.h5'

    write_output = True

    # adjust dsp config
    with open('opt_dcr.json') as f:
        dsp_config = json.load(f, object_pairs_hook=OrderedDict)
    # pprint(dsp_config)
    # exit()

    # set dcr parameters
    # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default
    # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?
    dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'
    dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise
    dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat
    dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart

    # set trap energy parameters
    # ene_rise, ene_flat = "2*us", "1*us" # best? from optimize_trap
    ene_rise, ene_flat = "10*us", "5*us"
    dsp_config['processors']['wf_trap']['args'][1] = ene_rise
    dsp_config['processors']['wf_trap']['args'][2] = ene_flat

    # adjust pole-zero constant
    dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'
    # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'
    # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'

    # run dsp
    print('Running DSP ...')
    t_start = time.time()
    pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)
    pc.execute()
    t_elap = (time.time() - t_start) / 60
    print(f'Done.  Elapsed: {t_elap:.2f} min')

    df_out = tb_out.get_dataframe()

    if write_output:
        df_out.to_hdf(f_results, key='opt_dcr')
        print('Wrote output file:', f_results)
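
The point of the temporary file, per the docstring, is to look at DCR vs. energy for this run. Here is a minimal read-back and plotting sketch; the output column names ('trapEmax', 'dcr_raw') are assumptions based on the processors configured above.

import matplotlib.pyplot as plt
import pandas as pd

# Read back the temporary DSP output written by optimize_dcr.
df = pd.read_hdf(f_results, key='opt_dcr')   # f_results = f'./temp_{cycle}.h5'

# 2D histogram of DCR vs. (uncalibrated) trap energy.  Column names are assumptions
# based on the processors configured above.
plt.hist2d(df['trapEmax'], df['dcr_raw'], bins=[200, 200], cmap='viridis')
plt.colorbar(label='counts')
plt.xlabel('trapEmax (ADC)', ha='right', x=1)
plt.ylabel('dcr_raw (arb)', ha='right', y=1)
plt.show()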
Code Example #4
    def __init__(self,
                 files_in,
                 lh5_group,
                 dsp_config=None,
                 n_drawn=1,
                 x_unit='ns',
                 x_lim=None,
                 waveforms='waveform',
                 lines=None,
                 legend=None,
                 norm=None,
                 align=None,
                 selection=None,
                 buffer_len=128,
                 block_width=8,
                 verbosity=1):
        """Constructor for WaveformBrowser:
        - files_in: name of file or list of file names to browse. Wildcards are allowed
        - lh5_group: name of LH5 group in file to browse
        - dsp_config (optional): name of DSP config json file containing transforms available to draw
        - n_drawn (default 1): number of events to draw simultaneously when calling DrawNext
        - x_unit (default ns): unit for x-axis
        - x_lim (default auto): range of x-values, passed as a tuple
        - waveforms (default 'waveform'): name of wf or list of wf names to draw
        - lines (default None): name of parameter or list of parameters to draw hlines and vlines for
        - legend (default None): name of parameter or list of parameters to include in the legend
        - norm (default None): name of parameter (probably energy) to use to normalize WFs; useful when drawing multiple waveforms
        - align (default None): name of time parameter to set as 0 time; useful for aligning multiple waveforms
        - selection (optional): selection of events to draw. Can be either a list of event indices or a NumPy boolean mask (as in pandas).
        - buffer_len (default 128): number of waveforms to keep in memory at a time
        - block_width (default 8): block width for processing chain
        """
        self.verbosity = verbosity

        # data i/o initialization
        self.lh5_st = lh5.Store(keep_open=True)
        if isinstance(files_in, str): files_in = [files_in]

        # Expand wildcards and map out the files
        self.lh5_files = [
            f for f_wc in files_in
            for f in sorted(glob.glob(os.path.expandvars(f_wc)))
        ]
        self.lh5_group = lh5_group
        # file_map is the cumulative length of files up to file n. By doing a left searchsorted, we can get the file for a given wf index
        self.file_map = np.array(
            [self.lh5_st.read_n_rows(lh5_group, f) for f in self.lh5_files],
            'int64')
        np.cumsum(self.file_map, out=self.file_map)

        # Get the input buffer and read the first chunk
        self.lh5_in = self.lh5_st.get_buffer(self.lh5_group, self.lh5_files[0],
                                             buffer_len)
        self.lh5_st.read_object(self.lh5_group, self.lh5_files[0], 0,
                                buffer_len, self.lh5_in)
        self.buffer_len = buffer_len
        self.current_file = None
        self.current_chunk = None

        # initialize stuff for iteration
        self.selection = selection
        self.index_it = None
        self.reset()
        self.n_drawn = n_drawn

        # initialize list of objects to draw
        if isinstance(waveforms, str): self.waveforms = [waveforms]
        elif waveforms is None: self.waveforms = []
        else: self.waveforms = list(waveforms)

        if isinstance(lines, str): self.lines = [lines]
        elif lines is None: self.lines = []
        else: self.lines = list(lines)

        if isinstance(legend, str): self.legend = [legend]
        elif legend is None: self.legend = []
        else: self.legend = list(legend)
        self.labels = []

        self.norm_par = norm
        self.align_par = align

        self.x_unit = units.unit_parser.parse_unit(x_unit)
        self.x_lim = x_lim

        # make processing chain and output buffer
        outputs = self.waveforms + self.lines + self.legend
        if self.norm_par is not None: outputs += [self.norm_par]
        if self.align_par is not None: outputs += [self.align_par]
        self.proc_chain, self.lh5_out = build_processing_chain(
            self.lh5_in,
            dsp_config,
            outputs,
            verbosity=self.verbosity,
            block_width=block_width)

        self.fig = None
        self.ax = None
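
As with the newer constructor in Code Example #1, a minimal usage sketch may help; here the selection argument is a boolean mask, as the docstring describes. The file pattern, LH5 group, config name, parameter names, and the energy file are all placeholders.

import numpy as np

# Hypothetical per-event energy array used to build a pandas-style boolean mask.
energy = np.load('energy.npy')            # placeholder: one value per event
mask = (energy > 4000) & (energy < 4500)  # select events in an energy window

browser = WaveformBrowser(
    'raw/run42_*.lh5',                    # placeholder file pattern
    'ORSIS3302DecoderForEnergy/raw',      # placeholder LH5 group
    dsp_config='dsp_config.json',
    waveforms='wf_pz',                    # single waveform/transform name
    legend=['trapEmax'],                  # this version takes plain parameter names
    selection=mask)                       # boolean mask over events, per the docstring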