Example #1
def main():
    """
    OPPI biasing on 12/30/20 didn't go well (elog 298).  
    It looks like there is significantly more capacitance "near" the detector,
    and during running, I thought I saw it changing over time (~minute timescale).
    
    Study: find some pulse shape parameters that are sensitive to "how slow" 
    the waveforms really are, for a given run/cycle.  We should then be able
    to extend this analysis to monitor the stability of OPPI during "good" runs.
    """
    # fileDB query.  this could be moved to cmd line arg in the future
    que = 'run==111'  # 30 min bad bkg run
    # que = 'run==110' # 3hr alpha run

    # load fileDB.  use DataGroup, with a hack to fix use of relative file paths
    pwd = os.getcwd()
    os.chdir('../processing/')
    dg = DataGroup('cage.json', load=True)
    os.chdir(pwd)
    # print(dg.fileDB)

    # run query
    dg.fileDB = dg.fileDB.query(que)

    # check query result
    view_cols = ['daq_file', 'cycle', 'run', 'runtype', 'startTime', 'runtime']
    print(dg.fileDB[view_cols], '\n')

    # -- run routines --
    plot_dsp(dg)
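
# A minimal sketch of a stability metric such a study could use: track the
# median 0-50% rise time per cycle as a proxy for "how slow" the waveforms
# are.  The column names (cycle, tp_0, tp_50) are assumptions based on the
# dsp parameters used elsewhere in this codebase, not guaranteed by plot_dsp.
import pandas as pd

def rise_time_by_cycle(df_dsp):
    """df_dsp: dsp DataFrame with 'cycle', 'tp_0', 'tp_50' columns (assumed)."""
    df = df_dsp.copy()
    df['tp0_50'] = df['tp_50'] - df['tp_0']  # 0-50% rise time
    # median per cycle is robust against outlier waveforms
    return df.groupby('cycle')['tp0_50'].median()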
Example #2
def fix_fileDB(dg):
    """
    ./setup.py -ff
    sometimes the fileDB can get corrupted in places.
    here's a function which can be modified to fix various issues without
    deleting the existing fileDB.
    """
    # load existing fileDB
    dg.load_df()

    # accidentally forgot to run get_lh5_columns when I updated the fileDB.
    # print(dg.fileDB.columns)

    df1 = dg.fileDB.query('raw_path == raw_path')  # no nan's
    df2 = dg.fileDB.query('raw_path != raw_path')  # nan's

    dg2 = DataGroup('$CAGE_SW/processing/cage.json')
    dg2.fileDB = df2

    # clone of pygama/analysis/datagroup.py :: get_lh5_columns
    def get_files(row):
        tmp = row.to_dict()
        for tier in dg2.tier_dirs:

            # get filename
            tmp['tier'] = tier

            # leave subsystem unspecified
            if dg2.subsystems != ['']:
                tmp['sysn'] = '{sysn}'

            # set the filename.  might have a '{sysn}' string present
            row[f'{tier}_file'] = dg2.lh5_template.format_map(tmp)

            # compute file path.
            # daq_to_raw outputs a file for each subsystem, and we
            # handle this here by leaving a regex in the file string
            path = f'/{tier}'
            if dg2.subsystems != [""]:
                path += '/{sysn}'
            if row['runtype'] in dg2.run_types:
                path += f"/{row['runtype']}"

            row[f'{tier}_path'] = path
        return row

    dg2.fileDB = dg2.fileDB.apply(get_files, axis=1)
    # print(dg2.fileDB)

    tmp = pd.concat([df1, dg2.fileDB])
    dg.fileDB = tmp

    print('New fileDB:')
    print(dg.fileDB)

    print('Ready to save.  This will overwrite any existing fileDB.')
    ans = input('Save updated fileDB? (y/n):')
    if ans.lower() == 'y':
        dg.save_df(os.path.expandvars(dg.config['fileDB']))
        print('fileDB updated.')
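
# Aside: the 'raw_path != raw_path' query above is a pandas trick to select
# NaN rows, since NaN never compares equal to itself.  A standalone
# demonstration with toy data:
import numpy as np
import pandas as pd

df = pd.DataFrame({'raw_path': ['/lh5/raw', np.nan]})
print(df.query('raw_path == raw_path'))  # rows with a valid raw_path
print(df.query('raw_path != raw_path'))  # rows where raw_path is NaN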
Example #3
def main():
    doc="""
    OPPI STC data processing routine.
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # declare datagroup
    arg('-q', '--query', nargs=1, type=str,
        help="select file group to calibrate: -q 'run==1' ")

    # routines
    arg('--d2r', action=st, help='run daq_to_raw')
    arg('--r2d', action=st, help='run raw_to_dsp')
    arg('--r2d_file', nargs=2, type=str, help='single-file raw_to_dsp')
    arg('--d2h', action=st, help='run dsp_to_hit (CAGE-specific)')

    # options
    arg('-o', '--over', action=st, help='overwrite existing files')
    arg('-n', '--nwfs', nargs='*', type=int, help='limit num. waveforms')
    arg('-v', '--verbose', action=st, help='verbose mode')

    args = par.parse_args()

    # load main DataGroup, select files to calibrate
    dg = DataGroup('oppi.json', load=True)
    if args.query:
        que = args.query[0]
        dg.file_keys.query(que, inplace=True)
    else:
        dg.file_keys = dg.file_keys[-1:]

    view_cols = ['run', 'cycle', 'daq_file', 'runtype', 'startTime']
    # optional extras: 'threshold', 'stopTime', 'runtime'
    print(dg.file_keys[view_cols].to_string())
    print('Files:', len(dg.file_keys))
    # exit()

    # -- set options --
    nwfs = args.nwfs[0] if args.nwfs is not None else np.inf

    print('Processing settings:'
          # '\n$LPGTA_DATA =', os.environ.get('LPGTA_DATA'),
          # '\n$LEGEND_META =', os.environ.get('LEGEND_META'),
          f'\n  overwrite? {args.over}'
          f'\n  limit wfs? {nwfs}')

    # -- run routines --
    if args.d2r: d2r(dg, args.over, nwfs, args.verbose)
    if args.r2d: r2d(dg, args.over, nwfs, args.verbose)
    if args.d2h: d2h(dg, args.over, nwfs, args.verbose)

    if args.r2d_file:
        f_raw, f_dsp = args.r2d_file
        r2d_file(f_raw, f_dsp, args.over, nwfs, args.verbose)
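
# Typical invocations (assuming this script is saved as processing.py; the
# file names and query are hypothetical):
#   python processing.py -q 'run==111' --d2r --r2d
#   python processing.py --r2d_file run111_raw.lh5 run111_dsp.lh5 -o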
Example #4
def update(dg, batch_mode=False):
    """
    ./setup.py -u
    After taking new data, run this function to add rows to fileDB.
    New rows will not have all columns yet.
    TODO: look for nan's to identify cycles not covered in runDB
    """
    print('Updating fileDB ...')

    dbg_cols = ['unique_key', 'run', 'cycle', 'daq_file']

    # load existing file keys
    dg.load_df()
    # print(dg.fileDB[dbg_cols])

    # scan daq dir for new file keys
    dg_new = DataGroup('$CAGE_SW/processing/cage.json')
    dg_new.scan_daq_dir()
    dg_new.fileDB.sort_values(['cycle'], inplace=True)
    dg_new.fileDB.reset_index(drop=True, inplace=True)

    # add standard columns
    dg_new.fileDB = dg_new.fileDB.apply(get_cyc_info, args=[dg_new], axis=1)
    dg_new.get_lh5_cols()

    for col in ['run', 'cycle']:
        dg_new.fileDB[col] = pd.to_numeric(dg_new.fileDB[col])
    print(dg_new.fileDB[dbg_cols])

    # identify new keys, save new indexes
    df1 = dg.fileDB['unique_key']
    df2 = dg_new.fileDB['unique_key']
    new_keys = pd.concat([df1, df2]).drop_duplicates(keep=False)
    new_idx = new_keys.index

    if len(new_keys) > 0:
        print('Found new files:')
        print(new_keys)

        print('Merging with existing fileDB:')
        df_upd = pd.concat([dg.fileDB, dg_new.fileDB.loc[new_idx]])
        print(df_upd[dbg_cols])

        if not batch_mode:
            print(
                "RunDB Check -- did you update runDB.json?  Are there any NaN's in filenames/paths above?"
            )
            ans = input('Save updated fileDB? (y/n):')
            if ans.lower() == 'y':
                dg.fileDB = df_upd
                dg.save_df(os.path.expandvars(dg.config['fileDB']))
                print('fileDB updated.')
        else:
            dg.fileDB = df_upd
            dg.save_df(os.path.expandvars(dg.config['fileDB']))
            print('fileDB updated.')
    else:
        print('No new files found!  current fileDB:')
        print(dg.fileDB[dbg_cols])
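
# Aside: the new-key detection above works because drop_duplicates(keep=False)
# removes every key present in both the old and new fileDB, leaving only the
# unseen ones.  Toy demonstration:
import pandas as pd

old_keys = pd.Series(['cyc100', 'cyc101'])
new_keys = pd.Series(['cyc100', 'cyc101', 'cyc102'])
print(pd.concat([old_keys, new_keys]).drop_duplicates(keep=False))  # cyc102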
Example #5
def main():
    doc = """
    Create and maintain the 'fileDB' needed by DataGroup.
    Provides options for first-time setup, and updating an existing fileDB.
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # initial setup
    arg('--mkdirs', action=st, help='run first-time directory setup')
    arg('--init', action=st, help='run first-time DAQ directory scan')

    # update mode (normal)
    arg('-u',
        '--update',
        action=st,
        help='rescan DAQ dir, update existing fileDB')
    arg('--orca', action=st, help='scan ORCA XML headers of DAQ files')
    arg('--rt', action=st, help='get runtimes (requires dsp file)')

    # TODO: add a "delete existing entries matching this query" mode,
    # so we don't have to rescan the whole fileDB if we make a change to
    # runDB.

    # options
    arg('-b', '--batch', action=st, help='batch mode, do not ask for user y/n')
    arg('--show', action=st, help='show current on-disk fileDB')
    arg('-o', '--over', action=st, help='overwrite existing fileDB')
    arg('--lh5_user', action=st, help='use $CAGE_LH5_USER over $CAGE_LH5')
    arg('-ff',
        '--fixit',
        action=st,
        help='special: run fix-it mode for fileDB')

    args = par.parse_args()

    # declare main DataGroup
    dg = DataGroup('$CAGE_SW/processing/cage.json')

    # -- run routines --
    if args.mkdirs: dg.lh5_dir_setup(args.lh5_user)
    if args.show: show_fileDB(dg)
    if args.init: init(dg)
    if args.update: update(dg, args.batch)
    if args.orca: scan_orca_headers(dg, args.over, args.batch)
    if args.rt: get_runtimes(dg, args.over, args.batch)
    if args.fixit: fix_fileDB(dg)
Example #6
def main():
    doc = """
    Post-GERDA Test (PGT): fileDB setup app.
    Creates an HDF5 file with pygama daq/raw/dsp file names & paths.
    Requires: 
    - LPGTA.json config file.
    - env var $LPGTA_DATA = /global/cfs/cdirs/m2676/data/lngs/pgt
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    arg('--create_dirs', action=st, help='run only once: create LH5 dirs')
    arg('--init_db', action=st, help='initialize primary ecal output file')
    arg('--runtime',
        action=st,
        help='add runtime col to fileDB (use raw file)')
    # arg('--update', action=st, help='update fileDB with new files')
    arg('--show_db', action=st, help='print current fileDB')

    args = par.parse_args()

    # load datagroup and show status
    dg = DataGroup('LPGTA.json')
    print('LPGTA_DATA:', os.environ.get('LPGTA_DATA'))

    # run routines
    if args.show_db: show_db(dg)
    if args.create_dirs: create_dirs(dg)
    if args.init_db: init_fileDB(dg)
    if args.runtime: get_runtimes(dg)
Example #7
def main():
    """
    """
    # set output file
    # f_super = './data/superpulses_oct2020.h5' # todo
    f_super = './data/superpulses_dec2020.h5'

    # Set 1 (Oct 2020)
    # https://elog.legend-exp.org/UWScanner/249 (one file per run)
    tb1 = StringIO("""
    V_pulser  run  E_keV  mV_firststage
    3.786 824  1460  300
    5.2   826  2000  420
    6.77  828  2615  544
    8.27  830  3180  660
    10    832  3840  800
    2.5   834  967   200
    1.3   836  500   106
    0.55  837  212   44
    0.16  838  60    13.2
    """)
    dfp1 = pd.read_csv(tb1, delim_whitespace=True)

    # Set 2 (Dec 2020)
    # https://elog.legend-exp.org/UWScanner/294
    tb2 = StringIO("""
    V_pulser  run  E_keV  mV_firststage
    0     1170  0     0
    3.76  1172  1460  316
    0.05  1173  15    7.2
    0.1   1174  31    11.0
    0.2   1175  62    19.4
    0.5   1176  167   44.0
    0.8   1177  277   69.6
    1     1178  352   85.6
    2     1179  744   172
    5     1180  1971  500
    8     1181  3225  740
    10    1182  4054  880
    """)
    dfp2 = pd.read_csv(tb2, delim_whitespace=True)

    # load fileDB to get dsp filenames
    # use DataGroup, with a temp hack to fix use of relative file paths
    pwd = os.getcwd()
    os.chdir('../processing/')
    dg = DataGroup('cage.json', load=True)
    os.chdir(pwd)

    # merge with fileDB (the pulser table's 'run' column holds cycle numbers)
    cycles = dfp2['run'].tolist()
    df_pulsDB = dg.fileDB.loc[dg.fileDB['cycle'].isin(cycles)].copy()
    df_pulsDB.reset_index(inplace=True)
    dfp2 = pd.concat([dfp2, df_pulsDB], axis=1)

    # -- run routines --
    # show_gain(dfp1, dfp2)
    # show_spectra(dfp2, dg)
    get_superpulses(dfp2, dg, f_super)
    get_transferfn(f_super)
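
# get_superpulses is defined elsewhere in this script.  For reference, a
# minimal sketch of the usual superpulse recipe (baseline-subtract, average,
# normalize), independent of the pygama file I/O; the 800-sample baseline
# window is an assumption:
import numpy as np

def superpulse(wfs, n_bl=800):
    """wfs: (n_wfs, n_samples) array of raw waveforms."""
    bl = wfs[:, :n_bl].mean(axis=1, keepdims=True)
    wfs_blsub = wfs - bl              # remove each waveform's baseline
    sp = wfs_blsub.mean(axis=0)       # average into one waveform
    return sp / np.amax(sp)           # normalize to unit amplitude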
Example #8
def test_datagroup():
    """
    current columns:
    ['unique_key', 'run', 'label', 'YYYYmmdd', 'hhmmss', 'rtp', 'daq_dir',
     'daq_file', 'cmap', 'runtype', 'raw_file', 'raw_path', 'dsp_file',
     'dsp_path', 'hit_file', 'hit_path', 'daq_size_GB', 'proc_group']
    """
    dg = DataGroup('LPGTA.json', load=True)
    query = "run==30 and rtp == 'calib' and proc_group==35"
    dg.fileDB.query(query, inplace=True)
    # dg.fileDB = dg.fileDB[-1:]
    # print(dg.fileDB.columns)

    # show what has been selected
    view_cols = [
        'run', 'label', 'YYYYmmdd', 'hhmmss', 'rtp', 'cmap', 'runtype',
        'daq_size_GB', 'proc_group'
    ]
    # print(dg.fileDB[view_cols].to_string())

    raw_path, raw_file = dg.fileDB[['raw_path', 'raw_file']].iloc[0]
    f_raw = f'{dg.lh5_dir}/{raw_path}/{raw_file}'

    if "sysn" in f_raw:
        tmp = {'sysn': 'geds'}  # hack for lpgta
        f_raw = f_raw.format_map(tmp)
        # f_dsp = f_dsp.format_map(tmp)

    # check_lh5_groups(f_raw)
    # load_raw_data_example(f_raw)
    check_timestamps(f_raw)
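
# Aside: the '{sysn}' placeholder survives in raw file names until a
# subsystem is chosen; str.format_map fills it in, as the hack above does.
# Quick demonstration (the path is hypothetical):
template = '/lh5/raw/{sysn}/run30_file.lh5'
print(template.format_map({'sysn': 'geds'}))  # /lh5/raw/geds/run30_file.lh5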
Example #9
def main():
    doc = """
    === optimizer.py ====================================================

    dsp optimization app, works with DataGroup

    === C. Wiseman (UW) =============================================
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # primary operations
    arg('-q',
        '--query',
        nargs=1,
        type=str,
        help="select group to analyze: -q 'cycle==1' ")
    arg('-e', '--energy', action=st, help='optimize energy trapezoid')
    arg('-d', '--dcr', action=st, help='optimize DCR parameter')

    args = par.parse_args()

    # -- setup --

    # load main DataGroup, select files to analyze
    dg = DataGroup('cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.fileDB.query(que, inplace=True)
    else:
        dg.fileDB = dg.fileDB[-1:]

    view_cols = [
        'run', 'cycle', 'daq_file', 'runtype', 'startTime', 'threshold'
    ]
    print(dg.fileDB[view_cols].to_string())
    # print(f'Found {len(dg.fileDB)} files.')

    # -- run routines --

    # TODO : we could split this code into "spectrum" (peak width) optimizations,
    # and "waveform" optimizations, where the FOM is a waveform, not a peak.
    # so like optimize_spec.py and optimize_wfs.py

    optimize_trap(dg)
    show_trap_results()
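
# optimize_trap is defined elsewhere in this script.  For reference, a
# minimal pure-numpy trapezoidal filter (rise window L samples, flat top G
# samples), the kind of filter whose parameters such an optimizer scans.
# This is a sketch, not the pygama implementation:
import numpy as np

def trap_filter(wf, L, G):
    """Apply a trapezoidal filter to a 1D waveform array."""
    cs = np.cumsum(wf)
    out = np.zeros(len(wf))
    for i in range(2*L + G, len(wf)):
        rise = cs[i] - cs[i - L]                # trailing L-sample window
        fall = cs[i - L - G] - cs[i - 2*L - G]  # leading L-sample window
        out[i] = (rise - fall) / L
    return out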
Example #10
def load_datagroup():
    """
    """
    # # -- HADES mode --
    # dg = DataGroup('HADES.json')
    # dg.load_df('HADES_fileDB.h5')
    #
    # # get the first 3 cycle files for det 60A, first th scan
    # que = "detSN=='I02160A' and scantype=='th_HS2_top_psa' and run==1"
    #
    # # det 60A, lat th scan
    # # que = "detSN=='I02160A' and scantype=='th_HS2_lat_psa'"
    #
    # # det 60B, first th scan
    # # que = "detSN=='I02160B' and scantype=='th_HS2_top_psa'"
    #
    # dg.file_keys.query(que, inplace=True)
    # dg.file_keys = dg.file_keys[:3]

    # # -- CAGE mode --
    # dg = DataGroup('CAGE.json')
    # dg.load_df('CAGE_fileDB.h5')
    # que = 'run==8'
    # dg.file_keys.query(que, inplace=True)

    # # -- LPGTA mode --
    # dg = DataGroup('LPGTA.json')
    # dg.load_df('LPGTA_fileDB.h5')
    # # process one big cal file (64 GB)
    # que = "run==18 and YYYYmmdd == '20200302' and hhmmss == '184529'"
    # dg.file_keys.query(que, inplace=True)

    # print('files to process:')
    # print(dg.file_keys)

    # -- SURF mode --
    dg = DataGroup(
        '/global/homes/c/clarkm18/SURFchar/pygama/experiments/surf/SURFCHAR.json'
    )
    dg.load_df(
        '/global/homes/c/clarkm18/SURFchar/pygama/experiments/surf/SURFCHAR_fileDB.h5'
    )

    # can add other filters here
    #    dg.file_keys = dg.file_keys[:2]

    return dg
Example #11
def main():
    """
    """
    dg = DataGroup('cage.json')
    
    # init(dg) # only run first time
    # update(dg) 
    scan_orca_headers(dg)
Example #12
def load_datagroup(query=None):
    """
    """
    dg = DataGroup('LPGTA.json')
    dg.load_df('LPGTA_fileDB.h5')
    
    # NOTE: for now, we have to edit this line to choose which files to process
    # process one big cal file (64 GB)
    #query = "run==18 and YYYYmmdd == '20200302' and hhmmss == '184529'"
    if query is not None: dg.file_keys.query(query, inplace=True)
    
    print('files to process:')
    print(dg.file_keys)
    
    # can add other filters here
    #dg.file_keys = dg.file_keys[:2]
    
    return dg
Example #13
def main():
    doc = """
    analysis of Aug 2020 OPPI+CAGE commissioning runs (138-141)
    tasks:
    - load calibration from energy_cal
    - show 1460 peak stability
    - show removal of low-e retrigger noise
    - look at waveforms near 5 MeV, confirm they're muon-related
    - look at low-e waveforms, examine noise
    - determine pz correction value
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'
    arg('-q',
        '--query',
        nargs=1,
        type=str,
        help="select file group to calibrate: -q 'run==1' ")
    #     arg('-u', '--lh5_user', action=st, help='user lh5 mode')
    args = par.parse_args()

    # load main DataGroup, select files from cmd line
    dg = DataGroup('cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.file_keys.query(que, inplace=True)
    else:
        dg.file_keys = dg.file_keys[-1:]
    view_cols = [
        'runtype', 'run', 'cycle', 'startTime', 'runtime', 'threshold'
    ]
    print(dg.file_keys[view_cols])

    # -- run routines --
    # show_raw_spectrum(dg)
    # show_cal_spectrum(dg)
    # show_wfs(dg)
    # NOTE: data_cleaning and data_cleaning_ucal are both broken right now,
    # I think due to the same error as before the fix at line 171 in setup.py,
    # but I'm not entirely sure how to fix it yet.
    # data_cleaning(dg)
    data_cleaning_ucal(dg)
Example #14
def main():
    doc = """
    analysis of Aug 2020 OPPI+CAGE commissioning runs (138-141)
    tasks:
    - load calibration from energy_cal
    - show 1460 peak stability
    - show removal of low-e retrigger noise
    - look at waveforms near 5 MeV, confirm they're muon-related
    - look at low-e waveforms, examine noise
    - determine pz correction value
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'
    arg('-q',
        '--query',
        nargs=1,
        type=str,
        help="select file group to calibrate: -q 'run==1' ")
    args = par.parse_args()

    # load main DataGroup, select files from cmd line
    dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.fileDB.query(que, inplace=True)
    else:
        dg.fileDB = dg.fileDB[-1:]

    view_cols = [
        'runtype', 'run', 'cycle', 'startTime', 'runtime', 'threshold'
    ]
    print(dg.fileDB[view_cols])

    # -- run routines --
    # show_raw_spectrum(dg)
    # show_cal_spectrum(dg)
    # show_wfs(dg)
    # data_cleaning(dg)
    # peak_drift(dg)
    # pole_zero(dg)
    label_alpha_runs(dg)
Example #15
def analyze_ornl():

    dg = DataGroup('ORNL.json')
    # dg.lh5_dir_setup()
    dg.scan_daq_dir()

    # expt-specific organization
    dg.file_keys.sort_values(['cycle'], inplace=True)
    dg.file_keys.reset_index(drop=True, inplace=True)

    dg.save_keys()
    dg.load_keys()
    print(dg.file_keys)
Example #16
def main():
    """
    Requires LPGTA.json config file. 
    Save an hdf5 file with pygama daq, raw, and dsp names + paths.
    """
    dg = DataGroup('LPGTA.json')

    # dg.lh5_dir_setup() # <-- run this once with create=True

    dg.scan_daq_dir()

    # -- organize and augment the dg.file_keys DataFrame --

    # run 1 & 2 files don't match template
    dg.file_keys.query('run > 2', inplace=True)

    dg.file_keys.sort_values(['run', 'YYYYmmdd', 'hhmmss'], inplace=True)
    dg.file_keys.reset_index(drop=True, inplace=True)

    def get_cmap(row):
        run_str = f"{row['run']:0>4d}"
        if run_str not in dg.runDB:
            print('Warning: no runDB entry for run', run_str)
            row['cmap'] = ''
            return row
        row['cmap'] = dg.runDB[run_str]['cmap']
        return row

    dg.file_keys = dg.file_keys.apply(get_cmap, axis=1)

    dg.file_keys['runtype'] = dg.file_keys['rtp']

    dg.get_lh5_cols()

    # save to file used by processing.py
    dg.save_df('./LPGTA_fileDB.h5')

    print(dg.file_keys)
Example #17
def main():
    doc = """
    Create and maintain the 'fileDB' needed by DataGroup.
    Provides options for first-time setup, and updating an existing fileDB.
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # initial setup
    arg('--mkdirs', action=st, help='run first-time directory setup')
    arg('--init', action=st, help='run first-time DAQ directory scan')

    # update mode (normal)
    arg('-u',
        '--update',
        action=st,
        help='rescan DAQ dir, update existing fileDB')
    arg('--orca', action=st, help='scan ORCA XML headers of DAQ files')
    arg('--rt', action=st, help='get runtimes (requires dsp file)')

    # options
    arg('-b', '--batch', action=st, help='batch mode, do not ask for user y/n')
    arg('--show', action=st, help='show current on-disk fileDB')
    arg('-o', '--over', action=st, help='overwrite existing fileDB')
    arg('--lh5_user', action=st, help='use $CAGE_LH5_USER over $CAGE_LH5')

    args = par.parse_args()

    # declare main DataGroup
    dg = DataGroup('hades.json')

    # -- run routines --
    if args.mkdirs: dg.lh5_dir_setup(args.lh5_user)
    if args.show: show_fileDB(dg)
    if args.init: init(dg)
    if args.update: update(dg, args.batch)
Example #18
def main():
    doc="""
    Detect jumps in the 1460 keV line in the uncalibrated trapEftp
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'
    arg('-q', '--query', nargs=1, type=str,
        help="select file group to calibrate: -q 'run==1' ")
    arg('-p', '--plot', action='store_true',
        help="plot option")
    arg('-u', '--user', action='store_true',
        help="use lh5 user directory")
    args = par.parse_args()
    plot = args.plot
    user = args.user

    # load main DataGroup, select files from cmd line
    dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.fileDB.query(que, inplace=True)
    else:
        dg.fileDB = dg.fileDB[-1:]
    
    global lh5_dir
    if user:
        lh5_dir = os.path.expandvars(dg.lh5_user_dir)
    else:
        lh5_dir = os.path.expandvars(dg.lh5_dir)
        
    global time_intervals
    time_intervals = 60
    print(find_jumps(dg, plot))
    
    return 0
Example #19
def getStartStop(run):
    """
    get the start time and stop time for a given run
    """

    # get run files
    dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
    str_query = f'run=={run} and skip==False'
    dg.fileDB.query(str_query, inplace=True)

    # get runtime, start time, runtype
    runtype_list = np.array(dg.fileDB['runtype'])
    runtype = runtype_list[0]
    rt_min = dg.fileDB['runtime'].sum()
    u_start = dg.fileDB.iloc[0]['startTime']
    t_start = pd.to_datetime(u_start, unit='s')

    u_stop = u_start + rt_min * 60  # runtime is in minutes
    t_stop = pd.to_datetime(u_stop, unit='s')

    print(f'start: {t_start}\n stop: {t_stop}')
    return (t_start, t_stop)
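
# Usage sketch (the run number is hypothetical):
#   t_start, t_stop = getStartStop(111)
# Note the factor of 60 above: 'runtime' is summed in minutes, while
# 'startTime' is a Unix timestamp in seconds.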
Example #20
def getDataFrame(run, user=True, hit=True, cal=True, lowE=False, dsp_list=[]):
    # get run files
    dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
    str_query = f'run=={run} and skip==False'
    dg.fileDB.query(str_query, inplace=True)

    # get runtime, start time, runtype
    runtype_list = np.array(dg.fileDB['runtype'])
    runtype = runtype_list[0]
    rt_min = dg.fileDB['runtime'].sum()
    u_start = dg.fileDB.iloc[0]['startTime']
    t_start = pd.to_datetime(u_start, unit='s')

    # get scan position

    if runtype == 'alp':
        alphaDB = pd.read_hdf(
            os.path.expandvars('$CAGE_SW/processing/alphaDB.h5'))
        scan_pos = alphaDB.loc[alphaDB['run'] == run]
        radius = np.array(scan_pos['radius'])[0]
        angle = np.array(scan_pos['source'])[0]
        rotary = np.array(scan_pos['rotary'])[0]
        #radius = int(radius)
        angle_det = int((-1 * angle) - 90)
        if rotary < 0:
            angle_det = int(angle + 270)
        print(f'Radius: {radius}; Angle: {angle_det}; Rotary: {rotary}')

    else:
        radius = 'n/a'
        angle = 'n/a'
        angle_det = 'n/a'
        rotary = 'n/a'

    # print(etype, etype_cal, run)
    # exit()

    print(f'user: {user}; cal: {cal}; hit: {hit}')

    # get data and load into df
    lh5_dir = dg.lh5_user_dir if user else dg.lh5_dir

    if cal == True:
        default_dsp_list = [
            'energy', 'trapEmax', 'trapEftp', 'trapEftp_cal', 'bl', 'bl_sig',
            'bl_slope', 'lf_max', 'A_10', 'AoE', 'dcr', 'tp_0', 'tp_10',
            'tp_90', 'tp_50', 'tp_80', 'tp_max', 'ToE'
        ]

    else:
        default_dsp_list = [
            'energy', 'trapEmax', 'trapEftp', 'bl', 'bl_sig', 'bl_slope',
            'lf_max', 'A_10', 'AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50',
            'tp_80', 'tp_max', 'ToE'
        ]

    if len(dsp_list) < 1:
        print(
            f'No options specified for DSP list! Using default: {default_dsp_list}'
        )
        dsp_list = default_dsp_list

    if hit == True:
        print('Using hit files')
        file_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB[
            'hit_file']

        if lowE == True:
            file_list = lh5_dir + dg.fileDB['hit_path'] + '/lowE/' + dg.fileDB[
                'hit_file']
            print(f'Using lowE calibration files \n {file_list}')

        # dsp_list already reflects the cal/uncal choice made above, so the
        # load is identical either way
        df = lh5.load_dfs(file_list, dsp_list,
                          'ORSIS3302DecoderForEnergy/hit')

    elif hit == False:
        print('Using dsp files')
        file_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB[
            'dsp_file']
        # same load for cal and uncal; dsp_list already differs
        df = lh5.load_dfs(file_list, dsp_list,
                          'ORSIS3302DecoderForEnergy/dsp')

    else:
        print("Don't know what to do here! Need to specify whether we're "
              'working with calibrated/uncalibrated data, and dsp or hit files.')

    return (df, dg, runtype, rt_min, radius, angle_det, rotary)
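
# Usage sketch (the run number is hypothetical):
#   df, dg, runtype, rt_min, radius, angle_det, rotary = getDataFrame(
#       111, user=True, hit=True, cal=True)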
Example #21
def timepoints(runs):

    for run in runs:
        # get run files
        dg = DataGroup('cage.json', load=True)
        str_query = f'run=={run} and skip==False'
        dg.fileDB.query(str_query, inplace=True)

        # get runtime, start time, runtype
        runtype_list = np.array(dg.fileDB['runtype'])
        runtype = runtype_list[0]
        rt_min = dg.fileDB['runtime'].sum()
        u_start = dg.fileDB.iloc[0]['startTime']
        t_start = pd.to_datetime(u_start, unit='s')

        # get scan position

        if runtype == 'alp':
            alphaDB = pd.read_hdf('alphaDB.h5')
            scan_pos = alphaDB.loc[alphaDB['run']==run]
            radius = np.array(scan_pos['radius'])[0]
            angle = np.array(scan_pos['angle'])[0]
            angle_det = 270 + angle
            print(f'Radius: {radius}; Angle: {angle}')

        else:
            radius = 'n/a'
            angle = 'n/a'
            angle_det = 'n/a'

        # get hit df
        lh5_dir = dg.lh5_user_dir #if user else dg.lh5_dir
        hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
        df_hit = lh5.load_dfs(hit_list, ['trapEmax', 'trapEmax_cal', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_50', 'tp_90', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

        # use baseline cut
        df_cut = df_hit.query('bl > 8500 and bl < 10000').copy()

        # create new DCR with a linear offset correction
        const = 0.0555
        df_cut['dcr_linoff'] = df_cut['dcr_raw'] + const*df_cut['trapEmax']

        # create 0-50% rise time
        df_cut['tp0_50'] = df_cut['tp_50'] - df_cut['tp_0']

        # create 10-90% rise time
        df_cut['10-90'] = df_cut['tp_90'] - df_cut['tp_10']

        # create 50-100% rise time
        df_cut['50-100'] = df_cut['tp_max'] - df_cut['tp_50']

        #-------------------------------------
        # Plots before alpha cuts
        #--------------------

        # DCR vs tp_50___________

        fig, ax = plt.subplots()
        fig.suptitle(f'DCR vs 50% rise time', horizontalalignment='center', fontsize=16)

        dlo, dhi, dpb = -100, 200, 0.6
        tlo, thi, tpb = 0, 700, 10

        nbx = int((dhi-dlo)/dpb)
        nby = int((thi-tlo)/tpb)

        alpha_dcr_hist = plt.hist2d(df_cut['dcr_linoff'], df_cut['tp0_50'], bins=[nbx,nby],
                range=[[dlo, dhi], [tlo, thi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('DCR (arb)', fontsize=16)
        ax.set_ylabel('tp 0-50 (ns)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()
        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.95, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_dcr_vs_tp0_50_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()

        # DCR vs 10-90___________

        fig, ax = plt.subplots()
        fig.suptitle(f'DCR vs 10-90% rise time', horizontalalignment='center', fontsize=16)

        dlo, dhi, dpb = -100, 200, 0.6
        tlo, thi, tpb = 0, 600, 10

        nbx = int((dhi-dlo)/dpb)
        nby = int((thi-tlo)/tpb)

        alpha_dcr_hist = plt.hist2d(df_cut['dcr_linoff'], df_cut['10-90'], bins=[nbx,nby],
                range=[[dlo, dhi], [tlo, thi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('DCR (arb)', fontsize=16)
        ax.set_ylabel('tp 10-90 (ns)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()
        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.95, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_dcr_vs_tp10_90_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()

        # DCR vs 50-100___________

        fig, ax = plt.subplots()
        fig.suptitle(f'DCR vs 50-100% rise time', horizontalalignment='center', fontsize=16)

        dlo, dhi, dpb = -100, 200, 0.6
        tlo, thi, tpb = 0, 1000, 10

        nbx = int((dhi-dlo)/dpb)
        nby = int((thi-tlo)/tpb)

        alpha_dcr_hist = plt.hist2d(df_cut['dcr_linoff'], df_cut['50-100'], bins=[nbx,nby],
                range=[[dlo, dhi], [tlo, thi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('DCR (arb)', fontsize=16)
        ax.set_ylabel('tp 50-100 (ns)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()
        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.95, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_dcr_vs_tp50_100_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()
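
# The three hist2d blocks above differ only in the y column, its range, and
# its label.  A sketch of a loop-based refactor, meant to be called with the
# df_cut defined above (plt and LogNorm from the module scope, as elsewhere):
def plot_dcr_vs_tp(df_cut, specs=(('tp0_50', 'tp 0-50 (ns)', 700),
                                  ('10-90', 'tp 10-90 (ns)', 600),
                                  ('50-100', 'tp 50-100 (ns)', 1000))):
    dlo, dhi, dpb = -100, 200, 0.6
    tlo, tpb = 0, 10
    for col, ylabel, thi in specs:
        fig, ax = plt.subplots()
        ax.hist2d(df_cut['dcr_linoff'], df_cut[col],
                  bins=[int((dhi-dlo)/dpb), int((thi-tlo)/tpb)],
                  range=[[dlo, dhi], [tlo, thi]], cmap='viridis',
                  norm=LogNorm())
        ax.set_xlabel('DCR (arb)', fontsize=16)
        ax.set_ylabel(ylabel, fontsize=16)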
Example #22
def get_hists(runs,
              user=False,
              hit=True,
              cal=True,
              etype='trapEftp',
              bl_cut=True):

    hist_arr = []
    if cal == True:
        etype_cal = etype + '_cal'

    for run in runs:
        # get run files
        dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
        str_query = f'run=={run} and skip==False'
        dg.fileDB.query(str_query, inplace=True)

        # get runtime, start time, runtype
        runtype_list = np.array(dg.fileDB['runtype'])
        runtype = runtype_list[0]
        rt_min = dg.fileDB['runtime'].sum()
        u_start = dg.fileDB.iloc[0]['startTime']
        t_start = pd.to_datetime(u_start, unit='s')

        # get scan position

        if runtype == 'alp':
            alphaDB = pd.read_hdf(
                os.path.expandvars('$CAGE_SW/processing/alphaDB.h5'))
            scan_pos = alphaDB.loc[alphaDB['run'] == run]
            radius = np.array(scan_pos['radius'])[0]
            angle = np.array(scan_pos['source'])[0]
            rotary = np.array(scan_pos['rotary'])[0]
            radius = int(radius)
            angle_det = int((-1 * angle) - 90)
            if rotary < 0:
                angle_det = int(angle + 270)
            print(f'Radius: {radius}; Angle: {angle_det}')

        else:
            radius = 'n/a'
            angle = 'n/a'
            angle_det = 'n/a'

        # print(etype, etype_cal, run)
        # exit()

        # get data and load into df
        lh5_dir = dg.lh5_user_dir if user else dg.lh5_dir

        if hit == True:
            print('Using hit files')
            file_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB[
                'hit_file']
            if run < 117 and cal == True:
                df = lh5.load_dfs(file_list, [
                    'energy', 'trapEmax', 'trapEmax_cal', 'bl', 'bl_sig',
                    'A_10', 'AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max',
                    'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/hit')
            elif run >= 117 and cal == True:
                df = lh5.load_dfs(file_list, [
                    'energy', 'trapEmax', 'trapEftp', 'trapEmax_cal',
                    'trapEftp_cal', 'bl', 'bl_sig', 'bl_slope', 'lf_max',
                    'A_10', 'AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50',
                    'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/hit')

            elif run < 117 and cal == False:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', 'bl', 'bl_sig', 'A_10', 'AoE', 'ts_sec',
                    'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90',
                    'tp_50', 'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/hit')
            elif run >= 117 and cal == False:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', 'bl', 'bl_sig', 'bl_slope', 'lf_max', 'A_10',
                    'AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80',
                    'tp_max'
                ], 'ORSIS3302DecoderForEnergy/hit')

        elif hit == False:
            print('Using dsp files')
            file_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB[
                'dsp_file']
            if run < 117 and cal == True:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', f'{etype_cal}', 'bl', 'bl_sig', 'A_10', 'AoE',
                    'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10',
                    'tp_90', 'tp_50', 'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/dsp')
            elif run >= 117 and cal == True:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', f'{etype_cal}', 'bl', 'bl_sig', 'bl_slope',
                    'lf_max', 'A_10', 'AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90',
                    'tp_50', 'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/dsp')

            elif run < 117 and cal == False:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', 'bl', 'bl_sig', 'A_10', 'AoE', 'ts_sec',
                    'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90',
                    'tp_50', 'tp_80', 'tp_max'
                ], 'ORSIS3302DecoderForEnergy/dsp')
            elif run >= 117 and cal == False:
                df = lh5.load_dfs(file_list, [
                    f'{etype}', 'bl', 'bl_sig', 'bl_slope', 'lf_max', 'A_10',
                    'AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80',
                    'tp_max'
                ], 'ORSIS3302DecoderForEnergy/dsp')

        else:
            print("Don't know what to do here! Need to specify whether we're "
                  'working with calibrated/uncalibrated data, and dsp or hit files.')

        if bl_cut == True:
            # use baseline cut
            print('Using baseline cut')
            if run < 117:
                bl_cut_lo, bl_cut_hi = 8500, 10000
            if run >= 117:
                bl_cut_lo, bl_cut_hi = 9700, 9760

            df_cut = df.query(f'bl > {bl_cut_lo} and bl < {bl_cut_hi}').copy()

        else:
            print('Not using baseline cut')
            df_cut = df

        # select energy type and energy range
        if cal == False:
            elo, ehi, epb = 0, 10000, 10  # entire energy range, trapEftp
            e_unit = ' (uncal)'
        elif cal == True:
            elo, ehi, epb = 0, 6000, 5
            etype = etype_cal
            e_unit = ' (keV)'

        # create energy histograms
        nbx = int((ehi - elo) / epb)  # number of bins from range and bin width
        ene_hist, bins = np.histogram(df_cut[etype],
                                      bins=nbx,
                                      range=([elo, ehi]))
        ene_hist_norm = np.divide(ene_hist, rt_min)

        hist_arr.append(ene_hist_norm)

    return (hist_arr, bins)
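
# Usage sketch (run numbers are hypothetical):
#   hists, bins = get_hists([110, 111], user=True, cal=True)
#   for h in hists:
#       plt.semilogy(bins[1:], h, ds='steps')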
Example #23
def plot_wfs(run, cycle, etype, user=False, hit=True, cal=True):
    """
    show waveforms in different enery regions.
    use the dsp or hit file to select events
    """
    dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
    str_query = f'cycle=={cycle} and skip==False'
    dg.fileDB.query(str_query, inplace=True)

    # get runtime, start time, runtype
    runtype_list = np.array(dg.fileDB['runtype'])
    runtype = runtype_list[0]
    rt_min = dg.fileDB['runtime'].sum()
    u_start = dg.fileDB.iloc[0]['startTime']
    t_start = pd.to_datetime(u_start, unit='s')


    # get data and load into df
    lh5_dir = dg.lh5_user_dir if user else dg.lh5_dir
    if cal==True:
        etype_cal = etype + '_cal'

    if hit==True:
        print('Using hit files')
        file_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
        if run<=117 and cal==True:
            df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')
        elif run>117 and cal==True:
            df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

        elif run<=117 and cal==False:
            df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')
        elif run>117 and cal==False:
            df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

    elif hit==False:
        print('Using dsp files')
        file_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
        if run<=117 and cal==True:
            df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')
        elif run>117 and cal==True:
            df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')

        elif run<=117 and cal==False:
            df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')
        elif run>117 and cal==False:
            df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')

    else:
        print("Don't know what to do here! Need to specify whether we're working with calibrated/uncalibrated data, and dsp or hit files.")


    waveforms = []

    n_eranges = 10   # number of steps between lower and upper energy limits
    nwfs = 50        # number of waveforms to average for superpulse
    emin = 500       # lower energy limit
    emax = 15000     # upper energy limit

    eranges = np.linspace(emin, emax, n_eranges) #set up energy slices
    for e in eranges:
        #get events within 1% of energy
        elo = e-(0.01*e)
        ehi = e+(0.01*e)
        idx = df[etype].loc[(df[etype] >= elo) & (df[etype] <= ehi)].index[:nwfs]
        raw_store = lh5.Store()
        tb_name = 'ORSIS3302DecoderForEnergy/raw'
        lh5_dir = dg.lh5_dir
        raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']
        f_raw = raw_list.values[0]  # FIXME: only works for one file right now
        data_raw, nrows = raw_store.read_object(tb_name, f_raw, start_row=0, n_rows=idx[-1]+1)

        wfs_all = (data_raw['waveform']['values']).nda
        wfs = wfs_all[idx.values, :]
        # baseline subtraction
        bl_means = wfs[:,:800].mean(axis=1)
        wf_blsub = (wfs.transpose() - bl_means).transpose()
        ts = np.arange(0, wf_blsub.shape[1]-1, 1)
        super_wf = np.mean(wf_blsub, axis=0)
        wf_max = np.amax(super_wf)
        superpulse = np.divide(super_wf, wf_max)
        waveforms.append(superpulse)

    fig, ax = plt.subplots(figsize=(9,8))
    ax = plt.axes()

    # set up colorbar to plot waveforms of different energies in different colors
    c = np.arange(0, n_eranges)
    norm = mpl.colors.Normalize(vmin=c.min(), vmax=c.max())
    cmap = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.jet)
    cmap.set_array([])

    for n in range(n_eranges):
        plt.plot(ts, waveforms[n][:len(waveforms[n])-1], c=cmap.to_rgba(n))

    cb = fig.colorbar(cmap, ticks=list(eranges))
    cb.set_label("Energy", ha = 'right', va='center', rotation=270, fontsize=20)
    cb.ax.tick_params(labelsize=18)

#     plt.xlim(3800, 8000)
#     plt.ylim(0.4, 1.01)
    plt.setp(ax.get_xticklabels(), fontsize=16)
    plt.setp(ax.get_yticklabels(), fontsize=16)
    plt.title(f'Waveforms, {emin}-{emax} trapEftp, {n_eranges} steps', fontsize=20)
    plt.xlabel('clock cycles', fontsize=20)
    plt.savefig(f'./plots/angleScan/waveforms/wfs_fallingEdge_cycle{cycle}.png', dpi=300)
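
# Usage sketch (run/cycle values are hypothetical):
#   plot_wfs(run=111, cycle=2000, etype='trapEftp', user=False, hit=True, cal=True)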
Example #24
def main():
    doc = """
    === pygama: energy_cal.py ==================================================

    energy calibration app

    - Initial guesses are determined by running 'check_raw_spectrum'
    - Uses a DataGroup to organize files and processing.
    - Metadata is handled in JSON format with 'legend-metadata' conventions.

    === T. Mathew, C. Wiseman (UW) =============================================
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # initial setup
    arg('--init_db', action=st, help='initialize primary ecal output file')
    arg('--raw', action=st, help='display/save uncalibrated energy histogram')

    # primary operations
    arg('-q',
        '--query',
        nargs=1,
        type=str,
        help="select file group to calibrate: -q 'run==1' ")
    arg('-p1', '--peakdet', action=st, help='first pass: peak detection')
    arg('-p2', '--peakfit', action=st, help='second pass: individual peak fit')
    arg('--run_all', action=st, help='run all passes, write to db')

    # options
    arg('-w', '--write_db', action=st, help='write results to ecalDB file')
    arg('-s', '--show_db', action=st, help='show ecalDB results file')
    arg('-p', '--show_plot', action=st, help='show debug plot')
    arg('-o',
        '--order',
        nargs=1,
        type=int,
        help='set cal poly order, default: 2')
    arg('-b',
        '--batch',
        action=st,
        help="batch mode: save & don't display plots")
    arg('--show_config', action=st, help='show current configuration')
    arg('--indiv', action=st, help='calibrate individual cycle files')
    arg('--match',
        nargs=1,
        type=str,
        help='set peak match mode (default: first)')
    arg('--epar',
        nargs=1,
        type=str,
        help="specify raw energy parameters: --epar 'asd sdf dfg' ")
    arg('--group',
        nargs=1,
        type=str,
        help="select alternate groupby: --group 'YYYY run' ")

    args = par.parse_args()

    # -- setup --

    # load main DataGroup, select files to calibrate
    dg = DataGroup('cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.fileDB.query(que, inplace=True)
    else:
        dg.fileDB = dg.fileDB[-1:]

    view_cols = [
        'run', 'cycle', 'daq_file', 'runtype', 'startTime', 'threshold',
        'stopTime', 'runtime'
    ]
    print(dg.fileDB[view_cols].to_string())
    print(len(dg.fileDB))
    # exit()

    # merge main and ecal config JSON as dicts
    config = dg.config
    with open(config['ecal_config']) as f:
        config = {**dg.config, **json.load(f)}

    # initialize JSON output file.  only run this once
    if args.init_db:
        init_ecaldb(config)
    try:
        # load ecal db in memory s/t the pretty on-disk formatting isn't changed
        db_ecal = db.TinyDB(storage=MemoryStorage)
        with open(config['ecaldb']) as f:
            raw_db = json.load(f)
            db_ecal.storage.write(raw_db)
    except (FileNotFoundError, json.JSONDecodeError):
        print('JSON database file not found or corrupted.  Rerun --init_db')
        exit()

    # set additional options, augmenting the config dict
    config['gb_cols'] = args.group[0].split(' ') if args.group else ['run']
    config['rawe'] = args.epar[0].split(' ') if args.epar else config['rawe_default']
    config['match_mode'] = args.match[0] if args.match else 'first'
    config['batch_mode'] = args.batch
    config['indiv'] = args.indiv
    config['show_plot'] = args.show_plot
    config['write_db'] = args.write_db
    config['pol_order'] = args.order[0] if args.order else 2
    config['mp_tol'] = 10  # raw peaks must be within this many keV
    config = {**config, **db_ecal.table('_file_info').all()[0]}

    if args.show_config:
        print('Current configuration:')
        pprint(config)
        print('\n')

    # -- raw spectrum check --
    if args.raw:
        check_raw_spectrum(dg, config, db_ecal)
        exit()

    # show status
    print(
        f'Ready to calibrate.\n'
        f"Output file: {config['ecaldb']} \n"
        'Calibrating raw energy parameters:', config['rawe'], '\n'
        'Current DataGroup:')
    print(dg.fileDB[['run', 'cycle', 'startTime', 'runtime']])
    print('Columns:', dg.fileDB.columns.values)

    # -- main calibration routines --
    if args.show_db: show_ecaldb(config)
    if args.peakdet: run_peakdet(dg, config, db_ecal)
    if args.peakfit: run_peakfit(dg, config, db_ecal)

    if args.run_all:
        config['write_db'] = True
        run_peakdet(dg, config, db_ecal)
        run_peakfit(dg, config, db_ecal)
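
# Typical invocations (assuming this script is saved as energy_cal.py; the
# query is hypothetical):
#   python energy_cal.py -q 'run==111' --raw       # check the raw spectrum first
#   python energy_cal.py -q 'run==111' -p1 -p2 -w  # peakdet + peakfit, write ecalDB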
Example #25
def analyze_cage():

    dg = DataGroup('CAGE.json')
    dg.lh5_dir_setup()

    dg.scan_daq_dir()

    # -- experiment-specific choices --
    dg.file_keys.sort_values(['cycle'], inplace=True)
    dg.file_keys.reset_index(drop=True, inplace=True)

    def get_cyc_info(row):
        """
        map cycle numbers to physics runs, and identify detector
        """
        cyc = row['cycle']
        for run, cycles in dg.runDB.items():
            tmp = cycles[0].split(',')
            for rng in tmp:
                if '-' in rng:
                    clo, chi = [int(x) for x in rng.split('-')]
                    if clo <= cyc <= chi:
                        row['run'] = run
                        break
                else:
                    clo = int(rng)
                    if cyc == clo:
                        row['run'] = run
                        break
        # label the detector ('runtype' matches 'run_types' in config file)
        if cyc < 126:
            row['runtype'] = 'oppi'
        else:
            row['runtype'] = 'icpc'
        return row

    dg.file_keys = dg.file_keys.apply(get_cyc_info, axis=1)

    dg.get_lh5_cols()

    for col in ['run']:
        dg.file_keys[col] = pd.to_numeric(dg.file_keys[col])

    print(dg.file_keys)

    dg.save_df('CAGE_fileDB.h5')
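
# Aside: get_cyc_info parses runDB entries like '10-15,20' into cycle ranges.
# A standalone demonstration of that parsing logic:
def cycle_in_spec(spec, cyc):
    for rng in spec.split(','):
        if '-' in rng:
            clo, chi = (int(x) for x in rng.split('-'))
            if clo <= cyc <= chi:
                return True
        elif cyc == int(rng):
            return True
    return False

print(cycle_in_spec('10-15,20', 12), cycle_in_spec('10-15,20', 16))  # True False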
Example #26
def main():
    """
    """
    dg = DataGroup('oppi.json')
Example #27
def analyze_hades():
    """
    """
    dg = DataGroup('HADES.json')

    dg.lh5_dir_setup()
    # dg.lh5_dir_setup(create=True)

    dg.scan_daq_dir()

    # -- experiment-specific stuff --
    dg.file_keys['runtype'] = dg.file_keys['detSN']

    # add a sortable timestamp column
    def get_ts(row):
        ts = f"{row['YYmmdd']} {row['hhmmss']}"
        row['date'] = pd.to_datetime(ts, format='%y%m%d %H%M%S')
        return row

    dg.file_keys = dg.file_keys.apply(get_ts, axis=1)
    dg.file_keys.sort_values('date', inplace=True)

    dg.get_lh5_cols()
    print(dg.file_keys['raw_file'].values)

    dg.save_df('HADES_fileDB.h5')
Example #28
def dcr_AvE(runs, user=False, hit=True, cal=True, etype='trapEmax', cut=True):

    for run in runs:
        # get run files
        dg = DataGroup('$CAGE_SW/processing/cage.json', load=True)
        str_query = f'run=={run} and skip==False'
        dg.fileDB.query(str_query, inplace=True)

        # get runtime, start time, runtype
        runtype_list = np.array(dg.fileDB['runtype'])
        runtype = runtype_list[0]
        rt_min = dg.fileDB['runtime'].sum()
        u_start = dg.fileDB.iloc[0]['startTime']
        t_start = pd.to_datetime(u_start, unit='s')

        # get scan position

        if runtype == 'alp':
            alphaDB = pd.read_hdf(os.path.expandvars('$CAGE_SW/processing/alphaDB.h5'))
            scan_pos = alphaDB.loc[alphaDB['run']==run]
            radius = np.array(scan_pos['radius'])[0]
            angle = np.array(scan_pos['source'])[0]
            rotary = np.array(scan_pos['rotary'])[0]
            radius = int(radius)
            angle_det = int((-1*angle) - 90)
            if rotary <0:
                angle_det = int(angle + 270)
            print(f'Radius: {radius}; Angle: {angle_det}')

        else:
            radius = 'n/a'
            angle = 'n/a'
            angle_det = 'n/a'

        if cal==True:
            etype_cal = etype+'_cal'



        # get data and load into df
        lh5_dir = dg.lh5_user_dir if user else dg.lh5_dir

        if hit==True:
            print('Using hit files')
            file_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
            if run<=117 and cal==True:
                df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')
            elif run>117 and cal==True:
                df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

            elif run<=117 and cal==False:
                df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')
            elif run>117 and cal==False:
                df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

        elif hit==False:
            print('Using dsp files')
            file_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
            if run<=117 and cal==True:
                df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')
            elif run>117 and cal==True:
                df = lh5.load_dfs(file_list, [f'{etype}', f'{etype_cal}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')

            elif run<=117 and cal==False:
                df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')
            elif run>117 and cal==False:
                df = lh5.load_dfs(file_list, [f'{etype}', 'bl','bl_sig', 'bl_slope', 'lf_max', 'A_10','AoE', 'dcr', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/dsp')

        else:
            print("Don't know what to do here! Need to specify whether we're working with calibrated/uncalibrated data, and dsp or hit files.")



        # use baseline cut
        if run <=117:
            bl_cut_lo, bl_cut_hi = 8500, 10000
        if run>117:
            bl_cut_lo, bl_cut_hi = 9700, 9760

        df_cut = df.query(f'bl > {bl_cut_lo} and bl < {bl_cut_hi}').copy()

        # create new DCR with a run-dependent linear (or quadratic) offset
        if run <= 86:
            const = 0.0555
            df_cut['dcr_linoff'] = df_cut['dcr_raw'] + const*df_cut['trapEmax']

        if run > 86 and run <= 117:
            const = -0.0225
            df_cut['dcr_linoff'] = df_cut['dcr_raw'] + const*df_cut['trapEmax']

        if run > 117:
            const = -0.0003
            const2 = -0.0000003
            df_cut['dcr_linoff'] = df_cut['dcr'] + const*(df_cut['trapEftp']) + const2*(df_cut['trapEftp'])**2



        # create 0-50% rise time
        df_cut['tp0_50'] = df_cut['tp_50'] - df_cut['tp_0']

        # create cut for alphas
        # alpha_cut = 'dcr_linoff > 25 and dcr_linoff < 200 and tp0_50 > 100 and tp0_50 < 400 and trapEmax_cal < 6000'
        # new_dcr_cut = df_cut.query(alpha_cut).copy()
        # len(new_dcr_cut)

        #-------------------------------------
        # Plots before alpha cuts
        #--------------------

        # select energy type and energy range
        if cal==False:
            elo, ehi, epb = 0, 10000, 10  # entire energy range, trapEftp
            e_unit = ' (uncal)'
        elif cal==True:
            elo, ehi, epb = 0, 6000, 10
            etype=etype_cal
            e_unit = ' (keV)'

        # Make (calibrated) energy spectrum_________

        fig, ax = plt.subplots()
        fig.suptitle('Energy', horizontalalignment='center', fontsize=16)

        nbx = int((ehi-elo)/epb)

        energy_hist, bins = np.histogram(df_cut[etype], bins=nbx,
                range=[elo, ehi])
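        # normalize by the runtime (rt_min is in minutes) to get a rate in counts/sec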
        energy_rt = np.divide(energy_hist, rt_min * 60)

        plt.semilogy(bins[1:], energy_rt, ds='steps', c='b', lw=1) #, label=f'{etype}'

        ax.set_xlabel(f'{etype+e_unit}', fontsize=16)
        ax.set_ylabel('counts/sec', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})

        # plt.legend()
        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_energy_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_energy_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_energy_run{run}.png', dpi=200)
        plt.clf()
        plt.close()


        # AoE vs E---------------------------------
        fig, ax = plt.subplots()
        alo, ahi, apb = 0.0, 0.09, 0.0001
        if run>=60:
            alo, ahi, apb = 0.005, 0.0905, 0.0001
        if run>117:
            alo, ahi, apb = 0.0, 0.125, 0.001

        nbx = int((ehi-elo)/epb)
        nby = int((ahi-alo)/apb)

        fig.suptitle('A/E vs Energy', horizontalalignment='center', fontsize=16)

        h = plt.hist2d(df_cut[etype], df_cut['AoE'], bins=[nbx,nby],
                    range=[[elo, ehi], [alo, ahi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel(f'{etype+e_unit}', fontsize=16)
        ax.set_ylabel('A/E (arb)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)


        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})

        # plt.legend()
        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_AoE_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_AoE_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_AoE_run{run}.png', dpi=200)
        # plt.show()

        plt.clf()
        plt.close()

        # DCR vs E___________

        fig, ax = plt.subplots()

        # note: run 117 was previously uncovered (run<117 then run>117), which
        # left the DCR range undefined; runs below 60 are still not handled here
        if run >= 60 and run <= 117:
            dlo, dhi, dpb = -100, 300, 0.6
        elif run > 117:
            dlo, dhi, dpb = -20., 60, 0.1

        nbx = int((ehi-elo)/epb)
        nby = int((dhi-dlo)/dpb)

        fig.suptitle('DCR vs Energy', horizontalalignment='center', fontsize=16)

        h = plt.hist2d(df_cut[etype], df_cut['dcr_linoff'], bins=[nbx,nby],
                    range=[[elo, ehi], [dlo, dhi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('Energy (keV)', fontsize=16)
        ax.set_ylabel('DCR (arb)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()

        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_dcr_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_DCR_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_DCR_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()

        # DCR vs A/E___________

        fig, ax = plt.subplots()
        nbx = int((ahi-alo)/apb)
        nby = int((dhi-dlo)/dpb)

        fig.suptitle('DCR vs A/E', horizontalalignment='center', fontsize=16)

        h = plt.hist2d(df_cut['AoE'], df_cut['dcr_linoff'], bins=[nbx,nby],
                    range=[[alo, ahi], [dlo, dhi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('A/E (arb)', fontsize=16)
        ax.set_ylabel('DCR (arb)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=12)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()
        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_AoE_vs_dcr_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_AoEvDCR_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_AoEvDCR_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()

        # DCR vs tp_50___________

        fig, ax = plt.subplots()
        fig.suptitle('DCR vs 50% rise time', horizontalalignment='center', fontsize=16)

        tlo, thi, tpb = 0, 800, 10

        nbx = int((dhi-dlo)/dpb)
        nby = int((thi-tlo)/tpb)

        alpha_dcr_hist = plt.hist2d(df_cut['dcr_linoff'], df_cut['tp0_50'], bins=[nbx,nby],
                range=[[dlo, dhi], [tlo, thi]], cmap='viridis', norm=LogNorm())

        cb = plt.colorbar()
        cb.set_label("counts", ha = 'right', va='center', rotation=270, fontsize=14)
        cb.ax.tick_params(labelsize=12)
        ax.set_xlabel('DCR (arb)', fontsize=16)
        ax.set_ylabel('tp 0-50 (ns)', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        # plt.legend()
        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.95, 'pad': 10})

        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_dcr_vs_tp0_50_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_DCRvTp050_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_DCRvTp050_run{run}.png', dpi=200)
        # plt.show()
        plt.clf()
        plt.close()

        # 1D AoE_________

        fig, ax = plt.subplots()
        fig.suptitle('A/E', horizontalalignment='center', fontsize=16)

        # recompute nbx for the A/E axis (it was last set for the DCR axis above)
        nbx = int((ahi-alo)/apb)
        aoe_hist, bins = np.histogram(df_cut['AoE'], bins=nbx,
                range=[alo, ahi])

        plt.semilogy(bins[1:], aoe_hist, ds='steps', c='b', lw=1) #, label=f'{etype}'

        ax.set_xlabel('A/E (arb)', fontsize=16)
        ax.set_ylabel('counts', fontsize=16)
        plt.setp(ax.get_xticklabels(), fontsize=14)
        plt.setp(ax.get_yticklabels(), fontsize=14)

        ax.text(0.95, 0.83, f'r = {radius} mm \ntheta = {angle_det} deg', verticalalignment='bottom',
                    horizontalalignment='right', transform=ax.transAxes, color='green', fontsize=14, bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})

        # plt.legend()
        plt.title(f'\n{runtype} run {run}, {rt_min:.2f} mins', fontsize=12)
        plt.tight_layout()
        # plt.savefig(f'./plots/normScan/cal_normScan/{runtype}_1d_aoe_run{run}.png', dpi=200)
        if runtype=='alp':
            plt.savefig(f'./plots/angleScan/{runtype}_1dAoE_{radius}mm_{angle_det}deg_run{run}.png', dpi=200)
        elif runtype=='bkg':
            plt.savefig(f'./plots/angleScan/{runtype}_1dAoE_run{run}.png', dpi=200)
        plt.clf()
        plt.close()
Example #29
def plot_energy(runs):
    radius_arr_1 = []
    mean_energy_arr_1 = []
    std_energy_arr_1 = []
    mean_dcr_arr_1 = []
    std_dcr_arr_1 = []
    count_arr_1 = []

    radius_arr_2 = []
    mean_energy_arr_2 = []
    std_energy_arr_2 = []
    mean_dcr_arr_2 = []
    std_dcr_arr_2 = []
    count_arr_2 = []
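    # the "_1" arrays collect scan 1 (radii at multiples of 5 mm) and the
    # "_2" arrays collect scan 2 (all other radii); see the split further down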


    for run in runs:
        # get run files
        dg = DataGroup('cage.json', load=True)
        str_query = f'run=={run} and skip==False'
        dg.fileDB.query(str_query, inplace=True)

        # get runtime, start time, and run type
        runtype_list = np.array(dg.fileDB['runtype'])
        runtype = runtype_list[0]
        rt_min = dg.fileDB['runtime'].sum()
        u_start = dg.fileDB.iloc[0]['startTime']
        t_start = pd.to_datetime(u_start, unit='s')

        # get scan position

        if runtype == 'alp':
            alphaDB = pd.read_hdf('alphaDB.h5')
            scan_pos = alphaDB.loc[alphaDB['run']==run]
            radius = np.array(scan_pos['radius'])[0]
            angle = np.array(scan_pos['angle'])[0]
            angle_det = 270 + angle
            print(f'Radius: {radius}; Angle: {angle}')

        else:
            radius = 'n/a'
            angle = 'n/a'
            angle_det = 'n/a'

        # get hit df
        lh5_dir = dg.lh5_user_dir  # swap in dg.lh5_dir to read from the production location
        hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
        df_hit = lh5.load_dfs(hit_list, ['trapEmax', 'trapEmax_cal', 'bl','bl_sig','A_10','AoE', 'ts_sec', 'dcr_raw', 'dcr_ftp', 'dcr_max', 'tp_0', 'tp_10', 'tp_90', 'tp_50', 'tp_80', 'tp_max'], 'ORSIS3302DecoderForEnergy/hit')

        # use baseline cut
        df_cut = df_hit.query('bl > 8500 and bl < 10000').copy()

        # create new DCR parameter with the linear energy dependence removed
        const = 0.0555
        df_cut['dcr_linoff'] = df_cut['dcr_raw'] + const*df_cut['trapEmax']

        # create 0-50% rise time parameter
        df_cut['tp0_50'] = df_cut['tp_50']- df_cut['tp_0']

        # create cut for alphas
        alpha_cut = 'dcr_linoff > 25 and dcr_linoff < 200 and tp0_50 > 100 and tp0_50 < 400 and trapEmax_cal < 6000'
        new_dcr_cut = df_cut.query(alpha_cut).copy()
        # len(new_dcr_cut)
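        # the box above selects the alpha population: elevated DCR, 0-50% rise
        # times between 100 and 400, and calibrated energy below 6 MeV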

        alpha_energy = np.array(new_dcr_cut['trapEmax_cal'])
        mean_energy = np.mean(alpha_energy)
        std_energy = np.std(alpha_energy)
#         std_energy = np.sqrt(len(new_dcr_cut['trapEmax']))

        alpha_dcr = np.array(new_dcr_cut['dcr_linoff'])
        mean_dcr = np.mean(alpha_dcr)
        std_dcr = np.std(alpha_dcr)
#         std_dcr = np.sqrt((len(new_dcr_cut['dcr_linoff'])))
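#         note: std here is the spread of the distribution; the uncertainty on
#         the mean itself would be the standard error, std/np.sqrt(N)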

        print(f'Energy std: {std_energy} \n DCR std: {std_dcr}')

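        # split by scan: radii at multiples of 5 mm belong to scan 1, all others
        # to scan 2 (assumes alpha runs, where radius is numeric; for 'bkg' runs
        # radius is the string 'n/a' and the modulo below would raise a TypeError)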
        if radius%5 == 0:
            radius_arr_1.append(radius)
            mean_energy_arr_1.append(mean_energy)
            std_energy_arr_1.append(std_energy)
            mean_dcr_arr_1.append(mean_dcr)
            std_dcr_arr_1.append(std_dcr)
            count_arr_1.append(len(alpha_energy))

        else:
            radius_arr_2.append(radius)
            mean_energy_arr_2.append(mean_energy)
            std_energy_arr_2.append(std_energy)
            mean_dcr_arr_2.append(mean_dcr)
            std_dcr_arr_2.append(std_dcr)
            count_arr_2.append(len(alpha_energy))

    # make plots with errorbars
    fig, ax = plt.subplots()

    energy_plot = plt.errorbar(radius_arr_1, mean_energy_arr_1, yerr=std_energy_arr_1, marker = '.', ls='none', color = 'red', label='Scan 1')
    ax.set_xlabel('Radial position (mm)', fontsize=16)
    ax.set_ylabel('Mean energy (keV)', fontsize=16)
    plt.setp(ax.get_xticklabels(), fontsize=14)
    plt.setp(ax.get_yticklabels(), fontsize=14)


#     plt.yscale('log')
    plt.title('Mean energy of alphas by radial position \nnormal incidence', fontsize=16)


    plt.errorbar(radius_arr_2, mean_energy_arr_2, yerr=std_energy_arr_2, marker = '.', ls='none', color = 'blue', label='Scan 2')
    plt.legend()
    plt.tight_layout()

    plt.savefig('./plots/normScan/cal_normScan/errorbars_energy_deg.png', dpi=200)

    plt.clf()
    plt.close()

    fig, ax = plt.subplots()
    dcr_plot = plt.errorbar(radius_arr_1, mean_dcr_arr_1, yerr=std_dcr_arr_1, marker = '.', ls='none', color = 'red', label='Scan 1')
    ax.set_xlabel('Radial position (mm)', fontsize=16)
    ax.set_ylabel('Mean DCR value (arb)', fontsize=16)
    plt.setp(ax.get_xticklabels(), fontsize=14)
    plt.setp(ax.get_yticklabels(), fontsize=14)

    #    plt.yscale('log')
    plt.title('Mean DCR value by radial position \nnormal incidence', fontsize=16)


    plt.errorbar(radius_arr_2, mean_dcr_arr_2, yerr=std_dcr_arr_2, marker = '.', ls='none', color = 'blue', label='Scan 2')
    plt.legend()
    plt.tight_layout()

    plt.savefig('./plots/normScan/cal_normScan/errorbars_dcr_avg.png', dpi=200)

    plt.clf()
    plt.close()

    # make plots without errorbars
    fig, ax = plt.subplots()
    energy_plot = plt.plot(radius_arr_1, mean_energy_arr_1, '.r', label='Scan 1')
    ax.set_xlabel('Radial position (mm)', fontsize=16)
    ax.set_ylabel('Mean energy (keV)', fontsize=16)
    plt.setp(ax.get_xticklabels(), fontsize=14)
    plt.setp(ax.get_yticklabels(), fontsize=14)


#     plt.yscale('log')
    plt.title('Mean energy of alphas by radial position \nnormal incidence', fontsize=16)


    plt.plot(radius_arr_2, mean_energy_arr_2, '.b', label='Scan 2')
    plt.legend()
    plt.tight_layout()

    plt.savefig('./plots/normScan/cal_normScan/energy_deg.png', dpi=200)

    plt.clf()
    plt.close()

    fig, ax = plt.subplots()
    dcr_plot = plt.plot(radius_arr_1, mean_dcr_arr_1, '.r', label='Scan 1')
    ax.set_xlabel('Radial position (mm)', fontsize=16)
    ax.set_ylabel('Mean DCR value (arb)', fontsize=16)
    plt.setp(ax.get_xticklabels(), fontsize=14)
    plt.setp(ax.get_yticklabels(), fontsize=14)

    #    plt.yscale('log')
    plt.title('Mean DCR value by radial position \nnormal incidence', fontsize=16)

    plt.plot(radius_arr_2, mean_dcr_arr_2, '.b', label='Scan 2')
    plt.legend()
    plt.tight_layout()

    plt.savefig('./plots/normScan/cal_normScan/dcr_avg.png', dpi=200)

    # plt.clf()
    plt.close()
Example #30
def main():
    doc = """
    Energy calibration app for CAGE.

    Usage:
    First, generate an up-to-date fileDB (setup.py) and DSP files (processing.py).
      You will need to run setup.py with --orca and --rt options.
    
    Select a group of files to calibrate with a query:
        $ ./energy_cal.py -q 'run==234 [and cycle <= 345, etc.]'
    
    Check the raw spectrum with '--raw' (default estimator: trapEmax)
    
    Adjust the JSON configuration file as needed (default: config_ecal.json)
    
    Run "peakdet", which calculates up to 2nd-order calibration constants for
      each channel, y = p0 +  p1 * x  +  p2 * x**2, and saves them as tables
      in our ecalDB file.
    
    Run "peakfit", which fits each peak of interest to a peakshape function 
      (default: gaussian + linear step function), computes calibration 
      constants, and resolution curves, and saves results to ecalDB.

    Results are saved ('-w' option) to JSON format with 'legend-metadata' 
      style conventions.
    
    C. Wiseman, T. Mathew, G. Othman, J. Detwiler
    """
    rthf = argparse.RawTextHelpFormatter
    par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
    arg, st, sf = par.add_argument, 'store_true', 'store_false'

    # declare group of files of interest.  supports sql-style(ish) queries
    arg('-q',
        '--query',
        nargs=1,
        type=str,
        help="select file group to calibrate: -q 'run==1 and [condition]' ")

    # primary ops
    arg('--raw', action=st, help='display/save uncalibrated energy histogram')
    arg('-pd', '--peakdet', action=st, help='first pass: peak detection')
    arg('-pi',
        '--peakinp',
        nargs=1,
        type=str,
        help='first pass: manually input peaks')
    arg('-pf', '--peakfit', action=st, help='second pass: individual peak fit')
    arg('--all', action=st, help='run all passes, write to DB')

    # options
    arg('-v', '--verbose', nargs=1, help='set verbosity (default: 0)')
    arg('--init_db', action=st, help='initialize ecal database JSON file')
    arg('-u', '--lh5_user', action=st, help='user lh5 mode')
    arg('-w', '--write_db', action=st, help='write results to ecalDB file')
    arg('-s',
        '--show_db',
        nargs='*',
        help='show ecalDB, optionally specify table name')
    arg('-p', '--show_plot', action=st, help='show debug plot')
    arg('-b',
        '--batch',
        action=st,
        help="batch mode: save & don't display plots")
    arg('--show_config', action=st, help='show current configuration')
    arg('--match',
        nargs=1,
        type=str,
        help='set peak match mode (default: first)')
    arg('--pol', nargs=1, type=int, help='set peakdet/peakinput pol order')
    arg('--epar',
        nargs=1,
        type=str,
        help="specify raw energy parameters: --epar 'asd sdf dfg' ")
    arg('-gb',
        '--group',
        nargs=1,
        type=str,
        help="select alternate groupby: -gb 'run cycle' ")
    arg('-ff',
        '--fit_func',
        nargs=1,
        type=str,
        help='set peakfit fit function (default is gaus+step)')
    arg('--spec',
        nargs=1,
        type=int,
        help='select alternate set of peaks to calibrate')
    args = par.parse_args()

    # -- set up fileDB and config dictionary --

    # query the fileDB & modify in-memory to only contain files matching our query
    dg = DataGroup('cage.json', load=True)
    if args.query:
        que = args.query[0]
        dg.fileDB.query(que, inplace=True)
        show_all = False
    else:
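        # no query given: default to the most recent entry in the fileDB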
        dg.fileDB = dg.fileDB[-1:]
        show_all = True

    # load ecal config file
    f_ecal = dg.config['ecal_default']
    if args.spec:
        spec_id = args.spec[0]
        if spec_id == 1:
            f_ecal = './metadata/config_ecal_ba.json'
            print(f'Loading Ba133 calibration parameters from: {f_ecal}')
        else:
            print('Error, unknown calib mode:', args.spec[0])
    else:
        print(f'Loading default calibration parameters from:\n  {f_ecal}')

    # merge main and ecal config dicts
    with open(os.path.expandvars(f_ecal)) as f:
        config = {**dg.config, **json.load(f)}

    # initialize ecalDB JSON output file.  only run this once
    if args.init_db:
        init_ecaldb(config)
        exit()
    try:
        # load the ecalDB into memory so that the pretty on-disk formatting isn't changed
        db_ecal = db.TinyDB(storage=MemoryStorage)
        with open(config['ecaldb']) as f:
            raw_db = json.load(f)
            db_ecal.storage.write(raw_db)
    except Exception:
        print('JSON database file not found or corrupted.  Rerun with --init_db.')
        exit()

    # set more options -- everything should be loaded into the 'config' dict
    config['gb_cols'] = args.group[0].split(' ') if args.group else ['run']
    if config['gb_cols'][0] != 'run':
        print(
            "Error, first groupby column must be 'run'!  Try -gb 'run cycle'")
        exit()

    # set input data directory (CAGE_LH5, CAGE_LH5_USER, or cwd)
    lh5_dir = dg.lh5_user_dir if args.lh5_user else dg.lh5_dir
    config['lh5_dir'] = os.path.expandvars(lh5_dir)
    config['pol'] = args.pol if args.pol else [2]
    config['rawe'] = args.epar[0].split(
        ' ') if args.epar else config['rawe_default']
    config['match_mode'] = args.match if args.match else 'first'
    config['mp_tol'] = 100  # detected raw peaks must match expected peaks within this many keV
    config['batch_mode'] = bool(args.batch)
    config['show_plot'] = bool(args.show_plot)
    config['write_db'] = bool(args.write_db)
    if args.peakinp: config['input_id'] = args.peakinp[0]
    config['input_peaks'] = './metadata/input_peaks.json'
    config['fit_func'] = args.fit_func[0] if args.fit_func else 'gauss_step'
    config['verbose'] = args.verbose[0] if args.verbose else 0

    # include fields from ecalDB in the config dict
    dg.config = {**config, **db_ecal.table('_file_info').all()[0]}

    # -- show status --

    ecal_cols = [
        'run', 'cycle', 'daq_file', 'runtype', 'startTime', 'threshold',
        'stopTime', 'runtime'
    ]

    if dg.fileDB is None:
        print('Warning, no fileDB is loaded.')

    elif not all(x in dg.fileDB.columns for x in ecal_cols):
        print('Error, fileDB is missing some columns.  Did you run setup.py?')
        print('Current available columns:\n', dg.fileDB.columns)
        exit()

    print(
        f'Ready to calibrate.\n'
        f"Output file: {config['ecaldb']}\n"
        f"Calibrating raw energy parameters: {config['rawe']}\n"
        f'Current data group ({len(dg.fileDB)} files) --->>')
    print(dg.fileDB[ecal_cols], '\n')

    if args.show_config:
        print('Current energy_cal config:')
        pprint(config)
        print('\n')

    if args.show_db is not None:
        tables = args.show_db  # list
        show_ecaldb(dg, tables, args.query, show_all)

    # -- main routines --

    if args.raw:
        check_raw_spectrum(dg, config, db_ecal)

    if args.peakdet:
        run_peakdet(dg, config, db_ecal)

    if args.peakfit:
        run_peakfit(dg, config, db_ecal)

    if args.all:
        config['write_db'] = True
        run_peakdet(dg, config, db_ecal)
        run_peakfit(dg, config, db_ecal)