Code example #1
def main():
    description = (
        "Generate the pedestals from an R0 file, subtract it from another "
        "R0 file, and plot the comparison of residuals from different "
        "pedestal methods")
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=Formatter)
    parser.add_argument('-f',
                        '--file',
                        dest='r0_path',
                        required=True,
                        help='R0 file to obtain residuals from')
    parser.add_argument('-p',
                        '--pedestal',
                        dest='pedestal_r0_path',
                        required=True,
                        help='R0 file to generate pedestal from')
    parser.add_argument('-o',
                        '--output',
                        dest='output_dir',
                        required=True,
                        help='directory to store output plots')
    args = parser.parse_args()

    r0_path = args.r0_path
    pedestal_r0_path = args.pedestal_r0_path
    output_dir = args.output_dir

    create_directory(output_dir)
    reader_ped = TIOReader(pedestal_r0_path, max_events=100000)
    reader_res = TIOReader(r0_path, max_events=1000)

    # Generate Pedestals
    pedestal = PedestalTargetCalib(reader_ped.n_pixels, reader_ped.n_samples,
                                   reader_ped.n_cells)
    desc = "Generating pedestal"
    for wfs in tqdm(reader_ped, total=reader_ped.n_events, desc=desc):
        if wfs.missing_packets:
            continue
        pedestal.add_to_pedestal(wfs, wfs.first_cell_id)

    channel_stats = PixelStats(reader_res.n_pixels)

    # Subtract Pedestals
    desc = "Subtracting pedestal"
    for wfs in tqdm(reader_res, total=reader_res.n_events, desc=desc):
        if wfs.missing_packets:
            continue

        subtracted_tc = pedestal.subtract_pedestal(wfs, wfs.first_cell_id)
        channel_stats.add_to_stats(subtracted_tc)

    # Plot results
    p_channel_std = ChannelStd()
    p_channel_std.plot(channel_stats.std)
    p_channel_std.save(join(output_dir, "channel_std.pdf"))

    p_ci_stats = CameraStats(reader_res.mapping)
    p_ci_stats.set_image(channel_stats.mean, channel_stats.std)
    p_ci_stats.save(join(output_dir, "ci_stats.pdf"))
Code example #2
def main():
    description = (
        "Generate the pedestals from an R0 file, subtract it from another "
        "R0 file, and plot the comparison of residuals from different "
        "pedestal methods"
    )
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=Formatter)
    parser.add_argument('-f', '--file', dest='r0_path', required=True,
                        help='R0 file to obtain residuals from')
    parser.add_argument('-p', '--pedestal', dest='pedestal_r0_path',
                        required=True,
                        help='R0 file to generate pedestal from')
    parser.add_argument('-o', '--output', dest='output_dir', required=True,
                        help='directory to store output plots')
    args = parser.parse_args()

    r0_path = args.r0_path
    pedestal_r0_path = args.pedestal_r0_path
    output_dir = args.output_dir

    create_directory(output_dir)
    reader_ped = TIOReader(pedestal_r0_path, max_events=100000)
    reader_res = TIOReader(r0_path, max_events=100000)

    # Generate Pedestals
    pedestal = PedestalTargetCalib(
        reader_ped.n_pixels, reader_ped.n_samples, reader_ped.n_cells
    )
    desc = "Generating pedestal"
    for wfs in tqdm(reader_ped, total=reader_ped.n_events, desc=desc):
        if wfs.missing_packets:
            continue
        pedestal.add_to_pedestal(wfs, wfs.first_cell_id)

    online_stats = OnlineStats()
    online_hist = OnlineHist(bins=100, range_=(-10, 10))

    # Subtract Pedestals
    desc = "Subtracting pedestal"
    for wfs in tqdm(reader_res, total=reader_res.n_events, desc=desc):
        if wfs.missing_packets:
            continue

        subtracted_tc = pedestal.subtract_pedestal(wfs, wfs.first_cell_id)
        online_stats.add_to_stats(subtracted_tc)
        online_hist.add(subtracted_tc)

    p_hist = HistPlot()
    p_hist.plot(
        online_hist.hist, online_hist.edges, online_stats.mean, online_stats.std,
    )
    p_hist.save(join(output_dir, "hist.pdf"))
Code example #3
def process(tf_r0_paths, pedestal_path, tf_path):
    pedestal = PedestalTargetCalib.from_tcal(pedestal_path)

    # Parse amplitudes from filepath
    amplitudes = []
    readers = []
    for path in tf_r0_paths:
        regex_ped = re.search(r".+VPED_(\d+).tio", path)
        amplitudes.append(int(regex_ped.group(1)))
        readers.append(TIOReader(path))

    # Instantiate the TF class from the first file
    tf = TFDC(readers[0].n_pixels, readers[0].n_samples - 32,
              readers[0].n_cells, amplitudes)

    desc0 = "Generating TF"
    it = zip(amplitudes, readers)
    n_amp = len(amplitudes)
    for amplitude, reader in tqdm(it, total=n_amp, desc=desc0):
        amplitude_index = tf.get_input_amplitude_index(amplitude)
        for iwf, wfs in enumerate(reader):
            if wfs.missing_packets:
                continue

            # Skip to next file when enough hits are reached
            if iwf % 1000 == 0:
                if (tf.hits[..., amplitude_index] > 100).all():
                    break

            tf.add_to_tf(pedestal.subtract_pedestal(wfs, wfs.first_cell_id),
                         wfs.first_cell_id, amplitude_index)

    tf.save(tf_path)
Code example #4
def process(input_paths, data_path, poi):
    df_list = []

    n_files = len(input_paths)
    for ifile, f in enumerate(input_paths):
        print("Processing File {}/{}".format(ifile, n_files))
        reader = TIOReader(f, max_events=1000)

        n_events = reader.n_events
        n_samples = reader.n_samples

        wfs = np.zeros((n_events, n_samples))

        desc = "Processing events"
        for wf in tqdm(reader, total=n_events, desc=desc):
            iev = wf.iev
            wfs[iev] = wf[poi]

        average_wf = wfs.mean(0)

        df_list.append(
            pd.DataFrame(
                dict(ifile=ifile,
                     file=f,
                     wf=average_wf,
                     isam=np.arange(n_samples))))

    df = pd.concat(df_list, ignore_index=True)

    with HDF5Writer(data_path) as writer:
        writer.write(data=df)
        writer.write_metadata(n_files=n_files)
Code example #5
def process(input_path, output_path):
    reader = TIOReader(input_path)
    dt = np.zeros(reader.n_events - 1)
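    # Time difference between the CPU timestamps of consecutive events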
    for iev in trange(1, reader.n_events):
        dt[iev - 1] = (reader[iev].t_cpu - reader[iev - 1].t_cpu).value

    np.save(output_path, dt)
Code example #6
def main():
    # files = glob("/Volumes/gct-jason/data_checs/d181019_dctf/15C/Run*_r0.tio")
    files = glob(
        "/Volumes/gct-jason/data_checs/d181019_dctf/15C/r1_tf/Run*_r1.tio")
    files = sort_file_list(files)
    files = files[1:]
    n_files = len(files)

    mean_p = np.zeros(n_files)
    std_p = np.zeros(n_files)
    mean_s = np.zeros(n_files)
    std_s = np.zeros(n_files)
    vped = np.zeros(n_files)

    for ifile, f in enumerate(files):
        vped_path = f.replace("_r1.tio", "_r0.txt")
        vped[ifile] = read_vped(vped_path)

        r = TIOReader(f)
        n_events = r.n_events
        n_pixels = r.n_pixels
        n_samples = r.n_samples
        samples = np.zeros((n_events, n_pixels, n_samples))
        for iev, wf in enumerate(r):
            samples[iev] = wf

        lookup = samples.mean(0)

        mean_p[ifile] = lookup[:, 0].mean()
        std_p[ifile] = lookup[:, 0].std()
        mean_s[ifile] = lookup[0, :].mean()
        std_s[ifile] = lookup[0, :].std()

    (_, caps, _) = plt.errorbar(vped,
                                mean_p,
                                yerr=std_p,
                                fmt='o',
                                mew=1,
                                markersize=3,
                                capsize=3,
                                elinewidth=0.7,
                                label="")
    for cap in caps:
        cap.set_markeredgewidth(0.7)

    (_, caps, _) = plt.errorbar(vped,
                                mean_s,
                                yerr=std_s,
                                fmt='o',
                                mew=1,
                                markersize=3,
                                capsize=3,
                                elinewidth=0.7,
                                label="")
    for cap in caps:
        cap.set_markeredgewidth(0.7)

    # plt.errorbar(vped, mean_p, yerr=std_p)
    # plt.errorbar(vped, mean_s, yerr=std_s)
    plt.show()
Code example #7
def process(input_path, output_path, poi):
    r0_reader = TIOReader(input_path)
    n_events = r0_reader.n_events
    n_samples = r0_reader.n_samples
    isam = np.arange(n_samples, dtype=np.uint16)

    df_list = []

    desc = "Looping over events"
    for r0 in tqdm(r0_reader, total=n_events, desc=desc):
        if r0.missing_packets:
            continue

        iev = r0.iev
        fci = r0.first_cell_id
        adc = r0[poi]

        df_list.append(
            pd.DataFrame(dict(
                iev=iev,
                fci=fci,
                isam=isam,
                adc=adc,
            )))

    df = pd.concat(df_list, ignore_index=True)

    with HDF5Writer(output_path) as writer:
        writer.write(data=df)
        writer.add_metadata(poi=poi)
Code example #8
def process(input_path, output_path, hv_path, event_slice):
    reader = TIOReader(input_path)
    n_events = reader.n_events
    n_pixels = reader.n_pixels
    n_samples = reader.n_samples
    mapping = reader.mapping
    mappingtc = reader.tc_mapping
    mappingsp = MappingSP(mappingtc)

    waveforms = reader[event_slice]

    sp_arr = np.vstack(mapping.groupby("superpixel").pixel.apply(np.array))
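    # One row of pixel indices per superpixel, taken from the camera mapping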
    avg_wfs = waveforms[:, sp_arr].mean((0, 2))
    amplitudes = avg_wfs.max(1)

    hvs = hv_file_reader(hv_path)
    hv255 = np.where(hvs == 255)

    df = get_superpixel_mapping(mapping)
    camera = CameraImage.from_mapping(df)
    image = amplitudes
    camera.image = image
    camera.add_colorbar("Superpixel-Waveform Max (mV)")
    camera.highlight_pixels(hv255, 'red')
    camera.save(output_path)

    lt75 = np.where(amplitudes < 75)
    print(lt75)
Code example #9
def process(input_path, pedestal_path, output_path):
    reader = TIOReader(input_path)
    wf_calib = WaveformCalibrator(
        pedestal_path, reader.n_pixels, reader.n_samples
    )

    wfs = get_da(reader, wf_calib)
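    # Evaluate camera-wide and per-pixel statistics plus a histogram in a single dask pass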

    mean, std, mean_pix, std_pix, (hist, edges) = da.compute(
        wfs.mean(),
        wfs.std(),
        wfs.mean(axis=(0, 2)),
        wfs.std(axis=(0, 2)),
        da.histogram(wfs, bins=1000, range=(-10, 10))
    )

    np.savez(
        output_path,
        mean=mean,
        std=std,
        mean_pix=mean_pix,
        std_pix=std_pix,
        hist=hist,
        edges=edges
    )
Code example #10
def process(r0_path, pedestal, tf, output_path):
    reader = TIOReader(r0_path, max_events=1000)
    sum = np.zeros(reader.n_samples - 32)
    n = 0

    for wfs in tqdm(reader, total=reader.n_events):
        if wfs.missing_packets:
            continue
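        # Recompute the cell ids, drop the first 32 samples, and update
        # first_cell_id to match the shortened waveform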
        cells = get_cell_ids_for_waveform(wfs.first_cell_id, reader.n_samples,
                                          reader.n_cells)
        wfs = wfs[:, 32:]
        wfs.first_cell_id = cells[32]

        iev = wfs.iev
        first_cell_id = wfs.first_cell_id
        wfs = correct_overflow(wfs)
        wfs = pedestal.subtract_pedestal(wfs, first_cell_id)
        if tf is not None:
            wfs = tf.apply_tf(wfs, first_cell_id)

        if np.isnan(wfs).any():
            continue

        sum += wfs.sum(0)
        n += wfs.shape[0]

    avg = sum / n
    avg /= avg.max()
    array = np.column_stack([np.arange(avg.size) * 1e-9,
                             avg]).astype(np.float32)
    print(f"Creating file: {output_path}")
    np.savetxt(output_path, array)
Code example #11
def main():
    r0_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r0.tio"
    r1_int_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r1_int.tio"
    r1_rnd_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r1_rnd.tio"
    tcal_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_ped.tcal"

    reader_r0 = TIOReader(r0_path, max_events=10000)
    reader_r1_int = TIOReader(r1_int_path, max_events=10000)
    reader_r1_rnd = TIOReader(r1_rnd_path, max_events=10000)

    # Generate Pedestal
    pedestal_tc = PedestalTargetCalib(reader_r0.n_pixels, reader_r0.n_samples,
                                      reader_r0.n_cells)
    pedestal_tc.load_tcal(tcal_path)

    l_int = []
    l_rnd = []

    # Subtract Pedestals
    desc = "Subtracting pedestal"
    z = zip(reader_r0, reader_r1_int, reader_r1_rnd)
    it = tqdm(z, total=reader_r0.n_events, desc=desc)
    for wfs_r0, wfs_r1_int, wfs_r1_rnd in it:
        if wfs_r0.missing_packets:
            continue

        wfs_r1_flt = pedestal_tc.subtract_pedestal(wfs_r0,
                                                   wfs_r0.first_cell_id)

        # offset = 700
        # scale = 13.6
        # wfs_r1_flt = (wfs_r1_flt + offset) * scale
        # wfs_r1_int = (wfs_r1_int + offset) * scale
        # wfs_r1_rnd = (wfs_r1_rnd + offset) * scale

        l_int.append(wfs_r1_flt - wfs_r1_int)
        l_rnd.append(wfs_r1_flt - wfs_r1_rnd)

    l_int = np.array(l_int).ravel()
    l_rnd = np.array(l_rnd).ravel()

    plt.hist(l_int, bins=20, histtype='step', label='int')
    plt.hist(l_rnd, bins=20, histtype='step', label='rnd')
    plt.legend(loc='best')
    plt.xlabel("Difference to float ped-sub ADC")
    plt.ylabel("N")
    plt.show()
Code example #12
File: waveforms.py  Project: watsonjj/sstcam-sandbox
def process(file):
    illumination = 50

    r1_path, _, _ = file.get_run_with_illumination(illumination, r1=True)
    dl1_path, _, _ = file.get_run_with_illumination(illumination, r1=False)
    poi = file.poi
    plot_dir = file.waveforms_plot_dir
    ff_path = file.ff_path

    with HDF5Reader(ff_path) as reader:
        df = reader.read("data")
        ff_m = df['ff_m'].values
        ff_c = df['ff_c'].values

    reader = TIOReader(r1_path)
    n_events = reader.n_events
    n_pixels = reader.n_pixels
    n_samples = reader.n_samples

    wfs = np.zeros((n_events, n_pixels, n_samples))
    desc = "Processing events"
    for wf in tqdm(reader, total=n_events, desc=desc):
        wfs[wf.iev] = wf

    reader = DL1Reader(dl1_path)
    iev, pixel, charge_1d = reader.select_columns(['iev', 'pixel', 'charge'])
    charge_1d = (charge_1d - ff_c[pixel]) / ff_m[pixel]
    charge = np.zeros((n_events, n_pixels))
    charge[iev, pixel] = charge_1d

    x = np.arange(n_samples)

    p_wf = WFPlotter(switch_backend=True)
    p_wf.plot(x, wfs[:, poi].T)
    p_wf.save(os.path.join(plot_dir, "p{}.pdf".format(poi)))

    p_wf = WFPlotter()
    p_wf.plot(x, wfs.mean(0).T)
    p_wf.save(get_plot(os.path.join(plot_dir, "average.pdf")))

    p_hist = HistPlotter(switch_backend=True)
    p_hist.plot(charge[:, poi])
    p_hist.save(os.path.join(plot_dir, "hist.pdf"))
Code example #13
File: averages.py  Project: watsonjj/sstcam-sandbox
def get_df(paths, vped_list):
    assert (len(paths) == len(vped_list))
    readers = [TIOReader(p) for p in paths]

    n_files = len(paths)
    first_reader = TIOReader(paths[0])
    n_pixels = first_reader.n_pixels
    n_samples = first_reader.n_samples

    # mean = np.zeros((n_files, n_pixels, n_samples))
    # std = np.zeros((n_files, n_pixels, n_samples))
    # vped = np.zeros((n_files, n_pixels, n_samples))
    jpixel, jsample = np.indices((n_pixels, n_samples))
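    # Index grids giving the pixel and sample number of every entry once flattened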

    df_list = []

    for ifile in trange(n_files):
        reader = readers[ifile]

        r_n_events = reader.n_events
        r_n_pixels = reader.n_pixels
        r_n_samples = reader.n_samples
        samples = np.zeros((r_n_events, r_n_pixels, r_n_samples))
        for iev, wf in enumerate(reader):
            samples[iev] = wf

        mean = np.mean(samples, 0)
        std = np.std(samples, 0)
        vped = vped_list[ifile]

        df_list.append(pd.DataFrame(dict(
            vped_dac=vped,
            pixel=jpixel.ravel(),
            sample=jsample.ravel(),
            mean=mean.ravel(),
            std=std.ravel(),
        )))

    df = pd.concat(df_list, ignore_index=True)

    return df
Code example #14
File: r0r1readin.py  Project: jodwilliams1990/R0R1Gui
def r0r1cameraplotter(input_path):
    reader = TIOReader(input_path)  # Load the file
    wfs = reader[1]
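    # Waveforms of event 1: one row per pixel, one column per sample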
    for m in range(13):
        camera = plt.figure(figsize=(10, 10))
        # Generate a CameraImage object using the classmethod "from_mapping" which accepts the
        # mapping object contained in the reader, which converts from pixel ID to pixel
        # coordinates (using the Mapping class in TargetCalib)
        camera = CameraImage.from_mapping(reader.mapping)
        camera.add_colorbar()
        camera.image = wfs[:, m*10]  # Plot value of the sample at m x 10ns for each pixel
        plt.show()
Code example #15
def plot_from_tio():
    """
    Use the CHECLabPy mapping dataframe to plot an image
    """
    path = "/Users/Jason/Software/CHECLabPy/refdata/Run17473_r1.tio"
    r = TIOReader(path, max_events=10)
    camera = CameraImage.from_mapping(r.mapping)
    camera.add_colorbar("Amplitude (mV)")
    camera.annotate_on_telescope_up()
    for wf in r:
        image = wf[:, 60]
        camera.image = image
        plt.pause(0.5)
Code example #16
def process(input_paths):
    for path in input_paths:
        pedestal_path = path.replace(".tio", "_ped.tcal")
        reader = TIOReader(path)

        pedestal = PedestalTargetCalib(reader.n_pixels, reader.n_samples,
                                       reader.n_cells)
        desc = "Generating pedestal"
        for wfs in tqdm(reader, total=reader.n_events, desc=desc):
            if wfs.missing_packets:
                continue
            pedestal.add_to_pedestal(wfs, wfs.first_cell_id)
        pedestal.save_tcal(pedestal_path)
Code example #17
def process(r0_paths, output_path):
    data = []

    for ipath, r0_path in enumerate(r0_paths):
        print(f"Processing: {ipath+1}/{len(r0_paths)}")
        pedestal_path = r0_path.replace(".tio", "_ped.tcal")

        regex_r0 = re.search(r".+_tc([\d.]+)_tmc([\d.]+).tio", r0_path)
        temperature_r0_chamber = float(regex_r0.group(1))
        temperature_r0_primary = float(regex_r0.group(2))

        regex_ped = re.search(r".+_tc([\d.]+)_tmc([\d.]+)_ped.tcal",
                              pedestal_path)
        temperature_pedestal_chamber = float(regex_ped.group(1))
        temperature_pedestal_primary = float(regex_ped.group(2))

        reader = TIOReader(r0_path, max_events=50000)
        pedestal = PedestalTargetCalib(reader.n_pixels, reader.n_samples,
                                       reader.n_cells)
        pedestal.load_tcal(pedestal_path)

        online_stats = OnlineStats()
        online_hist = OnlineHist(bins=100, range_=(-10, 10))

        # Subtract Pedestals
        desc = "Subtracting pedestal"
        for wfs in tqdm(reader, total=reader.n_events, desc=desc):
            if wfs.missing_packets:
                continue

            subtracted_tc = pedestal.subtract_pedestal(wfs,
                                                       wfs.first_cell_id)[[0]]
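            # The [[0]] index keeps only the first pixel while preserving the 2D shape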
            online_stats.add_to_stats(subtracted_tc)
            online_hist.add(subtracted_tc)

        data.append(
            dict(
                temperature_r0_chamber=temperature_r0_chamber,
                temperature_r0_primary=temperature_r0_primary,
                temperature_pedestal_chamber=temperature_pedestal_chamber,
                temperature_pedestal_primary=temperature_pedestal_primary,
                mean=online_stats.mean,
                std=online_stats.std,
                hist=online_hist.hist,
                edges=online_hist.edges,
            ))

    with HDF5Writer(output_path) as writer:
        writer.write(data=pd.DataFrame(data))
Code example #18
def process(path):
    pedestal_path = path.replace("_r0.tio", "_ped.tcal")
    reader = TIOReader(path)

    pedestal = PedestalTargetCalib(reader.n_pixels, reader.n_samples - 32,
                                   reader.n_cells)
    desc = "Generating pedestal"
    for wfs in tqdm(reader, total=reader.n_events, desc=desc):
        if wfs.missing_packets:
            continue
        cells = get_cell_ids_for_waveform(wfs.first_cell_id, reader.n_samples,
                                          reader.n_cells)
        wfs = wfs[:, 32:]
        wfs.first_cell_id = cells[32]
        pedestal.add_to_pedestal(wfs, wfs.first_cell_id)
    pedestal.save_tcal(pedestal_path)
Code example #19
File: check.py  Project: watsonjj/sstcam-sandbox
def process(file, tf):
    r0_path = file.r0_path
    r1_name = os.path.basename(r0_path).replace("_r0", "_r1")
    r1_path = os.path.join(tf.r1_dir, r1_name)
    get_cell = tf.get_cell

    r = TIOReader(r1_path)
    n_events = r.n_events
    n_pixels = r.n_pixels
    n_samples = r.n_samples
    samp_arange = np.arange(n_samples, dtype=np.uint16)
    for wfs in tqdm(r, total=n_events):
        fci = r.first_cell_ids
        cells = get_cell(int(fci[0]), samp_arange)
        cells_pix = np.tile(cells, (n_pixels, 1))
        np.testing.assert_almost_equal(wfs, cells_pix, 1)
Code example #20
def main():
    # path = get_astri_2019("d2019-06-10_ledflashers/flasher_comparison_May-June/unit0pattern-low_r1.tio")
    # path = get_astri_2019("d2019-06-10_ledflashers/flasher_comparison_May-June/Run13270_r1.tio")
    path = get_astri_2019(
        "d2019-06-10_ledflashers/flasher_comparison_May-June/Run13401_r1.tio")

    output_dir = path.replace("_r1.tio", "")
    reader = TIOReader(path, max_events=100)
    waveforms = reader[:].mean(0)
    mapping = get_superpixel_mapping(reader.mapping)
    n_pixels, n_samples = waveforms.shape
    n_sp = n_pixels // 4

    correction = pd.read_csv(join(DIR, "sp_illumination_profile.dat"),
                             sep='\t')['correction'].values

    sp_waveforms = waveforms.reshape((n_sp, 4, n_samples)).sum(1)
    sp_waveforms_corrected = sp_waveforms * correction[:, None]

    amplitude = sp_waveforms_corrected.max(1)
    median = np.median(amplitude)
    threshold = median * 1.10
    trigger_off = amplitude > threshold
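    # Flag superpixels more than 10% brighter than the camera median as trigger-off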
    # trigger_off[[  # TODO: Remove?
    #     355, 356, 357, 358, 359, 382, 375, 460, 459, 457, 458, 465, 289, 489,
    #     502, 254, 247, 154, 144, 56, 46, 39, 24, 25, 76
    # ]] = True
    hv_off = np.zeros(n_sp, dtype=bool)
    hv_off[[24, 453]] = True
    print(f"SP Trigger OFF: {np.where(trigger_off)[0].tolist()}")
    print(f"SP HV OFF: {np.where(hv_off)[0].tolist()}")

    plot = CameraPlotter(mapping)
    plot.highlight_pixels(trigger_off, 'yellow')
    plot.highlight_pixels(hv_off, 'red')
    plot.plot_waveforms(sp_waveforms)
    plot.save(join(output_dir, "led.pdf"))
    plot.plot_waveforms(sp_waveforms_corrected)
    plot.save(join(output_dir, "uniform.pdf"))

    df = pd.DataFrame(
        dict(
            superpixel=np.arange(n_sp),
            hv_on=(~hv_off).astype(int),
            trigger_off=trigger_off.astype(int),
        ))
    df.to_csv(join(output_dir, "sp_settings.txt"), sep='\t', index=False)
Code example #21
def main():
    description = (
        "Generate the pedestals from an R0 file, subtract it from another "
        "R0 file, and plot the comparison of residuals from different "
        "pedestal methods"
    )
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=Formatter)
    parser.add_argument('-f', '--file', dest='r0_path', required=True,
                        help='R0 file to obtain residuals from')
    parser.add_argument('-o', '--output', dest='output_dir', required=True,
                        help='directory to store output plots')
    args = parser.parse_args()

    r0_path = args.r0_path
    channel = 0
    output_dir = args.output_dir

    create_directory(output_dir)
    reader = TIOReader(r0_path, max_events=100000)

    # Generate Pedestals
    pedestal = PedestalCellPosition(
        reader.n_pixels, reader.n_samples, reader.n_cells
    )
    desc = "Generating pedestal"
    for wfs in tqdm(reader, total=reader.n_events, desc=desc):
        if wfs.missing_packets:
            continue
        pedestal.add_to_pedestal(wfs, wfs.first_cell_id)

    # embed()

    for cell in range(reader.n_cells):
        if (pedestal.hits[channel, cell] == 0).all():
            continue
        p_cell_wf = CellWaveform()
        p_cell_wf.plot(
            pedestal.pedestal[channel, cell],
            pedestal.std[channel, cell],
            pedestal.hits[channel, cell],
            cell
        )
        p_cell_wf.save(
            join(output_dir, f"cell_pedestal_vs_position/{cell:04d}.pdf")
        )
Code example #22
def process(paths, output_path):
    with HDF5Writer(output_path) as writer:
        for ipath, path in enumerate(paths):
            print(f"File: {ipath+1}/{len(paths)}")
            reader = TIOReader(path)
            n_events = reader.n_events
            for wf in tqdm(reader, total=n_events):
                data = dict(
                    ipath=ipath,
                    iev=wf.iev,
                    tack=wf.t_tack,
                    stale=wf.stale[0],
                    fci=wf.first_cell_id[0],
                )
                writer.append(pd.DataFrame(data, index=[0]))
            print(
                f"Memory Usage = {psutil.Process().memory_info().rss * 1E-9} GB"
            )
Code example #23
def main():
    path = get_astri_2019("d2019-06-10_ledflashers/flasher_comparison_May-June/unit0pattern-low_r1.tio")
    reader = TIOReader(path, max_events=100)
    waveforms = reader[:].mean(0)

    # Calculate coeff
    n_pixels, n_samples = waveforms.shape
    n_sp = n_pixels // 4
    waveforms_sp = waveforms.reshape(
        (n_sp, 4, n_samples)
    ).sum(1)
    correction = (waveforms_sp.max(1).mean() / waveforms_sp.max(1))
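    # Per-superpixel correction that scales each peak amplitude to the camera-average peak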

    df = pd.DataFrame(dict(
        superpixel=np.arange(n_sp),
        correction=correction,
    ))
    df.to_csv(join(DIR, "sp_illumination_profile.dat"), sep='\t', index=False)
Code example #24
File: dask.py  Project: watsonjj/sstcam-sandbox
def main():
    input_path = "/Users/Jason/Downloads/tempdata/Run06136_r0.tio"
    pedestal_path = "/Users/Jason/Downloads/tempdata/Run06136_ped.tcal"
    max_events = None

    reader = TIOReader(input_path, max_events=max_events)
    wf_calib = WaveformCalibrator(
        pedestal_path, reader.n_pixels, reader.n_samples
    )

    dtio = DaskTIO(reader, wf_calib)
    ddf = dtio.get_file_df()
    # print("here")
    df_0, df_2 = dd.compute(
        ddf.groupby(['ipix', 'fblock', 'fbpisam'])['r0'].std(),
        ddf.groupby(['ipix', 'fci', 'fbpisam'])['r0'].std(),
    )
    embed()
Code example #25
def process(tf_r0_paths, pedestal_path, tf_path, tf_class):
    pedestal = PedestalTargetCalib.from_tcal(pedestal_path)

    # Parse amplitudes from filepath
    amplitudes = []
    readers = []
    for path in tf_r0_paths:
        regex_ped = re.search(r".+_(-?\d+)_r0.tio", path)
        amplitudes.append(int(regex_ped.group(1)))
        readers.append(TIOReader(path))

    # Instantiate the TF class from the first file
    tf = tf_class(readers[0].n_pixels, readers[0].n_samples - 32,
                  readers[0].n_cells, amplitudes)
    if tf_class == TFACCrossCorrelation:
        # # Estimate range of peak positions
        # r = readers[np.abs(np.array(amplitudes) - 500).argmin()]
        # peak = r[0].mean(0).argmax()
        # tf.set_trange(peak - 5 - 32, peak + 5 - 32)
        tf.set_trange(6, 16)

    desc0 = "Generating TF"
    it = zip(amplitudes, readers)
    n_amp = len(amplitudes)
    for amplitude, reader in tqdm(it, total=n_amp, desc=desc0):
        amplitude_index = tf.get_input_amplitude_index(amplitude)
        for iwf, wfs in enumerate(tqdm(reader, total=reader.n_events)):
            if wfs.missing_packets:
                print("Skipping event")
                continue

            # Skip to next file when enough hits are reached
            if iwf % 1000 == 0:
                if (tf.hits[..., amplitude_index] > 100).all():
                    break

            cells = get_cell_ids_for_waveform(wfs.first_cell_id,
                                              reader.n_samples, reader.n_cells)
            wfs = wfs[:, 32:]
            wfs.first_cell_id = cells[32]
            tf.add_to_tf(pedestal.subtract_pedestal(wfs, wfs.first_cell_id),
                         wfs.first_cell_id, amplitude_index)

    tf.save(tf_path)
Code example #26
def process(entry, poi):
    with stdout_redirector():
        input_path, amplitude = entry
        df_list = []
        desc = "Looping over events"
        r0_reader = TIOReader(input_path)
        n_events = r0_reader.n_events
        n_samples = r0_reader.n_samples
        isam = np.arange(n_samples, dtype=np.uint16)

        rd = ReducedDataframe()

        for r0 in r0_reader:
            fci = r0.first_cell_id[poi].item()
            rd.add(amplitude, r0[poi].astype(int), fci, isam)

        df = rd.finish()

    return df
Code example #27
def main():
    r1_path = "/Volumes/gct-jason/astri_onsky_archive/d2019-05-10_triggerpatterns/Run13357_r1.tio"
    trig_path = "/Volumes/gct-jason/astri_onsky_archive/d2019-05-10_triggerpatterns/Run13357_2019-05-10.2100.prt"
    threshold = 42

    r1_dict = dict()
    trigger_dict = dict()

    r1_reader = TIOReader(r1_path, skip_events=0, skip_end_events=0)
    trigger_reader = TriggerReader(trig_path)

    n_pixels = r1_reader.n_pixels
    n_superpixels = n_pixels // 4
    n_samples = r1_reader.n_samples

    for i in range(35):
        r1_wfs = r1_reader[i]
        trigger_event = trigger_reader.read()

        r1_tack = r1_reader.current_tack
        trigger_tack = trigger_event.TACK

        r1_image = r1_wfs.reshape((n_superpixels, 4, n_samples)).sum(1).max(1)
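        # Sum the four pixels of each superpixel and take the peak sample as the image value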
        trigger_image = np.frombuffer(trigger_event.trigg.unpack(), dtype=bool)

        r1_dict[r1_tack] = r1_image
        trigger_dict[trigger_tack] = trigger_image

    p_image = ImagePlotter(r1_reader.mapping)

    tack_list = sorted(
        list(set().union(r1_dict.keys(), trigger_dict.keys()))
    )[:-1]
    for tack in tack_list:
        r1_image = r1_dict.get(tack, np.zeros(n_superpixels))
        trigger_image = trigger_dict.get(tack, np.zeros(n_superpixels))

        software_trigger = r1_image > threshold

        p_image.set_image(tack, r1_image, software_trigger, trigger_image)
        p_image.save(f"T{tack}.png")
Code example #28
def main():
    paths = glob(get_astri_2019("*/*_r1.tio"))
    pattern = re.compile(r"(?:.+?)/d(.+?)/Run(\d+?)_r1.tio")
    d_list = []
    for path in paths:
        reader = TIOReader(path)
        wfs = reader[0]
        nudge, temperature = get_nudge_and_temperature_from_reader(reader)

        regexp = re.search(pattern, path)
        investigation = regexp.group(1)
        run_id = regexp.group(2)

        d_list.append(
            dict(investigation=investigation,
                 run_id=run_id,
                 t_cpu=wfs.t_cpu,
                 nudge=nudge,
                 temperature=temperature))

    df = pd.DataFrame(d_list)
    with HDF5Writer(get_data("d190507_check_amplitude_calib/data.h5")) as w:
        w.write(data=df)
Code example #29
def process(r0_paths, pedestal, tf, output_path):
    readers = [TIOReader(path) for path in r0_paths]
    df_list = []

    channel = np.arange(readers[0].n_pixels)

    desc0 = "Extracting charges"
    for reader in tqdm(readers, total=len(readers), desc=desc0):
        regex = re.search(r".+Amplitude_([-\d]+)_.+_r0.tio", reader.path)
        amplitude = int(regex.group(1))
        for wfs in tqdm(reader, total=reader.n_events):
            if wfs.missing_packets:
                continue
            cells = get_cell_ids_for_waveform(wfs.first_cell_id,
                                              reader.n_samples, reader.n_cells)
            wfs = wfs[:, 32:]
            wfs.first_cell_id = cells[32]

            iev = wfs.iev
            first_cell_id = wfs.first_cell_id
            wfs = correct_overflow(wfs)
            wfs = pedestal.subtract_pedestal(wfs, first_cell_id)
            if tf is not None:
                wfs = tf.apply_tf(wfs, first_cell_id)
            charge = wfs[:, 4:21].sum(1)

            df_list.append(
                pd.DataFrame(
                    dict(amplitude=amplitude,
                         iev=iev,
                         fci=first_cell_id,
                         channel=channel,
                         charge=charge)))

    df = pd.concat(df_list, ignore_index=True)
    with HDF5Writer(output_path) as writer:
        writer.write(data=df)
Code example #30
from CHECLabPy.core.io import TIOReader
from sstcam_sandbox import get_astri_2019
from tqdm import trange
import numpy as np
from matplotlib import pyplot as plt

path = get_astri_2019("d2019-06-14_pedestal/Run13794_r0.tio")
reader = TIOReader(path)
dt = np.zeros(reader.n_events - 1)
for iev in trange(1, reader.n_events):
    dt[iev - 1] = (reader[iev].t_cpu - reader[iev - 1].t_cpu).value

plt.hist(dt, bins=100)
plt.yscale("log")