Code Example #1
def main():
    d_list = []
    for file in tqdm(all_files):
        tcal_path = file.tcal

        ped_reader = PedestalArrayReader(tcal_path)
        hits_tcal = np.array(ped_reader.GetHits())
        std_tcal = np.array(ped_reader.GetStdDev())

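        # Mask cells with too few hits or an undefined standard deviation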
        mask = (hits_tcal < 6) | np.isnan(std_tcal)
        std_tcal = np.ma.masked_array(std_tcal, mask=mask)

        n_noisy = (std_tcal > 5).sum()

        d_list.append(dict(
            name=file.name,
            mean=std_tcal.mean(),
            std=std_tcal.std(),
            min=std_tcal.min(),
            max=std_tcal.max(),
            n_noisy=n_noisy,
        ))

    df = pd.DataFrame(d_list)
    with HDF5Writer(get_data("d190730_pedestal/tcal_std.h5")) as w:
        w.write(data=df)
Code Example #2
def process(input_path, output_path, poi):
    r0_reader = TIOReader(input_path)
    n_events = r0_reader.n_events
    n_samples = r0_reader.n_samples
    isam = np.arange(n_samples, dtype=np.uint16)

    df_list = []

    desc = "Looping over events"
    for r0 in tqdm(r0_reader, total=n_events, desc=desc):
        if r0.missing_packets:
            continue

        iev = r0.iev
        fci = r0.first_cell_id
        adc = r0[poi]

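        # Record one row per sample for the pixel of interest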
        df_list.append(
            pd.DataFrame(dict(
                iev=iev,
                fci=fci,
                isam=isam,
                adc=adc,
            )))

    df = pd.concat(df_list, ignore_index=True)

    with HDF5Writer(output_path) as writer:
        writer.write(data=df)
        writer.add_metadata(poi=poi)
Code Example #3
def process(path, dead, output):
    with HDF5Reader(path) as reader:
        df = reader.read("data")
        df = df.loc[~df['pixel'].isin(dead)]

    d_list = []

    extractors = set(df.columns) - {'iobs', 'iev', 'pixel', 'true'}
    for extractor in extractors:
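        # Normalise this extractor by the slope of a linear fit of measured vs true charge (true > 5)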
        gt5 = df['true'].values > 5
        true = df['true'].values[gt5]
        measured = df[extractor].values[gt5]
        coeff = polyfit(true, measured, [1])
        _, calib = coeff
        df[extractor] /= calib

        for true, group in df.groupby("true"):
            n = group.index.size
            d_list.append(
                dict(
                    extractor=extractor,
                    true=true,
                    n=n,
                    mean=group[extractor].mean(),
                    std=group[extractor].std(),
                    rmse=np.sqrt(np.sum((group[extractor] - true)**2) / n),
                    res=np.sqrt(
                        np.sum((group[extractor] - true)**2) / n + true),
                ))

    df_cr = pd.DataFrame(d_list)

    with HDF5Writer(output) as writer:
        writer.write(data=df_cr)
Code Example #4
def process_gamma_only(path_gamma, base_output, n_off):
    angles = np.linspace(0, 360, n_off + 2)[:-1] * u.deg
    n_onoff = len(angles)
    print(angles)

    x_onoff, y_onoff = get_on_off_positions(path_gamma, angles)
    df_gamma = get_dataframe(path_gamma, x_onoff, y_onoff, norm_crab)

    output_path = base_output + "_wobble.h5"
    with HDF5Writer(output_path) as writer:
        df = df_gamma
        df_on = df.copy()
        df_on['alpha'] = df_on['alpha0']
        df_list = []
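        # Stack one copy of the dataframe per off-region, each taking its own alpha column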
        for i in range(1, n_onoff):
            df_i = df.copy()
            df_i['alpha'] = df_i[f'alpha{i}']
            df_list.append(df_i)
        df_off = pd.concat(df_list, ignore_index=True)
        for i in range(n_onoff):
            df_on.drop(f'alpha{i}', axis=1, inplace=True)
            df_off.drop(f'alpha{i}', axis=1, inplace=True)
        writer.write(on=df_on)
        writer.write(off=df_off)
        writer.add_metadata(alpha_li_ma=1 / (n_onoff - 1))
Code Example #5
def main():
    r1_paths = dict(
        off=get_astri_2019("d2019-05-08_ledflashers_dynrange/Run13268_r1.tio"),
        on_50=get_astri_2019("d2019-05-08_ledflashers_dynrange/Run13272_r1.tio"),
        on_3=get_astri_2019("d2019-05-08_ledflashers_dynrange/Run13267_r1.tio")
    )
    output = get_data("d190520_charge_extraction/data/charge.h5")
    poi = 2004

    reader = ReaderR1(list(r1_paths.values())[0])
    kw = dict(
        n_pixels=reader.n_pixels,
        n_samples=reader.n_samples,
        mapping=reader.mapping,
        reference_pulse_path=reader.reference_pulse_path,
    )
    extractors = dict(
        cc_nn=(CrossCorrelationNeighbour(**kw), 'charge_cc_nn'),
    )
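    # Add a grid of extractors: one sliding window per width, plus a peak
    # integrator for every (width, shift) combination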
    for width in range(1, 15):
        extractors[f'sliding_{width}'] = (
            SlidingWindowNeighbour(**kw, window_size=width),
            "charge_sliding_nn"
        )

        for shift in range(-3, 8):
            extractors[f'peak_{width}_{shift}'] = (
                CtapipeNeighbourPeakIntegrator(
                    **kw, window_size=width, window_shift=shift
                ), "charge_nn"
            )

    with HDF5Writer(output) as writer:
        for key, path in r1_paths.items():
            reader = ReaderR1(path, max_events=500)
            baseline_subtractor = BaselineSubtractor(reader)
            time_calibrator = TimeCalibrator()

            desc = "Looping over file"
            for wfs in tqdm(reader, total=reader.n_events, desc=desc):
                iev = wfs.iev
                if wfs.stale.any():
                    continue

                wfs = time_calibrator(wfs)
                wfs = baseline_subtractor.subtract(wfs)

                global_params = dict(
                    key=key,
                    iev=iev,
                    pixel=poi,
                )

                for name, (extractor, column) in extractors.items():
                    params = global_params.copy()
                    params['extractor'] = name
                    params['charge'] = extractor.process(wfs)[column][poi]
                    df = pd.DataFrame(params, index=[0])
                    writer.append(df, key='data')
Code Example #6
def main():
    base = get_astri_2019("")
    runlist_paths = [
        join(base, "d2019-04-30_cosmicray/runlist.txt"),
        join(base, "d2019-05-01_cosmicray/runlist.txt"),
        join(base, "d2019-05-01_mrk501/runlist.txt"),
        join(base, "d2019-05-02_PG1553+113/runlist.txt"),
        join(base, "d2019-05-02_mrk421/runlist.txt"),
        join(base, "d2019-05-02_mrk501/runlist.txt"),
        join(base, "d2019-05-06_mrk501/runlist.txt"),
        join(base, "d2019-05-07_cosmicray/runlist.txt"),
        join(base, "d2019-05-08_cosmicray/runlist.txt"),
        join(base, "d2019-05-09_mrk421/runlist.txt"),
    ]
    output_path = get_data("d190522_hillas_over_campaign/hillas.h5")

    with HDF5Writer(output_path) as writer:

        n_investigations = len(runlist_paths)
        mapping = None
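        # The pixel mapping from the first file is stored alongside the merged data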

        for iinv, path in enumerate(runlist_paths):
            print(f"PROGRESS: Processing inv {iinv+1}/{n_investigations}")

            if not exists(path):
                raise ValueError(f"Missing runlist file: {path}")

            directory = abspath(dirname(path))
            investigation = directory.split('/')[-1]
            df_runlist = pd.read_csv(path, sep='\t')

            for _, row in df_runlist.iterrows():
                run = int(row['run'])
                hillas_path = join(directory, f"Run{run:05d}_hillas.h5")

                with HDF5Reader(hillas_path) as reader:
                    if mapping is None:
                        mapping = reader.get_mapping()

                    keys = ['data', 'pointing', 'mc', 'mcheader']
                    for key in keys:
                        if key not in reader.dataframe_keys:
                            continue

                        it = enumerate(reader.iterate_over_chunks(key, 1000))
                        for ientry, df in it:
                            df['iinv'] = iinv
                            df['investigation'] = investigation
                            writer.append(df, key=key)

        writer.add_mapping(mapping)
Code Example #7
def process(r0_paths, output_path):
    data = []

    for ipath, r0_path in enumerate(r0_paths):
        print(f"Processing: {ipath+1}/{len(r0_paths)}")
        pedestal_path = r0_path.replace(".tio", "_ped.tcal")

        regex_r0 = re.search(r".+_tc([\d.]+)_tmc([\d.]+).tio", r0_path)
        temperature_r0_chamber = float(regex_r0.group(1))
        temperature_r0_primary = float(regex_r0.group(2))

        regex_ped = re.search(r".+_tc([\d.]+)_tmc([\d.]+)_ped.tcal",
                              pedestal_path)
        temperature_pedestal_chamber = float(regex_ped.group(1))
        temperature_pedestal_primary = float(regex_ped.group(2))

        reader = TIOReader(r0_path, max_events=50000)
        pedestal = PedestalTargetCalib(reader.n_pixels, reader.n_samples,
                                       reader.n_cells)
        pedestal.load_tcal(pedestal_path)

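        # Accumulate the mean, standard deviation and histogram incrementally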
        online_stats = OnlineStats()
        online_hist = OnlineHist(bins=100, range_=(-10, 10))

        # Subtract Pedestals
        desc = "Subtracting pedestal"
        for wfs in tqdm(reader, total=reader.n_events, desc=desc):
            if wfs.missing_packets:
                continue

            subtracted_tc = pedestal.subtract_pedestal(wfs,
                                                       wfs.first_cell_id)[[0]]
            online_stats.add_to_stats(subtracted_tc)
            online_hist.add(subtracted_tc)

        data.append(
            dict(
                temperature_r0_chamber=temperature_r0_chamber,
                temperature_r0_primary=temperature_r0_primary,
                temperature_pedestal_chamber=temperature_pedestal_chamber,
                temperature_pedestal_primary=temperature_pedestal_primary,
                mean=online_stats.mean,
                std=online_stats.std,
                hist=online_hist.hist,
                edges=online_hist.edges,
            ))

    with HDF5Writer(output_path) as writer:
        writer.write(data=pd.DataFrame(data))
Code Example #8
def process(paths, output_path):
    with HDF5Writer(output_path) as writer:
        for ipath, path in enumerate(paths):
            print(f"File: {ipath+1}/{len(paths)}")
            reader = TIOReader(path)
            n_events = reader.n_events
            for wf in tqdm(reader, total=n_events):
                data = dict(
                    ipath=ipath,
                    iev=wf.iev,
                    tack=wf.t_tack,
                    stale=wf.stale[0],
                    fci=wf.first_cell_id[0],
                )
                writer.append(pd.DataFrame(data, index=[0]))
            print(
                f"Memory Usage = {psutil.Process().memory_info().rss * 1E-9} GB"
            )
Code Example #9
def main():
    input_paths = glob(
        "/Volumes/gct-jason/astri_onsky_archive/d2019-11-19_monitor/*_mon.h5")
    input_paths = sort_file_list(input_paths)
    output_path = "/Volumes/gct-jason/astri_onsky_archive/d2019-11-19_monitor/monitor.h5"

    n_files = len(input_paths)

    with HDF5Writer(output_path) as writer:
        for ipath, input_path in enumerate(input_paths):
            print("PROGRESS: Processing file {}/{}".format(ipath + 1, n_files))

            with HDF5Reader(input_path) as reader:
                keys = reader.dataframe_keys
                for key in keys:
                    df = reader.read(key)
                    df = df.rename(columns={"icomp": "iunit"})
                    writer.append(df, key=key)
Code Example #10
def process(pedestal_paths, output_path):
    pedestal_paths = sort_file_list(pedestal_paths)

    data = []
    reference_values = None
    reference_spread = None

    for path in pedestal_paths:
        regex_ped = re.search(r".+_tc([\d.]+)_tmc([\d.]+)_ped.tcal", path)
        temperature_pedestal_primary = float(regex_ped.group(2))

        pedestal = PedestalTargetCalib.from_tcal(path)
        values = pedestal.pedestal
        spread = pedestal.std

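        # The first (sorted) pedestal file serves as the reference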
        if reference_values is None:
            reference_values = values
            reference_spread = spread

        delta = values - reference_values
        delta_mean = delta.mean()
        delta_std = delta.std()
        delta_channel_mean = delta[0].mean()
        delta_channel_std = delta[0].std()

        # delta_spread = spread - reference_spread
        spread_mean = spread.mean()
        spread_std = spread.std()

        data.append(
            dict(
                temperature=temperature_pedestal_primary,
                delta_mean=delta_mean,
                delta_std=delta_std,
                delta_channel_mean=delta_channel_mean,
                delta_channel_std=delta_channel_std,
                spread_mean=spread_mean,
                spread_std=spread_std,
            ))

    with HDF5Writer(output_path) as writer:
        writer.write(data=pd.DataFrame(data))
Code Example #11
def process(path, output):
    with HDF5Reader(path) as reader:
        df = reader.read("data")

    d_list = []

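    # Summarise the charge distribution per extractor and run key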
    for extractor, group in df.groupby("extractor"):
        params = dict(extractor=extractor)
        for key, group_key in group.groupby("key"):
            charge = group_key['charge'].values
            params[f'mean_{key}'] = np.mean(charge)
            params[f'std_{key}'] = np.std(charge)
        d_list.append(params)

    df_output = pd.DataFrame(d_list)
    df_output['sn_on_50'] = df_output['mean_on_50'] / df_output['std_off']
    df_output['sn_on_3'] = df_output['mean_on_3'] / df_output['std_off']

    with HDF5Writer(output) as writer:
        writer.write(data=df_output)
Code Example #12
def main():
    output_path = get_data("d190918_alpha/onsky_hillas.h5")
    path_list = [
        glob(get_astri_2019("d2019-05-01_mrk501/*_hillas.h5")),
        glob(get_astri_2019("d2019-05-02_PG1553+113/*_hillas.h5")),
        glob(get_astri_2019("d2019-05-02_mrk421/*_hillas.h5")),
        glob(get_astri_2019("d2019-05-02_mrk501/*_hillas.h5")),
        glob(get_astri_2019("d2019-05-06_mrk501/*_hillas.h5")),
        glob(get_astri_2019("d2019-05-09_mrk421/*_hillas.h5")),
        glob(get_astri_2019("d2019-06-12_mrk421_moonlight/*_hillas.h5")),
        glob(get_astri_2019("d2019-06-12_mrk501/*_hillas.h5")),
        glob(get_astri_2019("d2019-06-12_mrk501_moonlight/*_hillas.h5")),
    ]

    with HDF5Writer(output_path) as writer:
        for iinv, hillas_paths in enumerate(path_list):
            hillas_paths = sort_file_list(hillas_paths)

            for ifile, input_path in enumerate(hillas_paths):
                with HDF5Reader(input_path) as reader:
                    if ifile == 0:
                        writer.add_mapping(reader.get_mapping())
                        writer.add_metadata(**reader.get_metadata())

                    df = reader.read("data")
                    df_pointing = reader.read("pointing")
                    df_source = reader.read("source")
                    source_name = reader.get_metadata("source")['source_name']

                    df['iinv'] = iinv
                    df['ifile'] = ifile
                    df['source_ra'] = df_source['source_ra'].values
                    df['source_dec'] = df_source['source_dec'].values
                    df['altitude_raw'] = df_pointing['altitude_raw'].values
                    df['altitude_cor'] = df_pointing['altitude_cor'].values
                    df['azimuth_raw'] = df_pointing['azimuth_raw'].values
                    df['azimuth_cor'] = df_pointing['azimuth_cor'].values
                    df['source_name'] = source_name

                    writer.append(df, 'data')
Code Example #13
def main():
    description = ('Merge together the hillas.h5 files into a single '
                   'hillas.h5 file.')
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=Formatter)
    parser.add_argument('-f',
                        '--files',
                        dest='input_paths',
                        nargs='+',
                        help='paths to the HDF5 hillas files')
    parser.add_argument('-o',
                        dest='output_path',
                        required=True,
                        help='output path to store the merged file')
    args = parser.parse_args()

    input_paths = sort_file_list(args.input_paths)
    output_path = args.output_path

    n_files = len(input_paths)

    with HDF5Writer(output_path) as writer:
        for ifile, input_path in enumerate(input_paths):
            print("PROGRESS: Processing file {}/{}".format(ifile + 1, n_files))

            with HDF5Reader(input_path) as reader:
                if ifile == 0:
                    writer.add_mapping(reader.get_mapping())
                    writer.add_metadata(**reader.get_metadata())

                keys = ['data', 'pointing', 'mc', 'mcheader']
                for key in keys:
                    if key not in reader.dataframe_keys:
                        continue

                    it = enumerate(reader.iterate_over_chunks(key, 1000))
                    for ientry, df in it:
                        writer.append(df, key=key)
Code Example #14
def main():
    paths = glob(get_astri_2019("*/*_r1.tio"))
    pattern = re.compile(r"(?:.+?)/d(.+?)/Run(\d+?)_r1.tio")
    d_list = []
    for path in paths:
        reader = TIOReader(path)
        wfs = reader[0]
        nudge, temperature = get_nudge_and_temperature_from_reader(reader)

        regexp = re.search(pattern, path)
        investigation = regexp.group(1)
        run_id = regexp.group(2)

        d_list.append(
            dict(investigation=investigation,
                 run_id=run_id,
                 t_cpu=wfs.t_cpu,
                 nudge=nudge,
                 temperature=temperature))

    df = pd.DataFrame(d_list)
    with HDF5Writer(get_data("d190507_check_amplitude_calib/data.h5")) as w:
        w.write(data=df)
Code Example #15
def main():
    # paths = glob(get_astri_2019("d2019-04-23_nudges/mc/*.simtel.gz"))
    # output = get_astri_2019("d2019-04-23_nudges/mc/charge.h5")
    paths = glob(get_astri_2019("d2019-04-23_nudges/mc_191011/*.simtel.gz"))
    output = get_astri_2019("d2019-04-23_nudges/mc_191011/charge.h5")

    with HDF5Writer(output) as writer:
        for path in paths:
            reader = SimtelReader(path)

            kw = dict(
                n_pixels=reader.n_pixels,
                n_samples=reader.n_samples,
                mapping=reader.mapping,
                reference_pulse_path=reader.reference_pulse_path,
            )
            baseline_subtractor = BaselineSubtractor(reader)
            extractor_onsky = OnskyExtractor(**kw)
            common = Common(**kw, _disable_by_default=True, waveform_max=True)

            pixel_array = np.arange(reader.n_pixels)

            desc = "Looping over file"
            for wfs in tqdm(reader, total=reader.n_events, desc=desc):
                iev = wfs.iev
                wfs = baseline_subtractor.subtract(wfs)

                params = dict(
                    iev=iev,
                    pixel=pixel_array,
                    onsky=extractor_onsky.process(wfs)['charge_onsky'],
                    waveform_max=common.process(wfs)['waveform_max'],
                    mc_true=wfs.mc_true,
                )

                df = pd.DataFrame(params)
                writer.append(df, key='data')
Code Example #16
def process(r0_paths, pedestal, tf, output_path):
    readers = [TIOReader(path) for path in r0_paths]
    df_list = []

    channel = np.arange(readers[0].n_pixels)

    desc0 = "Extracting charges"
    for reader in tqdm(readers, total=len(readers), desc=desc0):
        regex = re.search(r".+Amplitude_([-\d]+)_.+_r0.tio", reader.path)
        amplitude = int(regex.group(1))
        for wfs in tqdm(reader, total=reader.n_events):
            if wfs.missing_packets:
                continue
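            # Skip the first 32 samples and realign the first cell id to match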
            cells = get_cell_ids_for_waveform(wfs.first_cell_id,
                                              reader.n_samples, reader.n_cells)
            wfs = wfs[:, 32:]
            wfs.first_cell_id = cells[32]

            iev = wfs.iev
            first_cell_id = wfs.first_cell_id
            wfs = correct_overflow(wfs)
            wfs = pedestal.subtract_pedestal(wfs, first_cell_id)
            if tf is not None:
                wfs = tf.apply_tf(wfs, first_cell_id)
            charge = wfs[:, 4:21].sum(1)

            df_list.append(
                pd.DataFrame(
                    dict(amplitude=amplitude,
                         iev=iev,
                         fci=first_cell_id,
                         channel=channel,
                         charge=charge)))

    df = pd.concat(df_list, ignore_index=True)
    with HDF5Writer(output_path) as writer:
        writer.write(data=df)
Code Example #17
def main():
    runlist_path = get_astri_2019("d2019-04-23_nudges/bright_50pe/runlist.txt")
    df_runlist = pd.read_csv(runlist_path, sep='\t')
    output = get_astri_2019("d2019-04-23_nudges/bright_50pe/charge.h5")

    with HDF5Writer(output) as writer:
        mapping = None
        for _, row in df_runlist.iterrows():
            run = row['run']
            nudge = int(row['nudge'])
            path = join(dirname(runlist_path), f"Run{run:05d}_r1.tio")
            reader = ReaderR1(path, max_events=500)
            mapping = reader.mapping

            kw = dict(
                n_pixels=reader.n_pixels,
                n_samples=reader.n_samples,
                mapping=reader.mapping,
                reference_pulse_path=reader.reference_pulse_path,
            )
            baseline_subtractor = BaselineSubtractor(reader)
            time_calibrator = TimeCalibrator()
            extractor_cc = CrossCorrelation(**kw)
            extractor_onsky_calib = OnskyCalibExtractor(**kw)
            extractor_onsky = OnskyExtractor(**kw)
            common = Common(**kw, _disable_by_default=True, waveform_max=True)

            pixel_array = np.arange(reader.n_pixels)

            monitor = get_nudge_and_temperature_from_reader(reader)
            nudge_from_dac, temperature = monitor
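            # Cross-check the runlist nudge against the value read from the file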
            assert nudge == nudge_from_dac

            desc = "Looping over file"
            for wfs in tqdm(reader, total=reader.n_events, desc=desc):
                iev = wfs.iev
                if wfs.stale.any():
                    continue

                wfs = time_calibrator(wfs)
                wfs = baseline_subtractor.subtract(wfs)

                cc = extractor_cc.process(wfs)['charge_cc']
                onsky_calib = extractor_onsky_calib.process(
                    wfs)['charge_onskycalib']
                onsky = extractor_onsky.process(wfs)['charge_onsky']
                waveform_max = common.process(wfs)['waveform_max']

                params = dict(
                    nudge=nudge,
                    nudge_from_dac=nudge_from_dac,
                    temperature=temperature,
                    iev=iev,
                    pixel=pixel_array,
                    cc=cc,
                    onsky_calib=onsky_calib,
                    onsky=onsky,
                    waveform_max=waveform_max,
                )

                df = pd.DataFrame(params)
                writer.append(df, key='data')

        writer.add_mapping(mapping)
Code Example #18
def main():
    paths = glob("/Users/Jason/Data/d2019-04-23_nudges/mc/*.simtel.gz")
    output = get_data("d190520_charge_extraction/mc/charge.h5")

    # Use first run to initialise extractors
    reader = SimtelReader(paths[0])
    pixel_array = np.arange(reader.n_pixels)
    kw = dict(
        n_pixels=reader.n_pixels,
        n_samples=reader.n_samples,
        mapping=reader.mapping,
        reference_pulse_path=reader.reference_pulse_path,
    )
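    # Candidate charge extractors: cross-correlation, fixed-window peak
    # integrators and sliding windows, in local and neighbour variants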
    extractors = dict(
        cc_local=(
            CrossCorrelationLocal(**kw), "charge_cc_local"
        ),
        cc_nn=(
            CrossCorrelationNeighbour(**kw), "charge_cc_nn"
        ),
        local_4_2=(
            CtapipeLocalPeakIntegrator(
                **kw, window_size=4, window_shift=2
            ), "charge_local"
        ),
        local_8_4=(
            CtapipeLocalPeakIntegrator(
                **kw, window_size=8, window_shift=4
            ), "charge_local"
        ),
        local_6_3=(
            CtapipeLocalPeakIntegrator(
                **kw, window_size=6, window_shift=3
            ), "charge_local"
        ),
        nn_4_2=(
            CtapipeNeighbourPeakIntegrator(
                **kw, window_size=4, window_shift=2
            ), "charge_nn"
        ),
        nn_8_4=(
            CtapipeNeighbourPeakIntegrator(
                **kw, window_size=8, window_shift=4
            ), "charge_nn"
        ),
        nn_6_3=(
            CtapipeNeighbourPeakIntegrator(
                **kw, window_size=6, window_shift=3
            ), "charge_nn"
        ),
        sliding_local_4=(
            SlidingWindowLocal(**kw, window_size=4), "charge_sliding_local"
        ),
        sliding_local_8=(
            SlidingWindowLocal(**kw, window_size=8), "charge_sliding_local"
        ),
        sliding_local_6=(
            SlidingWindowLocal(**kw, window_size=6), "charge_sliding_local"
        ),
        sliding_nn_4=(
            SlidingWindowNeighbour(**kw, window_size=4), "charge_sliding_nn"
        ),
        sliding_nn_8=(
            SlidingWindowNeighbour(**kw, window_size=8), "charge_sliding_nn"
        ),
        sliding_nn_6=(
            SlidingWindowNeighbour(**kw, window_size=6), "charge_sliding_nn"
        ),
    )

    with HDF5Writer(output) as writer:
        for ipath, path in enumerate(paths):
            print(f"Processing file {ipath+1}/{len(paths)}")
            reader = SimtelReader(path)
            baseline_subtractor = BaselineSubtractor(reader)
            n_events = reader.n_events
            for waveforms in tqdm(reader, total=n_events):
                params = dict()
                # Subtract the per-event baseline before extracting charge
                waveforms = baseline_subtractor.subtract(waveforms)

                for key, (extractor, column) in extractors.items():
                    params[key] = extractor.process(waveforms)[column]

                params['iobs'] = ipath
                params['iev'] = waveforms.iev
                params['pixel'] = pixel_array
                params['true'] = waveforms.mc_true

                writer.append(pd.DataFrame(params), key='data')
Code Example #19
def main():
    parser = argparse.ArgumentParser(
        description='Process a monitor file into a HDF5 pandas dataframe',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '-f', '--file', dest='input_path',
        help='path to the monitor file'
    )
    parser.add_argument(
        '-o', '--output', dest='output_path',
        help='path to store the output file'
    )
    args = parser.parse_args()

    input_path = args.input_path
    output_path = args.output_path

    n_lines = get_number_lines(input_path)
    with HDF5Writer(output_path) as writer:
        with open(input_path) as file:
            iev = 0
            for line in tqdm(file, desc="Reading lines", total=n_lines):
                if line and line != '\n':
                    try:
                        if "Monitoring Event Done" in line:
                            iev += 1
                            continue
                        elif "EventBuildingReport" in line:
                            continue
                        elif "Counter" in line:
                            continue
                        elif line.startswith('\t') or line.startswith(' '):
                            continue
                        elif line.startswith("Number"):
                            continue
                        elif line.startswith("Start"):
                            continue

                        data = line.replace('\n', '').replace('\t', " ").rstrip()
                        data = data.replace(": TM IS NOT CONTACTABLE", " nan")
                        data = data.split(" ")

                        t_cpu = get_datetime(f"{data[0]} {data[1]}")

                        device = data[2]
                        measurement = data[3]
                        key = device + "_" + measurement

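                        # Dispatch on device type and token count to parse the measurement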
                        if (device == "TM") and (len(data) == 6):
                            icomp = np.array(data[4], dtype=int)
                            value = float(data[5])
                        elif (device == "TM") and (len(data) == 21):
                            icomp = np.arange(16) + int(data[4]) * 16
                            value = np.array(data[5:], dtype=float)
                        elif (device == "BP") and (len(data) == 36):
                            icomp = np.arange(32)
                            value = np.array(data[4:], dtype=float)
                        elif len(data) == 5:
                            icomp = np.array(0, dtype=int)
                            value = float(data[4])
                        elif len(data) == 4:
                            continue
                        else:
                            raise ValueError(f"Unrecognised line: {line!r}")

                        entry = dict(
                            iev=iev, t_cpu=t_cpu, icomp=icomp, value=value
                        )
                        entry = pd.DataFrame(entry, index=np.arange(icomp.size))
                        writer.append(entry, key=key, expectedrows=n_lines)

                    except Exception:
                        # Skip lines that fail to parse
                        pass
Code Example #20
File: extract_spe.py Project: watsonjj/CHECLabPy
def main():
    parser = argparse.ArgumentParser(
        description='Extract and fit the Single-Photoelectron spectrum '
        'from N dl1 files simultaneously',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f',
                        '--files',
                        dest='input_paths',
                        nargs='+',
                        help='paths to the input dl1 run files')
    parser.add_argument(
        '-o',
        '--output',
        dest='output_path',
        required=True,
        help='path to store the output HDF5 file containing the SPE fit result'
    )
    parser.add_argument(
        '-C',
        '--charge_col_name',
        dest='charge_col_name',
        required=True,
        help='The column name of the charge to be used in the fit.')
    parser.add_argument('-s',
                        '--fitter',
                        dest='fitter',
                        default='SiPMGentileFitter',
                        choices=SpectrumFitterFactory.subclass_names,
                        help='SpectrumFitter to use')
    parser.add_argument('-c',
                        '--config',
                        dest='config',
                        help='Path to SpectrumFitter configuration YAML file')
    parser.add_argument(
        '-p',
        '--pixel',
        dest='plot_pixel',
        type=int,
        help='Enter plot mode, and plot the spectrum and fit for the pixel')
    parser.add_argument('--n_processes',
                        dest='n_processes',
                        type=int,
                        default=1,
                        help='Multi-process each pixel in parallel')

    args = parser.parse_args()

    input_paths = args.input_paths
    output_path = args.output_path
    charge_col_name = args.charge_col_name
    fitter_name = args.fitter
    config_path = args.config
    plot_pixel = args.plot_pixel
    n_processes = args.n_processes

    # Get charges
    charges = []
    mapping = None
    for path in input_paths:
        with DL1Reader(path) as reader:
            charges.append(
                reader.select_column(charge_col_name).values.reshape(
                    (reader.n_events, reader.n_pixels)))
            if mapping is None:
                mapping = reader.mapping
    n_illuminations = len(charges)

    # Create fitter class
    fitter = SpectrumFitterFactory.produce(
        product_name=fitter_name,
        n_illuminations=n_illuminations,
    )
    if config_path is not None:
        fitter.load_config(config_path)
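    # Expand the single lambda_ initial value into one parameter per illumination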
    initial = {param.name: param.initial for param in fitter.parameters}
    lambda_initial = initial.pop("lambda_")
    for i in range(n_illuminations):
        initial[f"lambda_{i}"] = lambda_initial

    # Plot mode
    if plot_pixel is not None:
        pixel_charges = [
            charges[i][:, plot_pixel] for i in range(n_illuminations)
        ]
        fitter.apply(*pixel_charges)
        p_fit = SpectrumFitPlotter(n_illuminations)
        p_fit.plot(*fitter.charge_histogram, *fitter.fit_result_curve,
                   fitter.fit_result_values, fitter.fit_result_errors, initial)
        p_fit.fig.suptitle(
            f"{fitter_name}, {n_illuminations} Illuminations, Pixel={plot_pixel}",
            x=0.75)
        p_fit.show()
        exit()

    pixel_fitter = PixelFitApplier(fitter)
    if n_processes > 1:
        pixel_fitter.multiprocess(n_processes, *charges)
    else:
        pixel_fitter.process(*charges)

    df_values = pd.DataFrame(list(pixel_fitter.pixel_values.values()))
    df_errors = pd.DataFrame(list(pixel_fitter.pixel_errors.values()))
    df_arrays = pd.DataFrame(list(pixel_fitter.pixel_arrays.values()))
    df_values = df_values.set_index('pixel')
    df_errors = df_errors.set_index('pixel')
    df_arrays = df_arrays.set_index('pixel')

    with HDF5Writer(output_path) as writer:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', PerformanceWarning)
            writer.write(
                values=df_values,
                errors=df_errors,
                arrays=df_arrays,
            )
        writer.add_mapping(mapping)
        writer.add_metadata(
            files=input_paths,
            fitter=fitter.__class__.__name__,
            n_illuminations=n_illuminations,
            n_pixels=charges[0].shape,
            initial=initial,
        )
Code Example #21
import pandas as pd
from CHECLabPy.core.io import HDF5Reader, HDF5Writer

path = "/Users/Jason/Software/sstcam_sandbox/sstcam_sandbox/d190506_astri_publicity/events.txt"
output_path = path.replace(".txt", "_hillas.h5")

with HDF5Writer(output_path) as writer:
    # Each row of the event list names a hillas file and an event to extract
    df_events = pd.read_csv(path, sep='\t')
    ifile = 0
    for _, row in df_events.iterrows():
        event_path = row['path']
        iev = row['iev']

        with HDF5Reader(event_path) as reader:

            if ifile == 0:
                writer.add_mapping(reader.get_mapping())
                writer.add_metadata(**reader.get_metadata())

            keys = ['data', 'pointing', 'mc', 'mcheader']
            for key in keys:
                if key not in reader.dataframe_keys:
                    continue
                df = reader.read(key)
                df = df.loc[df['iev'] == iev]
                writer.append(df, key=key)
        ifile += 1
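
All of the examples above share the same HDF5Writer pattern: open the writer as a context manager, write() complete dataframes as named keys or append() row batches to a key, and optionally attach a pixel mapping and metadata. The following minimal round-trip sketch assumes only the CHECLabPy.core.io classes imported in Code Example #21; the file name and column values are hypothetical:

import pandas as pd
from CHECLabPy.core.io import HDF5Reader, HDF5Writer

# Append one small batch of rows per "event" under the "data" key
with HDF5Writer("example.h5") as writer:
    for iev in range(3):
        df = pd.DataFrame(dict(iev=iev, pixel=[0, 1], charge=[10.2, 9.8]))
        writer.append(df, key='data')
    writer.add_metadata(n_events=3)

# Read the accumulated table back
with HDF5Reader("example.h5") as reader:
    df = reader.read("data")
    print(df)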