Code Example #1
class FITSIndexWriter(Tool):
    name = "FITSIndexWriter"
    description = __doc__
    example = """
    To create DL3 index files with default values:
    > lstchain_create_dl3_index_files
        -d /path/to/DL3/files/

    Or specify some more configurations:
    > lstchain_create_dl3_index_files
        -d /path/to/DL3/files/
        -o /path/to/DL3/index/files
        -p dl3*[run_1-run_n]*.fits.gz
        --overwrite
    """

    input_dl3_dir = traits.Path(
        help="Input path of DL3 files",
        exists=True,
        directory_ok=True,
        file_ok=False
    ).tag(config=True)

    file_pattern = traits.Unicode(
        help="File pattern to search in the given Path",
        default_value="dl3*.fits*"
    ).tag(config=True)

    output_index_path = traits.Path(
        help="Output path for the Index files",
        exists=True,
        directory_ok=True,
        file_ok=False,
        default_value=None
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    aliases = {
        ("d", "input-dl3-dir"): "FITSIndexWriter.input_dl3_dir",
        ("o", "output-index-path"): "FITSIndexWriter.output_index_path",
        ("p", "file-pattern"): "FITSIndexWriter.file_pattern",
    }

    flags = {
        "overwrite": (
            {"FITSIndexWriter": {"overwrite": True}},
            "overwrite output files if True",
        )
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.file_list = []
        self.hdu_index_filename = "hdu-index.fits.gz"
        self.obs_index_filename = "obs-index.fits.gz"

    def setup(self):

        list_files = sorted(self.input_dl3_dir.glob(self.file_pattern))
        if not list_files:
            self.log.critical(f"No files found with pattern {self.file_pattern}")

        for f in list_files:
            self.file_list.append(f.name)
            Provenance().add_input_file(f)

        if not self.output_index_path:
            self.output_index_path = self.input_dl3_dir

        self.hdu_index_file = self.output_index_path / self.hdu_index_filename
        self.obs_index_file = self.output_index_path / self.obs_index_filename

        self.provenance_log = self.output_index_path / (self.name + ".provenance.log")

        if self.hdu_index_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.hdu_index_file}")
                self.hdu_index_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.hdu_index_file} already exists,"
                    "use --overwrite to overwrite"
                )

        if self.obs_index_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.obs_index_file}")
                self.obs_index_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.obs_index_file} already exists,"
                    " use --overwrite to overwrite"
                )

        self.log.debug("HDU Index file: %s", self.hdu_index_file)
        self.log.debug("OBS Index file: %s", self.obs_index_file)

    def start(self):

        create_hdu_index_hdu(
            self.file_list,
            self.input_dl3_dir,
            self.hdu_index_file,
            self.overwrite,
        )
        create_obs_index_hdu(
            self.file_list,
            self.input_dl3_dir,
            self.obs_index_file,
            self.overwrite
        )
        self.log.debug("HDULists created for the index files")

    def finish(self):

        Provenance().add_output_file(self.hdu_index_file)
        Provenance().add_output_file(self.obs_index_file)
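
Like the other classes in these examples, FITSIndexWriter is a ctapipe Tool; a minimal sketch of how such a tool is usually exposed as a console entry point (an assumed `main` wrapper, not part of the code above; `Tool.run()` invokes `setup()`, `start()` and `finish()` in order):

def main():
    tool = FITSIndexWriter()
    tool.run()


if __name__ == "__main__":
    main()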
Code Example #2
File: display_dl1.py  Project: nhrjr/ctapipe
class ImagePlotter(Component):
    """ Plotter for camera images """

    display = Bool(
        True,
        help="Display the photoelectron images on-screen as they are produced."
    ).tag(config=True)
    output_path = traits.Path(
        directory_ok=False,
        help=("Output path for the pdf containing all the images."
              " Set to None for no saved output."),
    ).tag(config=True)

    def __init__(self, subarray, config=None, parent=None, **kwargs):
        """
        Plotter for camera images.

        Parameters
        ----------
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        parent : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs
        """
        super().__init__(config=config, parent=parent, **kwargs)
        self._current_tel = None
        self.c_intensity = None
        self.c_peak_time = None
        self.cb_intensity = None
        self.cb_peak_time = None
        self.pdf = None
        self.subarray = subarray

        self._init_figure()

    def _init_figure(self):
        self.fig = plt.figure(figsize=(16, 7))
        self.ax_intensity = self.fig.add_subplot(1, 2, 1)
        self.ax_peak_time = self.fig.add_subplot(1, 2, 2)
        if self.output_path:
            self.log.info(f"Creating PDF: {self.output_path}")
            self.pdf = PdfPages(self.output_path)

    def plot(self, event, telid):
        image = event.dl1.tel[telid].image
        peak_time = event.dl1.tel[telid].peak_time

        if self._current_tel != telid:
            self._current_tel = telid

            self.ax_intensity.cla()
            self.ax_peak_time.cla()

            # Redraw camera
            geom = self.subarray.tel[telid].camera.geometry
            self.c_intensity = CameraDisplay(geom, ax=self.ax_intensity)

            time_cmap = copy(plt.get_cmap("RdBu_r"))
            time_cmap.set_under("gray")
            time_cmap.set_over("gray")
            self.c_peak_time = CameraDisplay(geom,
                                             ax=self.ax_peak_time,
                                             cmap=time_cmap)

            if not self.cb_intensity:
                self.c_intensity.add_colorbar(ax=self.ax_intensity,
                                              label="Intensity (p.e.)")
                self.cb_intensity = self.c_intensity.colorbar
            else:
                self.c_intensity.colorbar = self.cb_intensity
                self.c_intensity.update(True)
            if not self.cb_peak_time:
                self.c_peak_time.add_colorbar(ax=self.ax_peak_time,
                                              label="Pulse Time (ns)")
                self.cb_peak_time = self.c_peak_time.colorbar
            else:
                self.c_peak_time.colorbar = self.cb_peak_time
                self.c_peak_time.update(True)

        self.c_intensity.image = image
        self.c_peak_time.image = peak_time

        # center around the brightest pixel, show 10 ns total
        t_chargemax = peak_time[image.argmax()]
        self.c_peak_time.set_limits_minmax(t_chargemax - 5, t_chargemax + 5)

        self.fig.suptitle("Event_index={}  Event_id={}  Telescope={}".format(
            event.count, event.index.event_id, telid))

        if self.display:
            plt.pause(0.001)

        if self.pdf is not None:
            self.pdf.savefig(self.fig)

    def finish(self):
        if self.pdf is not None:
            self.log.info("Closing PDF")
            self.pdf.close()
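
A hedged usage sketch for ImagePlotter (a hypothetical driver loop; in ctapipe this component is normally driven by a Tool, which is not shown here, and the input file name below is a placeholder):

from ctapipe.calib import CameraCalibrator
from ctapipe.io import EventSource

source = EventSource("events.simtel.gz", max_events=10)   # placeholder input
calib = CameraCalibrator(subarray=source.subarray)
plotter = ImagePlotter(subarray=source.subarray, display=False,
                       output_path="dl1_images.pdf")

for event in source:
    calib(event)                      # fill event.dl1 from the raw waveforms
    for tel_id in event.dl1.tel:
        plotter.plot(event, tel_id)   # draw intensity and peak-time images

plotter.finish()                      # close the PDF if one was opened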
Code Example #3
class FitIntensityScan(Tool):
    """
     Tool that generates a HDF5 file with the results of the fit
     of the signal of an intensity scan (filter scan in the case of LST), this is useful to estimate the
     quadratic noise term to include in the standard F-factor formula

     To be run with
     lstchain_fit_intensity_scan --config config.json

     """

    name = "FitFilterScan"
    description = "Tool to fit an intensity scan"

    signal_range = List(
        [[1500, 14000], [200, 14000]],
        help='Signal range to include in the fit for [HG,LG] (camera median in [ADC])'
    ).tag(config=True)

    gain_channels = List(
        [0, 1],
        help='Gain channel to process (HG=0, LG=1)'
    ).tag(config=True)

    sub_run = Int(
        0,
        help='Sub run number to process'
    ).tag(config=True)

    run_list = List(
        help='List of runs',
    ).tag(config=True)

    input_dir = traits.Path(
        directory_ok=True,
        help='directory with the input files',
    ).tag(config=True)

    input_prefix = traits.Unicode(
        default_value="calibration",
        help='Prefix to select calibration files to fit',
    ).tag(config=True)

    output_path = traits.Path(
        directory_ok=False, default_value="filter_scan_fit.h5",
        help='Path to the output HDF5 file',
    ).tag(config=True)

    plot_path = traits.Path(
        directory_ok=False, default_value="filter_scan_fit.pdf",
        help='Path to pdf file with check plots',
    ).tag(config=True)

    fit_initialization = List(
        [[100.0, 0.001], [6.0, 0.001]],
        help='Fit parameters initialization [gain (ADC/pe), B term] for HG and LG'
    ).tag(config=True)

    fractional_variance_error = Float(
        0.02,
        help='Constant fractional error assumed for the y fit coordinate (variance)'
    ).tag(config=True)

    squared_excess_noise_factor = Float(
        1.222,
        help='Excess noise factor squared: 1+ Var(gain)/Mean(Gain)**2'
    ).tag(config=True)

    aliases = Dict(dict(
        signal_range='FitIntensityScan.signal_range',
        input_dir='FitIntensityScan.input_dir',
        output_path='FitIntensityScan.output_path',
        plot_path='FitIntensityScan.plot_path',
        sub_run='FitIntensityScan.sub_run',
        gain_channels='FitIntensityScan.gain_channels',
        run_list='FitIntensityScan.run_list',
        input_prefix='FitIntensityScan.input_prefix',
    ))

    def __init__(self, **kwargs):
        """
        For getting help run:
        python calc_camera_calibration.py --help
        """
        super().__init__(**kwargs)
        for chan in self.gain_channels:
            if not self.signal_range[chan]:
                raise ValueError(f"Trailet signal_range {self.signal_range} inconsistent with"
                                 f"trailet {self.gain_channels}. \n")

        self.unusable_pixels = [None, None]
        self.signal = [None, None]
        self.variance = [None, None]
        self.selected_runs = [[], []]
        self.fit_parameters = np.zeros((constants.N_GAINS, constants.N_PIXELS, 2))
        self.fit_cov_matrix = np.zeros((constants.N_GAINS, constants.N_PIXELS, 4))
        self.fit_error = np.zeros((constants.N_GAINS, constants.N_PIXELS))

    def setup(self):

        ff_data = FlatFieldContainer()
        ped_data = PedestalContainer()
        calib_data = WaveformCalibrationContainer()
        channel = ["HG", "LG"]

        # loop on runs and memorize data
        try:
            for i, run in enumerate(self.run_list):

                file_list = sorted(
                    Path(f"{self.input_dir}").rglob(f'{self.input_prefix}*.Run{run:05d}.{self.sub_run:04d}.h5'))

                if len(file_list) == 0:
                    raise IOError(f"Input file for run {run} do not found. \n")

                if len(file_list) > 1:
                    raise IOError(f"Input file for run {run} is more than one: {file_list} \n")

                inp_file = file_list[0]
                if os.path.getsize(inp_file) < 100:
                    raise IOError(f"file size run {run} is too short \n")

                if read_calibration_file(inp_file, ff_data, calib_data, ped_data):
                    self.log.debug(f"Read file {inp_file}")
                    for chan in self.gain_channels:
                        # verify that the median signal is inside the requested range
                        median_charge = np.median(ff_data.charge_median[chan])

                        if median_charge > self.signal_range[chan][1] or median_charge < self.signal_range[chan][0]:
                            self.log.debug(
                                f"{channel[chan]}: skip run {run}, signal out of range {median_charge:6.1f} ADC")
                            continue

                        signal = ff_data.charge_median[chan] - ped_data.charge_median[chan]
                        variance = ff_data.charge_std[chan] ** 2 - ped_data.charge_std[chan] ** 2

                        if self.signal[chan] is None:
                            self.signal[chan] = signal
                            self.variance[chan] = variance
                            self.unusable_pixels[chan] = calib_data.unusable_pixels[chan]

                        else:
                            self.signal[chan] = np.column_stack((self.signal[chan], signal))
                            self.variance[chan] = np.column_stack((self.variance[chan], variance))
                            self.unusable_pixels[chan] = np.column_stack(
                                (self.unusable_pixels[chan], calib_data.unusable_pixels[chan]))
                        self.selected_runs[chan].append(run)
                        self.log.info(f"{channel[chan]}: select run {run}, median charge {median_charge:6.1f} ADC\n")
                else:
                    raise IOError(f"--> Problem in reading {run}\n")

            # check to have enough selected runs
            for chan in self.gain_channels:
                if self.signal[chan] is None:
                    raise IOError(f"--> Zero runs selected for channel {channel[chan]} \n")

                if self.signal[chan].size < MIN_N_RUNS * constants.N_PIXELS:
                    raise IOError(
                        f"--> Not enough runs selected for channel {channel[chan]}: {int(self.signal[chan].size / constants.N_PIXELS)} runs \n")

        except ValueError as e:
            self.log.error(e)

    def start(self):
        """Loop over pixels and fit each one."""

        # only positive parameters
        bounds = [0, 200]

        funfit = partial(quadratic_fit, f2=self.squared_excess_noise_factor)

        for pix in np.arange(constants.N_PIXELS):

            if pix % 100 == 0:
                self.log.debug(f"Pixel {pix}")

            # loop over channel
            for chan in self.gain_channels:

                # fit parameters initialization
                p0 = np.array(self.fit_initialization[chan])

                mask = self.unusable_pixels[chan][pix]
                sig = np.ma.array(self.signal[chan][pix], mask=mask).compressed()
                var = np.ma.array(self.variance[chan][pix], mask=mask).compressed()

                # skip the pixel if not enough data
                if sig.shape[0] < MIN_N_RUNS:
                    self.log.debug(
                        f"Not enough data in pixel {pix} and channel {chan} for the fit ({sig.shape[0]} runs)\n")
                    self.fit_error[chan, pix] = 1
                    continue

                # we assume a constant fractional error
                sigma = self.fractional_variance_error * var

                try:
                    par, par_cov = curve_fit(funfit, sig, var, bounds=bounds, sigma=sigma, p0=p0)
                    self.fit_parameters[chan, pix] = par
                    self.fit_cov_matrix[chan, pix] = par_cov.reshape(4)

                except Exception as e:

                    self.log.error(e)
                    self.log.error(f"Error for pixel {pix} and channel {chan}:\n")
                    self.log.error(f"signal {sig}\n")
                    self.log.error(f"variance {var}\n")

                    self.fit_error[chan, pix] = 1

    def finish(self):
        """
        write fit results in h5 file and the check-plots in pdf file
        """

        gain = np.ma.array(self.fit_parameters.T[0], mask=self.fit_error.T)
        quadratic_term = np.ma.array(self.fit_parameters.T[1], mask=self.fit_error.T)

        # give badly fitted pixels the median value of the B term
        median_quadratic_term = np.ma.median(quadratic_term, axis=0)

        fill_array = np.ones((constants.N_PIXELS, constants.N_GAINS)) * median_quadratic_term

        quadratic_term_corrected = np.ma.filled(quadratic_term, fill_array)

        with h5py.File(self.output_path, 'w') as hf:
            hf.create_dataset('gain', data=gain.T)
            hf.create_dataset('B_term', data=quadratic_term_corrected.T)
            hf.create_dataset('covariance_matrix', data=self.fit_cov_matrix)
            hf.create_dataset('bad_fit_mask', data=self.fit_error)

            # remember the camera median and the variance per run
            channel = ["HG", "LG"]
            for chan in [0, 1]:
                if self.signal[chan] is not None:
                    hf.create_dataset(f'median_signal_{channel[chan]}', data=np.median(self.signal[chan], axis=0))
                    hf.create_dataset(f'median_variance_{channel[chan]}', data=np.median(self.variance[chan], axis=0))
                    hf.create_dataset(f'runs_{channel[chan]}', data=self.selected_runs[chan])

            hf.create_dataset('runs', data=self.run_list)
            hf.create_dataset('sub_run', data=self.sub_run)

            # plot open pdf
            with PdfPages(self.plot_path) as pdf:
                plt.rc("font", size=15)

                for chan in self.gain_channels:
                    # plot the used runs and their median camera charge
                    fig = plt.figure((chan + 1), figsize=(8, 20))
                    fig.suptitle(f"{channel[chan]} channel", fontsize=25)
                    ax = plt.subplot(2, 1, 1)
                    ax.grid(True)
                    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                    ax.yaxis.set_major_locator(plt.MultipleLocator(1))

                    plt.plot(np.median(self.signal[chan], axis=0), self.selected_runs[chan], "o")
                    plt.xlabel(r'$\mathrm{\overline{Q}-\overline{ped}}$ [ADC]')
                    plt.ylabel(r'Runs used in the fit')

                    plt.subplot(2, 1, 2)
                    camera = load_camera_geometry()
                    camera = camera.transform_to(EngineeringCameraFrame())
                    disp = CameraDisplay(camera)
                    image = self.fit_parameters.T[1].T * 100
                    mymin = np.median(image[chan]) - 3 * np.std(image[chan])
                    mymax = np.median(image[chan]) + 3 * np.std(image[chan])
                    disp.set_limits_minmax(mymin, mymax)
                    mask = np.where(self.fit_error[chan] == 1)[0]
                    disp.highlight_pixels(mask, linewidth=2.5, color="green")
                    disp.image = image[chan]
                    disp.cmap = plt.cm.coolwarm
                    plt.title(f"{channel[chan]} Fitted B values [%]")
                    disp.add_colorbar()
                    plt.tight_layout()
                    pdf.savefig()

                    # plot the fit results and residuals for four arbitrary pixels
                    fig = plt.figure((chan + 1) * 10, figsize=(11, 22))
                    fig.suptitle(f"{channel[chan]} channel", fontsize=25)

                    pad = 0
                    for pix in [0, 600, 1200, 1800]:
                        pad += 1
                        plt.subplot(4, 2, pad)
                        plt.grid(which='minor')

                        mask = self.unusable_pixels[chan][pix]
                        sig = np.ma.array(self.signal[chan][pix], mask=mask).compressed()
                        var = np.ma.array(self.variance[chan][pix], mask=mask).compressed()
                        popt = self.fit_parameters[chan, pix]

                        # plot points
                        plt.plot(sig, var, 'o', color="C0")

                        # plot fit
                        min_x = min(1000, np.min(sig) * 0.9)
                        max_x = max(10000, np.max(sig) * 1.1)
                        x = np.arange(np.min(sig), np.max(sig))

                        plt.plot(x, quadratic_fit(x, *popt), '--', color="C1",
                                 label=f'Pixel {pix}:\ng={popt[0]:5.2f} [ADC/pe] , B={popt[1]:5.3f}')
                        plt.xlim(min_x, max_x)
                        plt.xlabel('Q-ped [ADC]')
                        plt.ylabel(r'$\mathrm{\sigma_Q^2-\sigma_{ped}^2}$ [$ADC^2$]')
                        plt.xscale('log')
                        plt.yscale('log')
                        plt.legend()

                        # plot residuals
                        pad += 1
                        plt.subplot(4, 2, pad)
                        plt.grid(which='both', axis='both')

                        popt = self.fit_parameters[chan, pix]
                        plt.plot(sig, (quadratic_fit(sig, *popt) - var) / var * 100, 'o', color="C0")
                        plt.xlim(min_x, max_x)
                        plt.xscale('log')
                        plt.ylabel('fit residuals %')
                        plt.xlabel('Q-ped [ADC]')
                        plt.hlines(0, 0, np.max(sig), linestyle='dashed', color="black")

                    plt.tight_layout()
                    pdf.savefig()
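
The fit above relies on a `quadratic_fit` helper imported elsewhere in lstchain. A minimal sketch of the noise model it presumably implements (the standard F-factor formula plus a quadratic term; the exact lstchain signature may differ):

def quadratic_fit(q, gain=1.0, b=0.0, f2=1.222):
    """Variance of the flat-field signal versus median charge q (ADC):
    Var(Q) = F^2 * g * Q + (B * Q)^2, with gain g and quadratic noise term B."""
    return f2 * gain * q + (b * q) ** 2

In the plotting code above, `popt[0]` and `popt[1]` then correspond to g and B, and B (converted to percent) is the quantity shown on the camera display.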
Code Example #4
class IRFFITSWriter(Tool):
    name = "IRFFITSWriter"
    description = __doc__
    example = """
    To generate IRFs from MC gamma only, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --overwrite

    Or to generate all 4 IRFs, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -p /path/to/DL2_MC_proton_file.h5
        -e /path/to/DL2_MC_electron_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)

    Or use a config file for cuts and binning information:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --global-gh-cut 0.9
        --global-theta-cut 0.2
        --irf-obs-time 50

    Or use energy-dependent cuts based on a gamma efficiency:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --energy-dependent-gh
        --energy-dependent-theta
        --gh-efficiency 0.95
        --theta-containment 0.68

    Or generate source-dependent IRFs
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like
        --global-gh-cut 0.9
        --global-alpha-cut 10
        --source-dep

    """

    input_gamma_dl2 = traits.Path(help="Input MC gamma DL2 file",
                                  allow_none=True,
                                  exists=True,
                                  directory_ok=False,
                                  file_ok=True).tag(config=True)

    input_proton_dl2 = traits.Path(help="Input MC proton DL2 file",
                                   allow_none=True,
                                   exists=True,
                                   directory_ok=False,
                                   file_ok=True).tag(config=True)

    input_electron_dl2 = traits.Path(help="Input MC electron DL2 file",
                                     allow_none=True,
                                     exists=True,
                                     directory_ok=False,
                                     file_ok=True).tag(config=True)

    output_irf_file = traits.Path(
        help="IRF output file",
        allow_none=True,
        directory_ok=False,
        file_ok=True,
        default_value="./irf.fits.gz",
    ).tag(config=True)

    irf_obs_time = traits.Float(
        help="Observation time for IRF in hours",
        default_value=50,
    ).tag(config=True)

    point_like = traits.Bool(
        help="True for point_like IRF, False for Full Enclosure",
        default_value=False,
    ).tag(config=True)

    energy_dependent_gh = traits.Bool(
        help="True for applying energy-dependent gammaness cuts",
        default_value=False,
    ).tag(config=True)

    energy_dependent_theta = traits.Bool(
        help="True for applying energy-dependent theta cuts",
        default_value=False,
    ).tag(config=True)

    energy_dependent_alpha = traits.Bool(
        help="True for applying energy-dependent alpha cuts",
        default_value=False,
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    source_dep = traits.Bool(
        help="True for source-dependent analysis",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3Cuts, DataBinning]

    aliases = {
        ("g", "input-gamma-dl2"): "IRFFITSWriter.input_gamma_dl2",
        ("p", "input-proton-dl2"): "IRFFITSWriter.input_proton_dl2",
        ("e", "input-electron-dl2"): "IRFFITSWriter.input_electron_dl2",
        ("o", "output-irf-file"): "IRFFITSWriter.output_irf_file",
        "irf-obs-time": "IRFFITSWriter.irf_obs_time",
        "global-gh-cut": "DL3Cuts.global_gh_cut",
        "gh-efficiency": "DL3Cuts.gh_efficiency",
        "theta-containment": "DL3Cuts.theta_containment",
        "global-theta-cut": "DL3Cuts.global_theta_cut",
        "alpha-containment": "DL3Cuts.alpha_containment",
        "global-alpha-cut": "DL3Cuts.global_alpha_cut",
        "allowed-tels": "DL3Cuts.allowed_tels",
        "overwrite": "IRFFITSWriter.overwrite",
    }

    flags = {
        "point-like": (
            {
                "IRFFITSWriter": {
                    "point_like": True
                }
            },
            "Point like IRFs will be produced, otherwise Full Enclosure",
        ),
        "overwrite": (
            {
                "IRFFITSWriter": {
                    "overwrite": True
                }
            },
            "overwrites output file",
        ),
        "source-dep": (
            {
                "IRFFITSWriter": {
                    "source_dep": True
                }
            },
            "Source-dependent analysis will be performed",
        ),
        "energy-dependent-gh": (
            {
                "IRFFITSWriter": {
                    "energy_dependent_gh": True
                }
            },
            "Uses energy-dependent cuts for gammaness",
        ),
        "energy-dependent-theta": (
            {
                "IRFFITSWriter": {
                    "energy_dependent_theta": True
                }
            },
            "Uses energy-dependent cuts for theta",
        ),
        "energy-dependent-alpha": (
            {
                "IRFFITSWriter": {
                    "energy_dependent_alpha": True
                }
            },
            "Uses energy-dependent cuts for alpha",
        ),
    }

    def setup(self):

        if self.output_irf_file.absolute().exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_irf_file}")
                self.output_irf_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_irf_file} already exists,"
                    " use --overwrite to overwrite")

        filename = self.output_irf_file.name
        if not (filename.endswith('.fits') or filename.endswith('.fits.gz')):
            raise ValueError(
                f"{filename} is not a correct compressed FITS file name"
                "(use .fits or .fits.gz).")

        if self.input_proton_dl2 and self.input_electron_dl2 is not Undefined:
            self.only_gamma_irf = False
        else:
            self.only_gamma_irf = True

        self.event_sel = EventSelector(parent=self)
        self.cuts = DL3Cuts(parent=self)
        self.data_bin = DataBinning(parent=self)

        self.mc_particle = {
            "gamma": {
                "file": self.input_gamma_dl2,
                "target_spectrum": CRAB_MAGIC_JHEAP2015,
            },
        }
        Provenance().add_input_file(self.input_gamma_dl2)

        self.t_obs = self.irf_obs_time * u.hour

        # Read and update MC information
        if not self.only_gamma_irf:
            self.mc_particle["proton"] = {
                "file": self.input_proton_dl2,
                "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            }

            self.mc_particle["electron"] = {
                "file": self.input_electron_dl2,
                "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            }

            Provenance().add_input_file(self.input_proton_dl2)
            Provenance().add_input_file(self.input_electron_dl2)

        self.provenance_log = self.output_irf_file.parent / (self.name +
                                                             ".provenance.log")

    def start(self):

        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(
                p["file"])

            p["mc_type"] = check_mc_type(p["file"])

            self.log.debug(
                f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs)

                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            if not self.source_dep:
                for prefix in ("true", "reco"):
                    k = f"{prefix}_source_fov_offset"
                    p["events"][k] = calculate_source_fov_offset(p["events"],
                                                                 prefix=prefix)

                # calculate theta / distance between reco and assumed source position
                p["events"]["theta"] = calculate_theta(
                    p["events"],
                    assumed_source_az=p["events"]["true_az"],
                    assumed_source_alt=p["events"]["true_alt"],
                )

            else:
                # Alpha cut is applied for source-dependent analysis.
                # To adapt source-dependent analysis to pyirf codes,
                # true position is set as reco position for survived events
                # after alpha cut
                p["events"][
                    "true_source_fov_offset"] = calculate_source_fov_offset(
                        p["events"], prefix="true")
                p["events"]["reco_source_fov_offset"] = p["events"][
                    "true_source_fov_offset"]

        self.log.debug(p["simulation_info"])
        gammas = self.mc_particle["gamma"]["events"]

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.cuts.allowed_tels_filter(gammas)

        if self.energy_dependent_gh:
            self.gh_cuts_gamma = self.cuts.energy_dependent_gh_cuts(
                gammas, reco_energy_bins)
            gammas = self.cuts.apply_energy_dependent_gh_cuts(
                gammas, self.gh_cuts_gamma)
            self.log.info(
                f"Using gamma efficiency of {self.cuts.gh_efficiency}")
        else:
            gammas = self.cuts.apply_global_gh_cut(gammas)
            self.log.info("Using a global gammaness cut of "
                          f"{self.cuts.global_gh_cut}")

        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    self.theta_cuts = self.cuts.energy_dependent_theta_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_theta_cuts(
                        gammas, self.theta_cuts)
                    self.log.info("Using a containment region for theta of "
                                  f"{self.cuts.theta_containment}")
                else:
                    gammas = self.cuts.apply_global_theta_cut(gammas)
                    self.log.info(
                        "Using a global Theta cut of "
                        f"{self.cuts.global_theta_cut} for point-like IRF")
            else:
                if self.energy_dependent_alpha:
                    self.alpha_cuts = self.cuts.energy_dependent_alpha_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_alpha_cuts(
                        gammas, self.alpha_cuts)
                    self.log.info("Using a containment region for alpha of "
                                  f"{self.cuts.alpha_containment} %")
                else:
                    gammas = self.cuts.apply_global_alpha_cut(gammas)
                    self.log.info(
                        'Using a global Alpha cut of '
                        f'{self.cuts.global_alpha_cut} for point like IRF')

        if self.mc_particle["gamma"]["mc_type"] in [
                "point_like", "ring_wobble"
        ]:
            mean_fov_offset = round(
                gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1, mean_fov_offset + 0.1
                               ] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

            if self.energy_dependent_theta:
                fov_offset_bins = [
                    round(gammas["true_source_fov_offset"].min().to_value(),
                          1),
                    round(gammas["true_source_fov_offset"].max().to_value(), 1)
                ] * u.deg
                self.log.info("For RAD MAX, the full FoV is used")

        if not self.only_gamma_irf:
            background = table.vstack([
                self.mc_particle["proton"]["events"],
                self.mc_particle["electron"]["events"]
            ])

            if self.energy_dependent_gh:
                background = self.cuts.apply_energy_dependent_gh_cuts(
                    background, self.gh_cuts_gamma)
            else:
                background = self.cuts.apply_global_gh_cut(background)

            background = self.event_sel.filter_cut(background)
            background = self.cuts.allowed_tels_filter(background)

            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a global gh/theta cut, only a header value is added.
        # For energy-dependent cuts, along with GADF specified RAD_MAX HDU,
        # a new HDU is created, GH_CUTS which is based on RAD_MAX table

        # NOTE: The GH_CUTS HDU is just for provenance and is not supported
        # by GADF or used by any Science Tools
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(map(str, self.cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
        }
        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Updating the HDU headers with the gammaness and theta cuts/efficiency
        if not self.energy_dependent_gh:
            extra_headers["GH_CUT"] = self.cuts.global_gh_cut

        else:
            extra_headers["GH_EFF"] = (self.cuts.gh_efficiency,
                                       "gamma/hadron efficiency")

        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    extra_headers["TH_CONT"] = (
                        self.cuts.theta_containment,
                        "Theta containment region in percentage")
                else:
                    extra_headers["RAD_MAX"] = (self.cuts.global_theta_cut,
                                                'deg')
            else:
                if self.energy_dependent_alpha:
                    extra_headers["AL_CONT"] = (
                        self.cuts.alpha_containment,
                        "Alpha containment region in percentage")
                else:
                    extra_headers["AL_CUT"] = (self.cuts.global_alpha_cut,
                                               'deg')

        # Write HDUs
        self.hdus = [
            fits.PrimaryHDU(),
        ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] in [
                    "point_like", "ring_wobble"
            ]:
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))

        self.log.info("Effective Area HDU created")
        self.edisp = energy_dispersion(
            gammas,
            true_energy_bins,
            fov_offset_bins,
            migration_bins,
        )
        self.hdus.append(
            create_energy_dispersion_hdu(
                self.edisp,
                true_energy_bins,
                migration_bins,
                fov_offset_bins,
                point_like=self.point_like,
                extname="ENERGY DISPERSION",
                **extra_headers,
            ))
        self.log.info("Energy Dispersion HDU created")

        if not self.only_gamma_irf:
            self.background = background_2d(
                background,
                reco_energy_bins=reco_energy_bins,
                fov_offset_bins=background_offset_bins,
                t_obs=self.t_obs,
            )
            self.hdus.append(
                create_background_2d_hdu(
                    self.background.T,
                    reco_energy_bins,
                    background_offset_bins,
                    extname="BACKGROUND",
                    **extra_headers,
                ))
            self.log.info("Background HDU created")

        if not self.point_like:
            self.psf = psf_table(
                gammas,
                true_energy_bins,
                fov_offset_bins=fov_offset_bins,
                source_offset_bins=source_offset_bins,
            )
            self.hdus.append(
                create_psf_table_hdu(
                    self.psf,
                    true_energy_bins,
                    source_offset_bins,
                    fov_offset_bins,
                    extname="PSF",
                    **extra_headers,
                ))
            self.log.info("PSF HDU created")

        if self.energy_dependent_gh:
            # Create a separate temporary header
            gh_header = fits.Header()
            gh_header["CREATOR"] = f"lstchain v{__version__}"
            gh_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                gh_header[k] = v

            self.hdus.append(
                fits.BinTableHDU(self.gh_cuts_gamma,
                                 header=gh_header,
                                 name="GH_CUTS"))
            self.log.info("GH CUTS HDU added")

        if self.energy_dependent_theta and self.point_like:
            if not self.source_dep:
                self.hdus.append(
                    create_rad_max_hdu(self.theta_cuts["cut"][:, np.newaxis],
                                       reco_energy_bins, fov_offset_bins,
                                       **extra_headers))
                self.log.info("RAD MAX HDU added")

        if self.energy_dependent_alpha and self.source_dep:
            # Create a separate temporary header
            alpha_header = fits.Header()
            alpha_header["CREATOR"] = f"lstchain v{__version__}"
            alpha_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                alpha_header[k] = v

            self.hdus.append(
                fits.BinTableHDU(self.alpha_cuts,
                                 header=alpha_header,
                                 name="AL_CUTS"))
            self.log.info("ALPHA CUTS HDU added")

    def finish(self):

        fits.HDUList(self.hdus).writeto(self.output_irf_file,
                                        overwrite=self.overwrite)
        Provenance().add_output_file(self.output_irf_file)
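
A quick way to inspect the IRF file written by this tool, using only astropy (a sketch; the path matches the default `output_irf_file` above):

from astropy.io import fits

with fits.open("./irf.fits.gz") as hdul:
    hdul.info()   # PRIMARY, EFFECTIVE AREA, ENERGY DISPERSION and, depending on
                  # the options used, background, PSF and cut HDUs
    print(repr(hdul["EFFECTIVE AREA"].header))   # global GH_CUT / RAD_MAX values live here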
Code Example #5
class DataReductionFITSWriter(Tool):
    name = "DataReductionFITSWriter"
    description = __doc__
    example = """
    To generate DL3 file from an observed data DL2 file, using default cuts:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg

    Or use a config file for the cuts:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg
        --overwrite
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg
        --global-gh-cut 0.9
        --overwrite

    Or generate source-dependent DL3 files
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-dep
        --overwrite
    """

    input_dl2 = traits.Path(help="Input data DL2 file",
                            exists=True,
                            directory_ok=False,
                            file_ok=True).tag(config=True)

    output_dl3_path = traits.Path(help="DL3 output filedir",
                                  directory_ok=True,
                                  file_ok=False).tag(config=True)

    input_irf = traits.Path(
        help="Compressed FITS file of IRFs",
        exists=True,
        directory_ok=False,
        file_ok=True,
    ).tag(config=True)

    source_name = traits.Unicode(help="Name of Source").tag(config=True)

    source_ra = traits.Unicode(help="RA position of the source").tag(
        config=True)

    source_dec = traits.Unicode(help="DEC position of the source").tag(
        config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    source_dep = traits.Bool(
        help="If True, source-dependent analysis will be performed.",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3Cuts]

    aliases = {
        ("d", "input-dl2"): "DataReductionFITSWriter.input_dl2",
        ("o", "output-dl3-path"): "DataReductionFITSWriter.output_dl3_path",
        "input-irf": "DataReductionFITSWriter.input_irf",
        "global-gh-cut": "DL3Cuts.global_gh_cut",
        "source-name": "DataReductionFITSWriter.source_name",
        "source-ra": "DataReductionFITSWriter.source_ra",
        "source-dec": "DataReductionFITSWriter.source_dec",
    }

    flags = {
        "overwrite": (
            {
                "DataReductionFITSWriter": {
                    "overwrite": True
                }
            },
            "overwrite output file if True",
        ),
        "source-dep": (
            {
                "DataReductionFITSWriter": {
                    "source_dep": True
                }
            },
            "source-dependent analysis if True",
        ),
    }

    def setup(self):

        self.filename_dl3 = dl2_to_dl3_filename(self.input_dl2)
        self.provenance_log = self.output_dl3_path / (self.name +
                                                      ".provenance.log")

        Provenance().add_input_file(self.input_dl2)

        self.event_sel = EventSelector(parent=self)
        self.cuts = DL3Cuts(parent=self)

        self.output_file = self.output_dl3_path.absolute() / self.filename_dl3
        if self.output_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_file}")
                self.output_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_file} already exists,"
                    " use --overwrite to overwrite")
        if not (self.source_ra or self.source_dec):
            self.source_pos = SkyCoord.from_name(self.source_name)
        elif bool(self.source_ra) != bool(self.source_dec):
            raise ToolConfigurationError(
                "Either provide both RA and DEC values for the source or none")
        else:
            self.source_pos = SkyCoord(ra=self.source_ra, dec=self.source_dec)

        self.log.debug(f"Output DL3 file: {self.output_file}")

        try:
            with fits.open(self.input_irf) as hdul:
                self.use_energy_dependent_cuts = (
                    "GH_CUT" not in hdul["EFFECTIVE AREA"].header)
        except KeyError:
            raise ToolConfigurationError(
                f"{self.input_irf} does not have an EFFECTIVE AREA HDU, "
                "which is needed to check for global cut information in its header")

    def apply_srcindep_gh_cut(self):
        ''' apply gammaness cut '''
        self.data = self.event_sel.filter_cut(self.data)

        if self.use_energy_dependent_cuts:
            self.energy_dependent_gh_cuts = QTable.read(self.input_irf,
                                                        hdu="GH_CUTS")

            self.data = self.cuts.apply_energy_dependent_gh_cuts(
                self.data, self.energy_dependent_gh_cuts)
            self.log.info("Using gamma efficiency of "
                          f"{self.energy_dependent_gh_cuts.meta['GH_EFF']}")
        else:
            with fits.open(self.input_irf) as hdul:
                self.cuts.global_gh_cut = hdul[1].header["GH_CUT"]
            self.data = self.cuts.apply_global_gh_cut(self.data)
            self.log.info(f"Using global G/H cut of {self.cuts.global_gh_cut}")

    def apply_srcdep_gh_alpha_cut(self):
        ''' apply gammaness and alpha cut for source-dependent analysis '''
        srcdep_assumed_positions = get_srcdep_assumed_positions(self.input_dl2)

        for i, srcdep_pos in enumerate(srcdep_assumed_positions):
            data_temp = read_data_dl2_to_QTable(self.input_dl2,
                                                srcdep_pos=srcdep_pos)

            data_temp = self.event_sel.filter_cut(data_temp)

            if self.use_energy_dependent_cuts:
                self.energy_dependent_gh_cuts = QTable.read(self.input_irf,
                                                            hdu="GH_CUTS")

                data_temp = self.cuts.apply_energy_dependent_gh_cuts(
                    data_temp, self.energy_dependent_gh_cuts)
            else:
                with fits.open(self.input_irf) as hdul:
                    self.cuts.global_gh_cut = hdul[1].header["GH_CUT"]
                data_temp = self.cuts.apply_global_gh_cut(data_temp)

            with fits.open(self.input_irf) as hdul:
                self.cuts.global_alpha_cut = hdul[1].header["AL_CUT"]
            data_temp = self.cuts.apply_global_alpha_cut(data_temp)

            # set expected source positions as reco positions
            set_expected_pos_to_reco_altaz(data_temp)

            if i == 0:
                self.data = data_temp
            else:
                self.data = vstack([self.data, data_temp])

    def start(self):

        if not self.source_dep:
            self.data = read_data_dl2_to_QTable(self.input_dl2)
        else:
            self.data = read_data_dl2_to_QTable(self.input_dl2, 'on')
        self.effective_time, self.elapsed_time = get_effective_time(self.data)
        self.run_number = run_info_from_filename(self.input_dl2)[1]

        if not self.source_dep:
            self.apply_srcindep_gh_cut()
        else:
            self.apply_srcdep_gh_alpha_cut()

        self.data = add_icrs_position_params(self.data, self.source_pos)

        self.log.info("Generating event list")
        self.events, self.gti, self.pointing = create_event_list(
            data=self.data,
            run_number=self.run_number,
            source_name=self.source_name,
            source_pos=self.source_pos,
            effective_time=self.effective_time.value,
            elapsed_time=self.elapsed_time.value,
        )

        self.hdulist = fits.HDUList(
            [fits.PrimaryHDU(), self.events, self.gti, self.pointing])

        irf = fits.open(self.input_irf)
        self.log.info("Adding IRF HDUs")

        for irf_hdu in irf[1:]:
            self.hdulist.append(irf_hdu)

    def finish(self):
        self.hdulist.writeto(self.output_file, overwrite=self.overwrite)

        Provenance().add_output_file(self.output_file)
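
Once DL3 files have been written and indexed with the FITSIndexWriter from the first example, they can typically be loaded with gammapy's DataStore; a hedged sketch (gammapy is not used by the code above and the path is a placeholder):

from gammapy.data import DataStore

# the directory must contain the DL3 files plus hdu-index.fits.gz / obs-index.fits.gz
data_store = DataStore.from_dir("/path/to/DL3/")
observations = data_store.get_observations()
print(data_store.obs_table)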
Code Example #6
class MuonAnalysis(Tool):
    """
    Detect and extract muon ring parameters, and write the muon ring and
    intensity parameters to an output table.

    The resulting output can be read, for example, with
    `pandas.read_hdf(filename, 'dl1/event/telescope/parameters/muon')`
    """

    name = "ctapipe-reconstruct-muons"
    description = traits.Unicode(__doc__)

    output = traits.Path(directory_ok=False,
                         help="HDF5 output file name").tag(config=True)

    completeness_threshold = traits.FloatTelescopeParameter(
        default_value=30.0,
        help="Threshold for calculating the ``ring_completeness``").tag(
            config=True)

    ratio_width = traits.FloatTelescopeParameter(
        default_value=1.5,
        help=("Ring width for intensity ratio"
              " computation as multiple of pixel diameter"),
    ).tag(config=True)

    overwrite = traits.Bool(
        default_value=False,
        help="If true, overwrite outputfile without asking").tag(config=True)

    min_pixels = traits.IntTelescopeParameter(
        help=("Minimum number of pixels after cleaning and ring finding"
              "required to process an event"),
        default_value=100,
    ).tag(config=True)

    pedestal = traits.FloatTelescopeParameter(
        help="Pedestal noise rms", default_value=1.1).tag(config=True)

    classes = [
        CameraCalibrator,
        TailcutsImageCleaner,
        EventSource,
        MuonRingFitter,
        MuonIntensityFitter,
    ]

    aliases = {
        "i": "EventSource.input_url",
        "input": "EventSource.input_url",
        "o": "MuonAnalysis.output",
        "output": "MuonAnalysis.output",
        "max-events": "EventSource.max_events",
        "allowed-tels": "EventSource.allowed_tels",
    }

    flags = {
        "overwrite": ({
            "MuonAnalysis": {
                "overwrite": True
            }
        }, "overwrite output file")
    }

    def setup(self):
        if self.output is None:
            raise ToolConfigurationError(
                "You need to provide an --output file")

        if self.output.exists() and not self.overwrite:
            raise ToolConfigurationError(
                "Outputfile {self.output} already exists, use `--overwrite` to overwrite"
            )

        self.source = EventSource(parent=self)
        subarray = self.source.subarray

        self.calib = CameraCalibrator(subarray=subarray, parent=self)
        self.ring_fitter = MuonRingFitter(parent=self)
        self.intensity_fitter = MuonIntensityFitter(subarray=subarray,
                                                    parent=self)
        self.cleaning = TailcutsImageCleaner(parent=self, subarray=subarray)
        self.writer = HDF5TableWriter(self.output,
                                      "",
                                      add_prefix=True,
                                      parent=self,
                                      mode="w")
        self.pixels_in_tel_frame = {}
        self.field_of_view = {}
        self.pixel_widths = {}

        for p in [
                "min_pixels", "pedestal", "ratio_width",
                "completeness_threshold"
        ]:
            getattr(self, p).attach_subarray(self.source.subarray)

    def start(self):
        for event in tqdm(self.source, desc="Processing events: "):
            self.process_array_event(event)

    def process_array_event(self, event):
        self.calib(event)

        for tel_id, dl1 in event.dl1.tel.items():
            self.process_telescope_event(event.index, tel_id, dl1)

        self.writer.write("sim/event/subarray/shower",
                          [event.index, event.simulation.shower])

    def process_telescope_event(self, event_index, tel_id, dl1):
        event_id = event_index.event_id

        if self.source.subarray.tel[tel_id].optics.num_mirrors != 1:
            self.log.warn(f"Skipping non-single mirror telescope {tel_id}"
                          " set --allowed_tels to get rid of this warning")
            return

        self.log.debug(f"Processing event {event_id}, telescope {tel_id}")
        image = dl1.image
        if dl1.image_mask is None:
            dl1.image_mask = self.cleaning(tel_id, image)

        if np.count_nonzero(dl1.image_mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}:"
                f" has less then {self.min_pixels.tel[tel_id]} pixels after cleaning"
            )
            return

        x, y = self.get_pixel_coords(tel_id)

        # iterative ring fit.
        # First use cleaning pixels, then only pixels close to the ring
        # three iterations seems to be enough for most rings
        mask = dl1.image_mask
        for i in range(3):
            ring = self.ring_fitter(x, y, image, mask)
            dist = np.sqrt((x - ring.center_x)**2 + (y - ring.center_y)**2)
            mask = np.abs(dist - ring.radius) / ring.radius < 0.4

        if np.count_nonzero(mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}:"
                f" Less then {self.min_pixels.tel[tel_id]} pixels on ring")
            return

        if np.isnan(
            [ring.radius.value, ring.center_x.value,
             ring.center_y.value]).any():
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}: Ring fit did not succeed"
            )
            return

        parameters = self.calculate_muon_parameters(tel_id, image,
                                                    dl1.image_mask, ring)
        # intensity_fitter does not support a mask yet, set ignored pixels to 0
        image[~mask] = 0

        result = self.intensity_fitter(
            tel_id,
            ring.center_x,
            ring.center_y,
            ring.radius,
            image,
            pedestal=self.pedestal.tel[tel_id],
        )

        self.log.info(f"Muon fit: r={ring.radius:.2f}"
                      f", width={result.width:.4f}"
                      f", efficiency={result.optical_efficiency:.2%}")

        tel_event_index = TelEventIndexContainer(**event_index, tel_id=tel_id)

        self.writer.write(
            "dl1/event/telescope/parameters/muons",
            [tel_event_index, ring, parameters, result],
        )

    def calculate_muon_parameters(self, tel_id, image, clean_mask, ring):
        fov_radius = self.get_fov(tel_id)
        x, y = self.get_pixel_coords(tel_id)

        # add ring containment, not filled in fit
        containment = ring_containment(ring.radius, ring.center_x,
                                       ring.center_y, fov_radius)

        completeness = ring_completeness(
            x,
            y,
            image,
            ring.radius,
            ring.center_x,
            ring.center_y,
            threshold=self.completeness_threshold.tel[tel_id],
        )

        pixel_width = self.get_pixel_width(tel_id)
        intensity_ratio = intensity_ratio_inside_ring(
            x[clean_mask],
            y[clean_mask],
            image[clean_mask],
            ring.radius,
            ring.center_x,
            ring.center_y,
            width=self.ratio_width.tel[tel_id] * pixel_width,
        )

        mse = mean_squared_error(
            x[clean_mask],
            y[clean_mask],
            image[clean_mask],
            ring.radius,
            ring.center_x,
            ring.center_y,
        )

        return MuonParametersContainer(
            containment=containment,
            completeness=completeness,
            intensity_ratio=intensity_ratio,
            mean_squared_error=mse,
        )

    def get_fov(self, tel_id):
        """Guesstimate fov radius for telescope with id `tel_id`"""
        # memoize fov calculation
        if tel_id not in self.field_of_view:
            cam = self.source.subarray.tel[tel_id].camera.geometry
            border = cam.get_border_pixel_mask()

            x, y = self.get_pixel_coords(tel_id)
            self.field_of_view[tel_id] = np.sqrt(x[border]**2 +
                                                 y[border]**2).mean()

        return self.field_of_view[tel_id]

    def get_pixel_width(self, tel_id):
        """Guesstimate pixel width for telescope with id `tel_id`"""
        # memoize pixel width calculation
        if tel_id not in self.pixel_widths:
            x, y = self.get_pixel_coords(tel_id)
            self.pixel_widths[tel_id] = CameraGeometry.guess_pixel_width(x, y)

        return self.pixel_widths[tel_id]

    def get_pixel_coords(self, tel_id):
        """Get pixel coords in telescope frame for telescope with id `tel_id`"""
        # memoize transformation
        if tel_id not in self.pixels_in_tel_frame:
            telescope = self.source.subarray.tel[tel_id]
            cam = telescope.camera.geometry
            camera_frame = CameraFrame(
                focal_length=telescope.optics.equivalent_focal_length,
                rotation=cam.cam_rotation,
            )
            cam_coords = SkyCoord(x=cam.pix_x, y=cam.pix_y, frame=camera_frame)
            tel_coord = cam_coords.transform_to(TelescopeFrame())
            self.pixels_in_tel_frame[tel_id] = tel_coord

        coords = self.pixels_in_tel_frame[tel_id]
        return coords.fov_lon, coords.fov_lat

    def finish(self):
        Provenance().add_output_file(self.output,
                                     role="muon_efficiency_parameters")
        self.writer.close()
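
A minimal sketch (not part of the original listing) of running the tool above programmatically; the input and output file names and the flag names are placeholder assumptions:

# Hypothetical driver for the MuonAnalysis tool defined above.
def main():
    tool = MuonAnalysis()
    tool.run(argv=[
        "--input", "gamma_test.simtel.gz",   # any simtel file readable by EventSource
        "--output", "muons.h5",
        "--overwrite",
    ])

if __name__ == "__main__":
    main()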
Code example #7
class IRFFITSWriter(Tool):
    name = "IRFFITSWriter"
    description = __doc__
    example = """
    To generate IRFs from MC gamma only, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --overwrite

    Or to generate all 4 IRFs, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -p /path/to/DL2_MC_proton_file.h5
        -e /path/to/DL2_MC_electron_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)

    Or use a config file for cuts and binning information:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --fixed-gh-cut 0.9
        --fixed-theta-cut 0.2
        --irf-obs-time 50
    """

    input_gamma_dl2 = traits.Path(
        help="Input MC gamma DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True
    ).tag(config=True)

    input_proton_dl2 = traits.Path(
        help="Input MC proton DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True
    ).tag(config=True)

    input_electron_dl2 = traits.Path(
        help="Input MC electron DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True
    ).tag(config=True)

    output_irf_file = traits.Path(
        help="IRF output file",
        directory_ok=False,
        file_ok=True,
        default_value="./irf.fits.gz",
    ).tag(config=True)

    irf_obs_time = traits.Float(
        help="Observation time for IRF in hours",
        default_value=50,
    ).tag(config=True)

    point_like = traits.Bool(
        help="True for point_like IRF, False for Full Enclosure",
        default_value=False,
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3FixedCuts, DataBinning]

    aliases = {
        ("g", "input-gamma-dl2"): "IRFFITSWriter.input_gamma_dl2",
        ("p", "input-proton-dl2"): "IRFFITSWriter.input_proton_dl2",
        ("e", "input-electron-dl2"): "IRFFITSWriter.input_electron_dl2",
        ("o", "output-irf-file"): "IRFFITSWriter.output_irf_file",
        "irf-obs-time": "IRFFITSWriter.irf_obs_time",
        "fixed-gh-cut": "DL3FixedCuts.fixed_gh_cut",
        "fixed-theta-cut": "DL3FixedCuts.fixed_theta_cut",
        "allowed-tels": "DL3FixedCuts.allowed_tels",
        "overwrite": "IRFFITSWriter.overwrite",
    }

    flags = {
        "point-like": (
            {"IRFFITSWriter": {"point_like": True}},
            "Point like IRFs will be produced, otherwise Full Enclosure",
        ),
        "overwrite": (
            {"IRFFITSWriter": {"overwrite": True}},
            "overwrites output file",
        )
    }

    def setup(self):

        if self.output_irf_file.absolute().exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_irf_file}")
                self.output_irf_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_irf_file} already exists,"
                    " use --overwrite to overwrite"
                )

        filename = self.output_irf_file.name
        if not (filename.endswith('.fits') or filename.endswith('.fits.gz')):
            raise ValueError(f"{filename} is not a valid FITS file name (use .fits or .fits.gz).")

        if self.input_proton_dl2 and self.input_electron_dl2:
            self.only_gamma_irf = False
        else:
            self.only_gamma_irf = True

        self.event_sel = EventSelector(parent=self)
        self.fixed_cuts = DL3FixedCuts(parent=self)
        self.data_bin = DataBinning(parent=self)

        self.mc_particle = {
            "gamma": {
                "file": str(self.input_gamma_dl2),
                "target_spectrum": CRAB_MAGIC_JHEAP2015,
            },
        }
        Provenance().add_input_file(self.input_gamma_dl2)

        self.t_obs = self.irf_obs_time * u.hour

        # Read and update MC information
        if not self.only_gamma_irf:
            self.mc_particle["proton"] = {
                "file": str(self.input_proton_dl2),
                "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            }

            self.mc_particle["electron"] = {
                "file": str(self.input_electron_dl2),
                "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            }

            Provenance().add_input_file(self.input_proton_dl2)
            Provenance().add_input_file(self.input_electron_dl2)

        self.provenance_log = self.output_irf_file.parent / (
            self.name + ".provenance.log"
        )

    def start(self):

        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])

            if p["simulation_info"].viewcone.value == 0.0:
                p["mc_type"] = "point_like"
            else:
                p["mc_type"] = "diffuse"

            self.log.debug(f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs
                )

                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            for prefix in ("true", "reco"):
                k = f"{prefix}_source_fov_offset"
                p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)
            # calculate theta / distance between reco and assumed source position
            p["events"]["theta"] = calculate_theta(
                p["events"],
                assumed_source_az=p["events"]["true_az"],
                assumed_source_alt=p["events"]["true_alt"],
            )
            self.log.debug(p["simulation_info"])

        gammas = self.mc_particle["gamma"]["events"]

        self.log.info(f"Using fixed G/H cut of {self.fixed_cuts.fixed_gh_cut}")

        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.fixed_cuts.allowed_tels_filter(gammas)
        gammas = self.fixed_cuts.gh_cut(gammas)

        if self.point_like:
            gammas = self.fixed_cuts.theta_cut(gammas)
            self.log.info('Theta cuts applied for point like IRF')

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        if self.mc_particle["gamma"]["mc_type"] == "point_like":
            mean_fov_offset = round(gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1, mean_fov_offset + 0.1] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

        if not self.only_gamma_irf:
            background = table.vstack(
                [
                    self.mc_particle["proton"]["events"],
                    self.mc_particle["electron"]["events"],
                ]
            )

            background = self.event_sel.filter_cut(background)
            background = self.fixed_cuts.allowed_tels_filter(background)
            background = self.fixed_cuts.gh_cut(background)

            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a fixed gh/theta cut, only a header value is added.
        # For energy dependent cuts, a new HDU should be created
        # GH_CUT and FOV_CUT are temporary non-standard header data
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(map(str, self.fixed_cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
            "GH_CUT": self.fixed_cuts.fixed_gh_cut,
        }
        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
            extra_headers["RAD_MAX"] = str(self.fixed_cuts.fixed_theta_cut * u.deg)
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Write HDUs
        self.hdus = [fits.PrimaryHDU(), ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] == "point_like":
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )

        self.log.info("Effective Area HDU created")
        self.edisp = energy_dispersion(
            gammas,
            true_energy_bins,
            fov_offset_bins,
            migration_bins,
        )
        self.hdus.append(
            create_energy_dispersion_hdu(
                self.edisp,
                true_energy_bins,
                migration_bins,
                fov_offset_bins,
                point_like=self.point_like,
                extname="ENERGY DISPERSION",
                **extra_headers,
            )
        )
        self.log.info("Energy Dispersion HDU created")

        if not self.only_gamma_irf:
            self.background = background_2d(
                background,
                reco_energy_bins=reco_energy_bins,
                fov_offset_bins=background_offset_bins,
                t_obs=self.t_obs,
            )
            self.hdus.append(
                create_background_2d_hdu(
                    self.background.T,
                    reco_energy_bins,
                    background_offset_bins,
                    extname="BACKGROUND",
                    **extra_headers,
                )
            )
            self.log.info("Background HDU created")

        if not self.point_like:
            self.psf = psf_table(
                gammas,
                true_energy_bins,
                fov_offset_bins=fov_offset_bins,
                source_offset_bins=source_offset_bins,
            )
            self.hdus.append(
                create_psf_table_hdu(
                    self.psf,
                    true_energy_bins,
                    source_offset_bins,
                    fov_offset_bins,
                    extname="PSF",
                    **extra_headers,
                )
            )
            self.log.info("PSF HDU created")

    def finish(self):

        fits.HDUList(self.hdus).writeto(self.output_irf_file, overwrite=self.overwrite)
        Provenance().add_output_file(self.output_irf_file)
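
A minimal sketch (not from the original listing) of inspecting the IRF file written above with astropy; the file path is an assumption:

# Open the output of IRFFITSWriter and list its HDUs. "EFFECTIVE AREA" and
# "ENERGY DISPERSION" should always be present; "BACKGROUND" appears when
# proton/electron inputs were given, and "PSF" when --point-like was not set.
from astropy.io import fits

with fits.open("irf.fits.gz") as hdul:
    hdul.info()
    aeff_header = hdul["EFFECTIVE AREA"].header
    print(aeff_header.get("GH_CUT"), aeff_header.get("RAD_MAX"))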
Code example #8
class MuonAnalysis(Tool):
    """
    Detect and extract muon ring parameters, and write the muon ring and
    intensity parameters to an output table.

    The resulting output can be read e.g. using
    `pandas.read_hdf(filename, 'dl1/event/telescope/parameters/muons')`
    """
    name = 'ctapipe-reconstruct-muons'
    description = traits.Unicode(__doc__)

    output = traits.Path(directory_ok=False,
                         help='HDF5 output file name').tag(config=True)

    completeness_threshold = traits.FloatTelescopeParameter(
        default_value=30.0,
        help='Threshold for calculating the ``ring_completeness``',
    ).tag(config=True)

    ratio_width = traits.FloatTelescopeParameter(
        default_value=1.5,
        help=('Ring width for intensity ratio'
              ' computation as multiple of pixel diameter')).tag(config=True)

    overwrite = traits.Bool(
        default_value=False,
        help='If true, overwrite outputfile without asking').tag(config=True)

    min_pixels = traits.IntTelescopeParameter(
        help=('Minimum number of pixels after cleaning and ring finding'
              ' required to process an event'),
        default_value=100,
    ).tag(config=True)

    pedestal = traits.FloatTelescopeParameter(
        help='Pedestal noise rms',
        default_value=1.1,
    ).tag(config=True)

    extractor_name = traits.create_class_enum_trait(
        ImageExtractor,
        default_value='GlobalPeakWindowSum',
    ).tag(config=True)

    classes = [
        CameraCalibrator,
        TailcutsImageCleaner,
        EventSource,
        MuonRingFitter,
        MuonIntensityFitter,
    ] + traits.classes_with_traits(ImageExtractor)

    aliases = {
        'i': 'EventSource.input_url',
        'input': 'EventSource.input_url',
        'o': 'MuonAnalysis.output',
        'output': 'MuonAnalysis.output',
        'max-events': 'EventSource.max_events',
        'allowed-tels': 'EventSource.allowed_tels',
    }

    flags = {
        'overwrite': ({
            'MuonAnalysis': {
                'overwrite': True
            }
        }, 'overwrite output file')
    }

    def setup(self):
        if self.output is None:
            raise ToolConfigurationError(
                'You need to provide an --output file')

        if self.output.exists() and not self.overwrite:
            raise ToolConfigurationError(
                f'Output file {self.output} already exists, use `--overwrite` to overwrite'
            )

        self.source = self.add_component(EventSource.from_config(parent=self))
        self.extractor = self.add_component(
            ImageExtractor.from_name(self.extractor_name,
                                     parent=self,
                                     subarray=self.source.subarray))
        self.calib = self.add_component(
            CameraCalibrator(
                subarray=self.source.subarray,
                parent=self,
                image_extractor=self.extractor,
            ))
        self.ring_fitter = self.add_component(MuonRingFitter(parent=self))
        self.intensity_fitter = self.add_component(
            MuonIntensityFitter(
                subarray=self.source.subarray,
                parent=self,
            ))
        self.cleaning = self.add_component(
            TailcutsImageCleaner(
                parent=self,
                subarray=self.source.subarray,
            ))
        self.writer = self.add_component(
            HDF5TableWriter(
                self.output,
                "",
                add_prefix=True,
                parent=self,
                mode='w',
            ))
        self.pixels_in_tel_frame = {}
        self.field_of_view = {}
        self.pixel_widths = {}

        for p in [
                'min_pixels', 'pedestal', 'ratio_width',
                'completeness_threshold'
        ]:
            getattr(self, p).attach_subarray(self.source.subarray)

    def start(self):
        for event in tqdm(self.source, desc='Processing events: '):
            self.process_array_event(event)

    def process_array_event(self, event):
        self.calib(event)

        for tel_id, dl1 in event.dl1.tel.items():
            self.process_telescope_event(event.index, tel_id, dl1)

        self.writer.write('sim/event/subarray/shower', [event.index, event.mc])

    def process_telescope_event(self, event_index, tel_id, dl1):
        event_id = event_index.event_id

        if self.source.subarray.tel[tel_id].optics.num_mirrors != 1:
            self.log.warning(f'Skipping non-single mirror telescope {tel_id},'
                             ' set --allowed_tels to silence this warning')
            return

        self.log.debug(f'Processing event {event_id}, telescope {tel_id}')
        image = dl1.image
        clean_mask = self.cleaning(tel_id, image)

        if np.count_nonzero(clean_mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}:'
                f' has fewer than {self.min_pixels.tel[tel_id]} pixels after cleaning'
            )
            return

        x, y = self.get_pixel_coords(tel_id)

        # iterative ring fit.
        # First use cleaning pixels, then only pixels close to the ring
        # three iterations seems to be enough for most rings
        mask = clean_mask
        for i in range(3):
            ring = self.ring_fitter(x, y, image, mask)
            dist = np.sqrt((x - ring.center_x)**2 + (y - ring.center_y)**2)
            mask = np.abs(dist - ring.radius) / ring.radius < 0.4

        if np.count_nonzero(mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}:'
                f' Fewer than {self.min_pixels.tel[tel_id]} pixels on ring')
            return

        if np.isnan(
            [ring.radius.value, ring.center_x.value,
             ring.center_y.value]).any():
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}: Ring fit did not succeed'
            )
            return

        parameters = self.calculate_muon_parameters(tel_id, image, clean_mask,
                                                    ring)

        # intensity_fitter does not support a mask yet, set ignored pixels to 0
        image[~mask] = 0

        result = self.intensity_fitter(
            tel_id,
            ring.center_x,
            ring.center_y,
            ring.radius,
            image,
            pedestal=self.pedestal.tel[tel_id],
        )

        self.log.info(
            f'Muon fit: r={ring.radius:.2f}'
            f', width={result.width:.4f}'
            f', efficiency={result.optical_efficiency:.2%}', )

        tel_event_index = TelEventIndexContainer(
            **event_index,
            tel_id=tel_id,
        )

        self.writer.write('dl1/event/telescope/parameters/muons',
                          [tel_event_index, ring, parameters, result])

    def calculate_muon_parameters(self, tel_id, image, clean_mask, ring):
        fov_radius = self.get_fov(tel_id)
        x, y = self.get_pixel_coords(tel_id)

        # add ring containment, not filled in fit
        containment = ring_containment(
            ring.radius,
            ring.center_x,
            ring.center_y,
            fov_radius,
        )

        completeness = ring_completeness(
            x,
            y,
            image,
            ring.radius,
            ring.center_x,
            ring.center_y,
            threshold=self.completeness_threshold.tel[tel_id],
        )

        pixel_width = self.get_pixel_width(tel_id)
        intensity_ratio = intensity_ratio_inside_ring(
            x[clean_mask],
            y[clean_mask],
            image[clean_mask],
            ring.radius,
            ring.center_x,
            ring.center_y,
            width=self.ratio_width.tel[tel_id] * pixel_width,
        )

        mse = mean_squared_error(x[clean_mask], y[clean_mask],
                                 image[clean_mask], ring.radius, ring.center_x,
                                 ring.center_y)

        return MuonParametersContainer(
            containment=containment,
            completeness=completeness,
            intensity_ratio=intensity_ratio,
            mean_squared_error=mse,
        )

    def get_fov(self, tel_id):
        '''Guesstimate fov radius for telescope with id `tel_id`'''
        # memoize fov calculation
        if tel_id not in self.field_of_view:
            cam = self.source.subarray.tel[tel_id].camera.geometry
            border = cam.get_border_pixel_mask()

            x, y = self.get_pixel_coords(tel_id)
            self.field_of_view[tel_id] = np.sqrt(x[border]**2 +
                                                 y[border]**2).mean()

        return self.field_of_view[tel_id]

    def get_pixel_width(self, tel_id):
        '''Guesstimate pixel width for telescope with id `tel_id`'''
        # memoize pixel width calculation
        if tel_id not in self.pixel_widths:
            x, y = self.get_pixel_coords(tel_id)
            self.pixel_widths[tel_id] = CameraGeometry.guess_pixel_width(x, y)

        return self.pixel_widths[tel_id]

    def get_pixel_coords(self, tel_id):
        '''Get pixel coords in telescope frame for telescope with id `tel_id`'''
        # memoize transformation
        if tel_id not in self.pixels_in_tel_frame:
            telescope = self.source.subarray.tel[tel_id]
            cam = telescope.camera.geometry
            camera_frame = CameraFrame(
                focal_length=telescope.optics.equivalent_focal_length,
                rotation=cam.cam_rotation,
            )
            cam_coords = SkyCoord(x=cam.pix_x, y=cam.pix_y, frame=camera_frame)
            tel_coord = cam_coords.transform_to(TelescopeFrame())
            self.pixels_in_tel_frame[tel_id] = tel_coord

        coords = self.pixels_in_tel_frame[tel_id]
        return coords.fov_lon, coords.fov_lat

    def finish(self):
        Provenance().add_output_file(
            self.output,
            role='muon_efficiency_parameters',
        )
        self.writer.close()
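
A minimal sketch of reading back the table written by this tool, along the lines of the class docstring; the output file name "muons.h5" is an assumption:

# Load the muon ring / intensity parameters from the HDF5 output.
import pandas as pd

muons = pd.read_hdf("muons.h5", key="dl1/event/telescope/parameters/muons")
print(muons.columns.tolist())
print(muons.head())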
Code example #9
class ImagePlotter(Component):
    """ Plotter for camera images """

    display = Bool(
        True,
        help="Display the photoelectron images on-screen as they are produced."
    ).tag(config=True)
    output_path = traits.Path(
        directory_ok=False,
        help=("Output path for the pdf containing all the images."
              " Set to None for no saved output."),
    ).tag(config=True)

    def __init__(self, subarray, config=None, parent=None, **kwargs):
        """
        Plotter for camera images.

        Parameters
        ----------
        subarray : ctapipe.instrument.SubarrayDescription
            Subarray description used to look up the camera geometries.
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        parent : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs
        """
        super().__init__(config=config, parent=parent, **kwargs)
        self._current_tel = None
        self.c_intensity = None
        self.c_peak_time = None
        self.cb_intensity = None
        self.cb_peak_time = None
        self.pdf = None
        self.subarray = subarray

        self._init_figure()

    def _init_figure(self):
        self.fig = plt.figure(figsize=(16, 7))
        self.ax_intensity = self.fig.add_subplot(1, 2, 1)
        self.ax_peak_time = self.fig.add_subplot(1, 2, 2)
        if self.output_path:
            self.log.info(f"Creating PDF: {self.output_path}")
            self.pdf = PdfPages(self.output_path)

    def plot(self, event, telid):
        image = event.dl1.tel[telid].image
        peak_time = event.dl1.tel[telid].peak_time
        self.log.debug("plot: image %s, peak_time %s", image.shape, peak_time.shape)

        if self._current_tel != telid:
            self._current_tel = telid

            self.ax_intensity.cla()
            self.ax_peak_time.cla()

            # Redraw camera
            geom = self.subarray.tel[telid].camera.geometry
            self.c_intensity = CameraDisplay(geom, ax=self.ax_intensity)
            self.c_peak_time = CameraDisplay(geom, ax=self.ax_peak_time)

            if (peak_time != 0.0).all():
                tmaxmin = event.dl0.tel[telid].waveform.shape[1]
                t_chargemax = peak_time[image.argmax()]
                cmap_time = colors.LinearSegmentedColormap.from_list(
                    "cmap_t",
                    [
                        (0 / tmaxmin, "darkgreen"),
                        (0.6 * t_chargemax / tmaxmin, "green"),
                        (t_chargemax / tmaxmin, "yellow"),
                        (1.4 * t_chargemax / tmaxmin, "blue"),
                        (1, "darkblue"),
                    ],
                )
                self.c_peak_time.pixels.set_cmap(cmap_time)

            if not self.cb_intensity:
                self.c_intensity.add_colorbar(ax=self.ax_intensity,
                                              label="Intensity (p.e.)")
                self.cb_intensity = self.c_intensity.colorbar
            else:
                self.c_intensity.colorbar = self.cb_intensity
                self.c_intensity.update(True)
            if not self.cb_peak_time:
                self.c_peak_time.add_colorbar(ax=self.ax_peak_time,
                                              label="Pulse Time (ns)")
                self.cb_peak_time = self.c_peak_time.colorbar
            else:
                self.c_peak_time.colorbar = self.cb_peak_time
                self.c_peak_time.update(True)

        self.c_intensity.image = image
        if peak_time is not None:
            self.c_peak_time.image = peak_time

        self.fig.suptitle("Event_index={}  Event_id={}  Telescope={}".format(
            event.count, event.index.event_id, telid))

        if self.display:
            plt.pause(0.001)
        if self.pdf is not None:
            self.pdf.savefig(self.fig)

    def finish(self):
        if self.pdf is not None:
            self.log.info("Closing PDF")
            self.pdf.close()
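
A minimal sketch (with assumed file names) of driving ImagePlotter outside of a Tool: calibrate a few events, plot every telescope image, and close the PDF at the end:

# Hypothetical driver for ImagePlotter; the input file is a placeholder for
# any simtel file readable by EventSource.
from ctapipe.calib import CameraCalibrator
from ctapipe.io import EventSource

with EventSource("gamma_test_large.simtel.gz", max_events=5) as source:
    calibrator = CameraCalibrator(subarray=source.subarray)
    plotter = ImagePlotter(subarray=source.subarray,
                           output_path="images.pdf",
                           display=False)
    for event in source:
        calibrator(event)
        for tel_id in event.dl1.tel:
            plotter.plot(event, tel_id)
    plotter.finish()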