class CameraDemo(Tool):
    """ctapipe Tool that animates fake (toy-model) shower events on a
    configurable camera/optics combination using matplotlib.

    Frames are produced indefinitely by ``FuncAnimation``; every
    ``cleanframes`` frames the display toggles between raw and
    tail-cut-cleaned images.
    """

    name = u"ctapipe-camdemo"
    description = "Display fake events in a demo camera"

    # Configurable traits, exposed on the command line through ``aliases``.
    delay = traits.Int(50, help="Frame delay in ms", min=20).tag(config=True)
    cleanframes = traits.Int(100, help="Number of frames between turning on "
                             "cleaning", min=0).tag(config=True)
    autoscale = traits.Bool(False, help='scale each frame to max if '
                            'True').tag(config=True)
    blit = traits.Bool(False, help='use blit operation to draw on screen ('
                       'much faster but may cause some draw '
                       'artifacts)').tag(config=True)
    camera = traits.CaselessStrEnum(
        CameraGeometry.get_known_camera_names(),
        default_value='NectarCam',
        help='Name of camera to display').tag(config=True)
    optics = traits.CaselessStrEnum(
        OpticsDescription.get_known_optics_names(),
        default_value='MST',
        help='Telescope optics description name').tag(config=True)

    # Map short command-line names onto the fully qualified trait names.
    aliases = traits.Dict({
        'delay': 'CameraDemo.delay',
        'cleanframes': 'CameraDemo.cleanframes',
        'autoscale': 'CameraDemo.autoscale',
        'blit': 'CameraDemo.blit',
        'camera': 'CameraDemo.camera',
        'optics': 'CameraDemo.optics',
    })

    def __init__(self):
        super().__init__()
        # frame counter used to toggle cleaning on/off (see update())
        self._counter = 0
        # True while cleaned images are being displayed
        self.imclean = False

    def start(self):
        """Entry point: log the chosen camera and run the animation loop."""
        self.log.info("Starting CameraDisplay for {}".format(self.camera))
        self._display_camera_animation()

    def _display_camera_animation(self):
        """Build the figure/CameraDisplay and run the FuncAnimation loop
        until the matplotlib window is closed (blocks in ``plt.show()``)."""
        # plt.style.use("ggplot")
        fig = plt.figure(num="ctapipe Camera Demo", figsize=(7, 7))
        ax = plt.subplot(111)

        # load the camera
        tel = TelescopeDescription.from_name(optics_name=self.optics,
                                             camera_name=self.camera)
        geom = tel.camera

        # poor-man's coordinate transform from telscope to camera frame (it's
        # better to use ctapipe.coordiantes when they are stable)
        scale = tel.optics.effective_focal_length.to(geom.pix_x.unit).value
        fov = np.deg2rad(4.0)
        maxwid = np.deg2rad(0.01)
        maxlen = np.deg2rad(0.03)

        disp = CameraDisplay(geom, ax=ax, autoupdate=True,
                             title="{}, f={}".format(
                                 tel, tel.optics.effective_focal_length))
        disp.cmap = plt.cm.terrain

        def update(frame):
            # Draw one random toy-model shower; all parameters are sampled
            # in radians (field-of-view angles) and scaled to camera units.
            centroid = np.random.uniform(-fov, fov, size=2) * scale
            width = np.random.uniform(0, maxwid) * scale
            length = np.random.uniform(0, maxlen) * scale + width
            angle = np.random.uniform(0, 360)
            intens = np.random.exponential(2) * 50
            model = toymodel.generate_2d_shower_model(centroid=centroid,
                                                      width=width,
                                                      length=length,
                                                      psi=angle * u.deg)
            image, sig, bg = toymodel.make_toymodel_shower_image(
                geom, model.pdf, intensity=intens, nsb_level_pe=5000)

            # alternate between cleaned and raw images
            if self._counter == self.cleanframes:
                plt.suptitle("Image Cleaning ON")
                self.imclean = True
            if self._counter == self.cleanframes * 2:
                plt.suptitle("Image Cleaning OFF")
                self.imclean = False
                self._counter = 0

            if self.imclean:
                # NOTE(review): passes image/80.0 with no explicit
                # thresholds — relies on the tailcuts_clean defaults of the
                # ctapipe version in use; confirm against the installed API.
                cleanmask = tailcuts_clean(geom, image / 80.0)
                for ii in range(3):
                    dilate(geom, cleanmask)
                image[cleanmask == 0] = 0  # zero noise pixels

            self.log.debug("count = {}, image sum={} max={}".format(
                self._counter, image.sum(), image.max()))
            disp.image = image

            if self.autoscale:
                disp.set_limits_percent(95)
            else:
                disp.set_limits_minmax(-100, 4000)

            disp.axes.figure.canvas.draw()
            self._counter += 1
            return [ax, ]

        self.anim = FuncAnimation(fig, update, interval=self.delay,
                                  blit=self.blit)
        plt.show()
class MuonDisplayerTool(Tool):
    """Tool that scans an event file for muon-ring events, optionally
    displays them, and writes the ring/intensity fit parameters to an
    HDF5 file (one table per camera type)."""

    name = 'ctapipe-display-muons'
    description = t.Unicode(__doc__)

    # Input/output and display options, wired to the CLI via ``aliases``.
    events = t.Unicode("",
                       help="input event data file").tag(config=True)
    outfile = t.Unicode("muons.hdf5", help='HDF5 output file name').tag(
        config=True)
    display = t.Bool(
        help='display the camera events', default=False
    ).tag(config=True)

    classes = t.List([
        CameraCalibrator, EventSource
    ])

    aliases = t.Dict({
        'input': 'MuonDisplayerTool.events',
        'outfile': 'MuonDisplayerTool.outfile',
        'display': 'MuonDisplayerTool.display',
        'max_events': 'EventSource.max_events',
        'allowed_tels': 'EventSource.allowed_tels',
    })

    def setup(self):
        """Validate the input path and construct the event source,
        calibrator and HDF5 table writer.

        Raises
        ------
        ToolConfigurationError
            If no input file was given on the command line.
        """
        if self.events == '':
            raise ToolConfigurationError("please specify --input <events file>")
        self.log.debug("input: %s", self.events)
        self.source = event_source(self.events)
        self.calib = CameraCalibrator(
            config=self.config, tool=self, eventsource=self.source
        )
        self.writer = HDF5TableWriter(self.outfile, "muons")

    def start(self):
        """Loop over events, run the muon analysis, and write one row per
        telescope that has a successful intensity fit."""
        numev = 0
        self.num_muons_found = defaultdict(int)

        for event in tqdm(self.source, desc='detecting muons'):
            self.calib.calibrate(event)
            muon_evt = analyze_muon_event(event)

            if numev == 0:
                # first event: configure writer column exclusions once,
                # using the subarray layout of this file
                _exclude_some_columns(event.inst.subarray, self.writer)

            numev += 1

            if not muon_evt['MuonIntensityParams']:
                # No telescopes contained a good muon
                continue
            else:
                if self.display:
                    plot_muon_event(event, muon_evt)

                for tel_id in muon_evt['TelIds']:
                    idx = muon_evt['TelIds'].index(tel_id)
                    intens_params = muon_evt['MuonIntensityParams'][idx]

                    if intens_params is not None:
                        # only telescopes with a valid fit are written out
                        ring_params = muon_evt['MuonRingParams'][idx]
                        cam_id = str(event.inst.subarray.tel[tel_id].camera)
                        self.num_muons_found[cam_id] += 1
                        self.log.debug("INTENSITY: %s", intens_params)
                        self.log.debug("RING: %s", ring_params)
                        self.writer.write(table_name=cam_id,
                                          containers=[intens_params,
                                                      ring_params])

                self.log.info(
                    "Event Number: %d, found %s muons",
                    numev, dict(self.num_muons_found)
                )

    def finish(self):
        """Register the output file in provenance and close the writer."""
        Provenance().add_output_file(self.outfile, role='dl1.tel.evt.muon')
        self.writer.close()
class CameraDemo(Tool):
    """Older demo Tool: animates fake (toy-model) shower events on a
    hard-coded HESS phase-1 camera, toggling image cleaning on and off
    every ``cleanframes`` frames."""

    name = u"ctapipe-camdemo"
    description = "Display fake events in a demo camera"

    # Configurable traits, exposed on the command line through ``aliases``.
    delay = traits.Int(20, help="Frame delay in ms").tag(config=True)
    cleanframes = traits.Int(100, help="Number of frames between turning on "
                             "cleaning").tag(config=True)
    autoscale = traits.Bool(False, help='scale each frame to max if '
                            'True').tag(config=True)
    blit = traits.Bool(False, help='use blit operation to draw on screen ('
                       'much faster but may cause some draw '
                       'artifacts)').tag(config=True)
    aliases = traits.Dict({'delay': 'CameraDemo.delay',
                           'cleanframes': 'CameraDemo.cleanframes',
                           'autoscale' : 'CameraDemo.autoscale',
                           'blit': 'CameraDemo.blit'})

    def __init__(self):
        super().__init__()
        # frame counter used to toggle cleaning on/off (see update())
        self._counter = 0
        # True while cleaned images are being displayed
        self.imclean = False

    def start(self):
        """Entry point: run the animation loop (blocks until the
        matplotlib window is closed)."""
        self.log.info("Starting Camera Display")
        self._display_camera_animation()

    def _display_camera_animation(self):
        """Build the figure/CameraDisplay and run the FuncAnimation loop."""
        #plt.style.use("ggplot")
        fig = plt.figure(num="ctapipe Camera Demo", figsize=(7, 7))
        ax = plt.subplot(111)

        # load the camera
        geom = io.CameraGeometry.from_name("hess", 1)
        disp = visualization.CameraDisplay(geom, ax=ax, autoupdate=True)
        disp.cmap = plt.cm.terrain

        def update(frame):
            # Draw one random toy-model shower (parameters in camera units).
            centroid = np.random.uniform(-0.5, 0.5, size=2)
            width = np.random.uniform(0, 0.01)
            length = np.random.uniform(0, 0.03) + width
            angle = np.random.uniform(0, 360)
            intens = np.random.exponential(2) * 50
            model = toymodel.generate_2d_shower_model(centroid=centroid,
                                                      width=width,
                                                      length=length,
                                                      psi=angle * u.deg)
            image, sig, bg = toymodel.make_toymodel_shower_image(geom, model.pdf,
                                                                 intensity=intens,
                                                                 nsb_level_pe=5000)

            # alternate between cleaned and raw images
            if self._counter == self.cleanframes:
                plt.suptitle("Image Cleaning ON")
                self.imclean = True
            if self._counter == self.cleanframes*2:
                plt.suptitle("Image Cleaning OFF")
                self.imclean = False
                self._counter = 0

            if self.imclean:
                cleanmask = cleaning.tailcuts_clean(geom, image, pedvars=80)
                for ii in range(3):
                    cleaning.dilate(geom, cleanmask)
                image[cleanmask == 0] = 0  # zero noise pixels

            self.log.debug("count = {}, image sum={} max={}"
                           .format(self._counter, image.sum(), image.max()))
            disp.image = image

            if self.autoscale:
                disp.set_limits_percent(95)
            else:
                disp.set_limits_minmax(-100, 4000)

            disp.axes.figure.canvas.draw()
            self._counter += 1
            return [ax,]

        self.anim = FuncAnimation(fig, update, interval=self.delay,
                                  blit=self.blit)
        plt.show()
class IRFFITSWriter(Tool):
    """Create IRF FITS files (EFFECTIVE AREA and ENERGY DISPERSION HDUs,
    plus BACKGROUND and PSF when proton/electron MC is supplied) from
    gamma (and optionally proton/electron) MC DL2 files.

    Cuts may be global or energy-dependent; source-dependent analysis
    uses alpha cuts instead of theta cuts.
    """

    name = "IRFFITSWriter"
    description = __doc__
    example = """
    To generate IRFs from MC gamma only, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --overwrite

    Or to generate all 4 IRFs, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -p /path/to/DL2_MC_proton_file.h5
        -e /path/to/DL2_MC_electron_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)

    Or use a config file for cuts and binning information:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --global-gh-cut 0.9
        --global-theta-cut 0.2
        --irf-obs-time 50

    Or use energy-dependent cuts based on a gamma efficiency:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --energy-dependent-gh
        --energy-dependent-theta
        --gh-efficiency 0.95
        --theta-containment 0.68

    Or generate source-dependent IRFs
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like
        --global-gh-cut 0.9
        --global-alpha-cut 10
        --source-dep
    """

    # --- input/output traits -------------------------------------------------
    input_gamma_dl2 = traits.Path(help="Input MC gamma DL2 file",
                                  allow_none=True,
                                  exists=True,
                                  directory_ok=False,
                                  file_ok=True).tag(config=True)

    input_proton_dl2 = traits.Path(help="Input MC proton DL2 file",
                                   allow_none=True,
                                   exists=True,
                                   directory_ok=False,
                                   file_ok=True).tag(config=True)

    input_electron_dl2 = traits.Path(help="Input MC electron DL2 file",
                                     allow_none=True,
                                     exists=True,
                                     directory_ok=False,
                                     file_ok=True).tag(config=True)

    output_irf_file = traits.Path(
        help="IRF output file",
        allow_none=True,
        directory_ok=False,
        file_ok=True,
        default_value="./irf.fits.gz",
    ).tag(config=True)

    # --- analysis options ----------------------------------------------------
    irf_obs_time = traits.Float(
        help="Observation time for IRF in hours",
        default_value=50,
    ).tag(config=True)

    point_like = traits.Bool(
        help="True for point_like IRF, False for Full Enclosure",
        default_value=False,
    ).tag(config=True)

    energy_dependent_gh = traits.Bool(
        help="True for applying energy-dependent gammaness cuts",
        default_value=False,
    ).tag(config=True)

    energy_dependent_theta = traits.Bool(
        help="True for applying energy-dependent theta cuts",
        default_value=False,
    ).tag(config=True)

    energy_dependent_alpha = traits.Bool(
        help="True for applying energy-dependent alpha cuts",
        default_value=False,
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    source_dep = traits.Bool(
        help="True for source-dependent analysis",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3Cuts, DataBinning]

    aliases = {
        ("g", "input-gamma-dl2"): "IRFFITSWriter.input_gamma_dl2",
        ("p", "input-proton-dl2"): "IRFFITSWriter.input_proton_dl2",
        ("e", "input-electron-dl2"): "IRFFITSWriter.input_electron_dl2",
        ("o", "output-irf-file"): "IRFFITSWriter.output_irf_file",
        "irf-obs-time": "IRFFITSWriter.irf_obs_time",
        "global-gh-cut": "DL3Cuts.global_gh_cut",
        "gh-efficiency": "DL3Cuts.gh_efficiency",
        "theta-containment": "DL3Cuts.theta_containment",
        "global-theta-cut": "DL3Cuts.global_theta_cut",
        "alpha-containment": "DL3Cuts.alpha_containment",
        "global-alpha-cut": "DL3Cuts.global_alpha_cut",
        "allowed-tels": "DL3Cuts.allowed_tels",
        "overwrite": "IRFFITSWriter.overwrite",
    }

    flags = {
        "point-like": (
            {"IRFFITSWriter": {"point_like": True}},
            "Point like IRFs will be produced, otherwise Full Enclosure",
        ),
        "overwrite": (
            {"IRFFITSWriter": {"overwrite": True}},
            "overwrites output file",
        ),
        "source-dep": (
            {"IRFFITSWriter": {"source_dep": True}},
            "Source-dependent analysis will be performed",
        ),
        "energy-dependent-gh": (
            {"IRFFITSWriter": {"energy_dependent_gh": True}},
            "Uses energy-dependent cuts for gammaness",
        ),
        "energy-dependent-theta": (
            {"IRFFITSWriter": {"energy_dependent_theta": True}},
            "Uses energy-dependent cuts for theta",
        ),
        "energy-dependent-alpha": (
            {"IRFFITSWriter": {"energy_dependent_alpha": True}},
            "Uses energy-dependent cuts for alpha",
        ),
    }

    def setup(self):
        """Validate output path/name, decide gamma-only vs full IRF set,
        build selection/cut/binning helpers and register provenance.

        Raises
        ------
        ToolConfigurationError
            If the output file exists and --overwrite was not given.
        ValueError
            If the output file name is not a (compressed) FITS name.
        """
        if self.output_irf_file.absolute().exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_irf_file}")
                self.output_irf_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_irf_file} already exists,"
                    " use --overwrite to overwrite")

        filename = self.output_irf_file.name
        if not (filename.endswith('.fits') or filename.endswith('.fits.gz')):
            # FIX: the previous f-string contained no placeholder, so the
            # offending file name never appeared in the error message.
            raise ValueError(
                f"{filename} is not a correct compressed FITS file name"
                "(use .fits or .fits.gz).")

        # FIX: the old check `a and b is not Undefined` bound as
        # `a and (b is not Undefined)` (precedence), so a missing electron
        # file could still select the full-background path and crash later
        # when reading it.  Background IRFs are produced only when BOTH the
        # proton and the electron DL2 files are provided.
        if self.input_proton_dl2 and self.input_electron_dl2:
            self.only_gamma_irf = False
        else:
            self.only_gamma_irf = True

        self.event_sel = EventSelector(parent=self)
        self.cuts = DL3Cuts(parent=self)
        self.data_bin = DataBinning(parent=self)

        # Per-particle bookkeeping: file path, target spectrum, and (filled
        # in start()) the events, simulation info and MC type.
        self.mc_particle = {
            "gamma": {
                "file": self.input_gamma_dl2,
                "target_spectrum": CRAB_MAGIC_JHEAP2015,
            },
        }
        Provenance().add_input_file(self.input_gamma_dl2)

        self.t_obs = self.irf_obs_time * u.hour

        # Read and update MC information
        if not self.only_gamma_irf:
            self.mc_particle["proton"] = {
                "file": self.input_proton_dl2,
                "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            }

            self.mc_particle["electron"] = {
                "file": self.input_electron_dl2,
                "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            }

            Provenance().add_input_file(self.input_proton_dl2)
            Provenance().add_input_file(self.input_electron_dl2)

        self.provenance_log = self.output_irf_file.parent / (
            self.name + ".provenance.log")

    def start(self):
        """Read the MC events, apply selection/cuts, and build all
        requested IRF HDUs in ``self.hdus``."""
        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(
                p["file"])
            p["mc_type"] = check_mc_type(p["file"])

            self.log.debug(
                f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs)

                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            if not self.source_dep:
                for prefix in ("true", "reco"):
                    k = f"{prefix}_source_fov_offset"
                    p["events"][k] = calculate_source_fov_offset(
                        p["events"], prefix=prefix)

                # calculate theta / distance between reco and assumed source
                # position
                p["events"]["theta"] = calculate_theta(
                    p["events"],
                    assumed_source_az=p["events"]["true_az"],
                    assumed_source_alt=p["events"]["true_alt"],
                )
            else:
                # Alpha cut is applied for source-dependent analysis.
                # To adapt source-dependent analysis to pyirf codes,
                # true position is set as reco position for survived events
                # after alpha cut
                p["events"][
                    "true_source_fov_offset"] = calculate_source_fov_offset(
                        p["events"], prefix="true")
                p["events"]["reco_source_fov_offset"] = p["events"][
                    "true_source_fov_offset"]

        self.log.debug(p["simulation_info"])
        gammas = self.mc_particle["gamma"]["events"]

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.cuts.allowed_tels_filter(gammas)

        # Gamma/hadron separation: energy-dependent or global cut.
        if self.energy_dependent_gh:
            self.gh_cuts_gamma = self.cuts.energy_dependent_gh_cuts(
                gammas, reco_energy_bins)
            gammas = self.cuts.apply_energy_dependent_gh_cuts(
                gammas, self.gh_cuts_gamma)
            self.log.info(
                f"Using gamma efficiency of {self.cuts.gh_efficiency}")
        else:
            gammas = self.cuts.apply_global_gh_cut(gammas)
            self.log.info("Using a global gammaness cut of "
                          f"{self.cuts.global_gh_cut}")

        # Direction cut: theta (source-independent) or alpha (source-dep).
        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    self.theta_cuts = self.cuts.energy_dependent_theta_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_theta_cuts(
                        gammas, self.theta_cuts)
                    self.log.info("Using a containment region for theta of "
                                  f"{self.cuts.theta_containment}")
                else:
                    gammas = self.cuts.apply_global_theta_cut(gammas)
                    self.log.info(
                        "Using a global Theta cut of "
                        f"{self.cuts.global_theta_cut} for point-like IRF")
            else:
                if self.energy_dependent_alpha:
                    self.alpha_cuts = self.cuts.energy_dependent_alpha_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_alpha_cuts(
                        gammas, self.alpha_cuts)
                    self.log.info("Using a containment region for alpha of "
                                  f"{self.cuts.alpha_containment} %")
                else:
                    gammas = self.cuts.apply_global_alpha_cut(gammas)
                    self.log.info(
                        'Using a global Alpha cut of '
                        f'{self.cuts.global_alpha_cut} for point like IRF')

        # FoV offset binning: a single narrow bin around the mean offset for
        # point-like/ring-wobble MC, multiple bins for diffuse MC.
        if self.mc_particle["gamma"]["mc_type"] in [
                "point_like", "ring_wobble"]:
            mean_fov_offset = round(
                gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1,
                               mean_fov_offset + 0.1] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

            if self.energy_dependent_theta:
                fov_offset_bins = [
                    round(gammas["true_source_fov_offset"].min().to_value(), 1),
                    round(gammas["true_source_fov_offset"].max().to_value(), 1)
                ] * u.deg
                self.log.info("For RAD MAX, the full FoV is used")

        if not self.only_gamma_irf:
            background = table.vstack([
                self.mc_particle["proton"]["events"],
                self.mc_particle["electron"]["events"]
            ])

            if self.energy_dependent_gh:
                background = self.cuts.apply_energy_dependent_gh_cuts(
                    background, self.gh_cuts_gamma)
            else:
                background = self.cuts.apply_global_gh_cut(background)

            background = self.event_sel.filter_cut(background)
            background = self.cuts.allowed_tels_filter(background)

            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a global gh/theta cut, only a header value is added.
        # For energy-dependent cuts, along with GADF specified RAD_MAX HDU,
        # a new HDU is created, GH_CUTS which is based on RAD_MAX table
        # NOTE: The GH_CUTS HDU is just for provenance and is not supported
        # by GADF or used by any Science Tools
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(map(str, self.cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
        }

        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Updating the HDU headers with the gammaness and theta cuts/efficiency
        if not self.energy_dependent_gh:
            extra_headers["GH_CUT"] = self.cuts.global_gh_cut
        else:
            extra_headers["GH_EFF"] = (self.cuts.gh_efficiency,
                                       "gamma/hadron efficiency")

        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    extra_headers["TH_CONT"] = (
                        self.cuts.theta_containment,
                        "Theta containment region in percentage")
                else:
                    extra_headers["RAD_MAX"] = (self.cuts.global_theta_cut,
                                                'deg')
            else:
                if self.energy_dependent_alpha:
                    extra_headers["AL_CONT"] = (
                        self.cuts.alpha_containment,
                        "Alpha containment region in percentage")
                else:
                    extra_headers["AL_CUT"] = (self.cuts.global_alpha_cut,
                                               'deg')

        # Write HDUs
        self.hdus = [fits.PrimaryHDU(), ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] in [
                    "point_like", "ring_wobble"]:
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))

        self.log.info("Effective Area HDU created")

        self.edisp = energy_dispersion(
            gammas,
            true_energy_bins,
            fov_offset_bins,
            migration_bins,
        )
        self.hdus.append(
            create_energy_dispersion_hdu(
                self.edisp,
                true_energy_bins,
                migration_bins,
                fov_offset_bins,
                point_like=self.point_like,
                extname="ENERGY DISPERSION",
                **extra_headers,
            ))
        self.log.info("Energy Dispersion HDU created")

        if not self.only_gamma_irf:
            self.background = background_2d(
                background,
                reco_energy_bins=reco_energy_bins,
                fov_offset_bins=background_offset_bins,
                t_obs=self.t_obs,
            )
            self.hdus.append(
                create_background_2d_hdu(
                    self.background.T,
                    reco_energy_bins,
                    background_offset_bins,
                    extname="BACKGROUND",
                    **extra_headers,
                ))
            self.log.info("Background HDU created")

        if not self.point_like:
            self.psf = psf_table(
                gammas,
                true_energy_bins,
                fov_offset_bins=fov_offset_bins,
                source_offset_bins=source_offset_bins,
            )
            self.hdus.append(
                create_psf_table_hdu(
                    self.psf,
                    true_energy_bins,
                    source_offset_bins,
                    fov_offset_bins,
                    extname="PSF",
                    **extra_headers,
                ))
            self.log.info("PSF HDU created")

        if self.energy_dependent_gh:
            # Create a separate temporary header
            gh_header = fits.Header()
            gh_header["CREATOR"] = f"lstchain v{__version__}"
            gh_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                gh_header[k] = v

            self.hdus.append(
                fits.BinTableHDU(self.gh_cuts_gamma,
                                 header=gh_header,
                                 name="GH_CUTS"))
            self.log.info("GH CUTS HDU added")

        if self.energy_dependent_theta and self.point_like:
            if not self.source_dep:
                self.hdus.append(
                    create_rad_max_hdu(self.theta_cuts["cut"][:, np.newaxis],
                                       reco_energy_bins, fov_offset_bins,
                                       **extra_headers))
                self.log.info("RAD MAX HDU added")

        if self.energy_dependent_alpha and self.source_dep:
            # Create a separate temporary header
            alpha_header = fits.Header()
            alpha_header["CREATOR"] = f"lstchain v{__version__}"
            alpha_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                alpha_header[k] = v

            # FIX: the AL_CUTS HDU previously used `gh_header`, which is the
            # wrong header and is undefined (NameError) whenever
            # --energy-dependent-alpha is used without --energy-dependent-gh.
            self.hdus.append(
                fits.BinTableHDU(self.alpha_cuts,
                                 header=alpha_header,
                                 name="AL_CUTS"))
            self.log.info("ALPHA CUTS HDU added")

    def finish(self):
        """Write all accumulated HDUs to the output FITS file and record
        it in provenance."""
        fits.HDUList(self.hdus).writeto(self.output_irf_file,
                                        overwrite=self.overwrite)
        Provenance().add_output_file(self.output_irf_file)
class FITSIndexWriter(Tool):
    """Create HDU- and OBS-index FITS files for a directory of DL3 files,
    following the GADF index-file scheme."""

    name = "FITSIndexWriter"
    description = __doc__
    example = """
    To create DL3 index files with default values:
    > lstchain_create_dl3_index_files
        -d /path/to/DL3/files/

    Or specify some more configurations:
    > lstchain_create_dl3_index_files
        -d /path/to/DL3/files/
        -o /path/to/DL3/index/files
        -p dl3*[run_1-run_n]*.fits.gz
        --overwrite
    """

    # --- configurable traits -------------------------------------------------
    input_dl3_dir = traits.Path(
        help="Input path of DL3 files",
        exists=True,
        directory_ok=True,
        file_ok=False
    ).tag(config=True)

    file_pattern = traits.Unicode(
        help="File pattern to search in the given Path",
        default_value="dl3*.fits*"
    ).tag(config=True)

    output_index_path = traits.Path(
        help="Output path for the Index files",
        exists=True,
        directory_ok=True,
        file_ok=False,
        default_value=None
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    aliases = {
        ("d", "input-dl3-dir"): "FITSIndexWriter.input_dl3_dir",
        ("o", "output-index-path"): "FITSIndexWriter.output_index_path",
        ("p", "file-pattern"): "FITSIndexWriter.file_pattern",
    }

    flags = {
        "overwrite": (
            {"FITSIndexWriter": {"overwrite": True}},
            "overwrite output files if True",
        )
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.file_list = []
        # fixed GADF index-file names, created under output_index_path
        self.hdu_index_filename = "hdu-index.fits.gz"
        self.obs_index_filename = "obs-index.fits.gz"

    def setup(self):
        """Collect the input DL3 files, resolve the output paths, and
        enforce the overwrite policy for both index files.

        Raises
        ------
        ToolConfigurationError
            If an index file exists and --overwrite was not given.
        """
        list_files = sorted(self.input_dl3_dir.glob(self.file_pattern))
        # idiomatic truthiness test instead of `list_files == []`
        if not list_files:
            # deliberately only logged: an empty directory produces empty
            # index files rather than aborting the tool
            self.log.critical(f"No files found with pattern {self.file_pattern}")

        for f in list_files:
            self.file_list.append(f.name)
            Provenance().add_input_file(f)

        # default the output directory to the input directory
        if not self.output_index_path:
            self.output_index_path = self.input_dl3_dir

        self.hdu_index_file = self.output_index_path / self.hdu_index_filename
        self.obs_index_file = self.output_index_path / self.obs_index_filename

        self.provenance_log = self.output_index_path / (
            self.name + ".provenance.log")

        if self.hdu_index_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.hdu_index_file}")
                self.hdu_index_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.hdu_index_file} already exists,"
                    "use --overwrite to overwrite"
                )

        if self.obs_index_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.obs_index_file}")
                self.obs_index_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.obs_index_file} already exists,"
                    " use --overwrite to overwrite"
                )

        self.log.debug("HDU Index file: %s", self.hdu_index_file)
        self.log.debug("OBS Index file: %s", self.obs_index_file)

    def start(self):
        """Create the HDU-index and OBS-index files from the collected
        DL3 file list."""
        create_hdu_index_hdu(
            self.file_list,
            self.input_dl3_dir,
            self.hdu_index_file,
            self.overwrite,
        )
        create_obs_index_hdu(
            self.file_list,
            self.input_dl3_dir,
            self.obs_index_file,
            self.overwrite
        )
        self.log.debug("HDULists created for the index files")

    def finish(self):
        """Register both index files in provenance."""
        Provenance().add_output_file(self.hdu_index_file)
        Provenance().add_output_file(self.obs_index_file)
class DataReductionFITSWriter(Tool):
    """Create a DL3 FITS file (event list, GTI and pointing HDUs plus the
    IRF HDUs copied from the input IRF file) from an observed DL2 file."""

    name = "DataReductionFITSWriter"
    description = __doc__
    example = """
    To generate DL3 file from an observed data DL2 file, using default cuts:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg

    Or use a config file for the cuts:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg
        --overwrite
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-ra 83.633deg
        --source-dec 22.01deg
        --global-gh-cut 0.9
        --overwrite

    Or generate source-dependent DL3 files
    > lstchain_create_dl3_file
        -d /path/to/DL2_data_file.h5
        -o /path/to/DL3/file/
        --input-irf /path/to/irf.fits.gz
        --source-name Crab
        --source-dep
        --overwrite
    """

    # --- configurable traits -------------------------------------------------
    input_dl2 = traits.Path(help="Input data DL2 file",
                            exists=True,
                            directory_ok=False,
                            file_ok=True).tag(config=True)

    output_dl3_path = traits.Path(help="DL3 output filedir",
                                  directory_ok=True,
                                  file_ok=False).tag(config=True)

    input_irf = traits.Path(
        help="Compressed FITS file of IRFs",
        exists=True,
        directory_ok=False,
        file_ok=True,
    ).tag(config=True)

    source_name = traits.Unicode(help="Name of Source").tag(config=True)

    source_ra = traits.Unicode(help="RA position of the source").tag(
        config=True)

    source_dec = traits.Unicode(help="DEC position of the source").tag(
        config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    source_dep = traits.Bool(
        help="If True, source-dependent analysis will be performed.",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3Cuts]

    aliases = {
        ("d", "input-dl2"): "DataReductionFITSWriter.input_dl2",
        ("o", "output-dl3-path"): "DataReductionFITSWriter.output_dl3_path",
        "input-irf": "DataReductionFITSWriter.input_irf",
        "global-gh-cut": "DL3Cuts.global_gh_cut",
        "source-name": "DataReductionFITSWriter.source_name",
        "source-ra": "DataReductionFITSWriter.source_ra",
        "source-dec": "DataReductionFITSWriter.source_dec",
    }

    flags = {
        "overwrite": (
            {"DataReductionFITSWriter": {"overwrite": True}},
            "overwrite output file if True",
        ),
        "source-dep": (
            {"DataReductionFITSWriter": {"source_dep": True}},
            "source-dependent analysis if True",
        ),
    }

    def setup(self):
        """Resolve the output file, the source sky position and the cut
        strategy (global vs energy-dependent) from the IRF file.

        Raises
        ------
        ToolConfigurationError
            If the output exists without --overwrite, if only one of
            RA/DEC is given, or if the IRF file lacks the EFFECTIVE AREA
            HDU used to detect the cut strategy.
        """
        self.filename_dl3 = dl2_to_dl3_filename(self.input_dl2)
        self.provenance_log = self.output_dl3_path / (
            self.name + ".provenance.log")

        Provenance().add_input_file(self.input_dl2)

        self.event_sel = EventSelector(parent=self)
        self.cuts = DL3Cuts(parent=self)

        self.output_file = self.output_dl3_path.absolute() / self.filename_dl3
        if self.output_file.exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_file}")
                self.output_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_file} already exists,"
                    " use --overwrite to overwrite")

        # Source position: resolve by name when no coordinates are given,
        # otherwise require both RA and DEC.
        if not (self.source_ra or self.source_dec):
            self.source_pos = SkyCoord.from_name(self.source_name)
        elif bool(self.source_ra) != bool(self.source_dec):
            raise ToolConfigurationError(
                "Either provide both RA and DEC values for the source or none")
        else:
            self.source_pos = SkyCoord(ra=self.source_ra, dec=self.source_dec)

        self.log.debug(f"Output DL3 file: {self.output_file}")

        # A GH_CUT keyword in the EFFECTIVE AREA header marks a global cut;
        # its absence means energy-dependent cuts are stored in GH_CUTS.
        try:
            with fits.open(self.input_irf) as hdul:
                self.use_energy_dependent_cuts = (
                    "GH_CUT" not in hdul["EFFECTIVE AREA"].header)
        except (KeyError, OSError) as err:
            # FIX: was a bare `except:`, which caught every exception
            # (including KeyboardInterrupt/SystemExit) and discarded the
            # original cause; now only the expected failures are wrapped
            # and the cause is chained.
            raise ToolConfigurationError(
                f"{self.input_irf} does not have EFFECTIVE AREA HDU, "
                " to check for global cut information in the Header value"
            ) from err

    def apply_srcindep_gh_cut(self):
        ''' apply gammaness cut '''
        self.data = self.event_sel.filter_cut(self.data)

        if self.use_energy_dependent_cuts:
            self.energy_dependent_gh_cuts = QTable.read(self.input_irf,
                                                        hdu="GH_CUTS")

            self.data = self.cuts.apply_energy_dependent_gh_cuts(
                self.data, self.energy_dependent_gh_cuts)
            self.log.info("Using gamma efficiency of "
                          f"{self.energy_dependent_gh_cuts.meta['GH_EFF']}")
        else:
            with fits.open(self.input_irf) as hdul:
                self.cuts.global_gh_cut = hdul[1].header["GH_CUT"]
            self.data = self.cuts.apply_global_gh_cut(self.data)
            self.log.info(f"Using global G/H cut of {self.cuts.global_gh_cut}")

    def apply_srcdep_gh_alpha_cut(self):
        ''' apply gammaness and alpha cut for source-dependent analysis '''
        srcdep_assumed_positions = get_srcdep_assumed_positions(self.input_dl2)

        for i, srcdep_pos in enumerate(srcdep_assumed_positions):
            data_temp = read_data_dl2_to_QTable(self.input_dl2,
                                                srcdep_pos=srcdep_pos)

            data_temp = self.event_sel.filter_cut(data_temp)

            if self.use_energy_dependent_cuts:
                self.energy_dependent_gh_cuts = QTable.read(self.input_irf,
                                                            hdu="GH_CUTS")

                data_temp = self.cuts.apply_energy_dependent_gh_cuts(
                    data_temp, self.energy_dependent_gh_cuts)
            else:
                with fits.open(self.input_irf) as hdul:
                    self.cuts.global_gh_cut = hdul[1].header["GH_CUT"]
                data_temp = self.cuts.apply_global_gh_cut(data_temp)

            with fits.open(self.input_irf) as hdul:
                self.cuts.global_alpha_cut = hdul[1].header["AL_CUT"]
            data_temp = self.cuts.apply_global_alpha_cut(data_temp)

            # set expected source positions as reco positions
            set_expected_pos_to_reco_altaz(data_temp)

            if i == 0:
                self.data = data_temp
            else:
                self.data = vstack([self.data, data_temp])

    def start(self):
        """Read the DL2 events, apply the configured cuts, and assemble
        the full DL3 HDU list (events + GTI + pointing + IRF HDUs)."""
        if not self.source_dep:
            self.data = read_data_dl2_to_QTable(self.input_dl2)
        else:
            self.data = read_data_dl2_to_QTable(self.input_dl2, 'on')
        self.effective_time, self.elapsed_time = get_effective_time(self.data)
        self.run_number = run_info_from_filename(self.input_dl2)[1]

        if not self.source_dep:
            self.apply_srcindep_gh_cut()
        else:
            self.apply_srcdep_gh_alpha_cut()

        self.data = add_icrs_position_params(self.data, self.source_pos)

        self.log.info("Generating event list")
        self.events, self.gti, self.pointing = create_event_list(
            data=self.data,
            run_number=self.run_number,
            source_name=self.source_name,
            source_pos=self.source_pos,
            effective_time=self.effective_time.value,
            elapsed_time=self.elapsed_time.value,
        )

        self.hdulist = fits.HDUList(
            [fits.PrimaryHDU(), self.events, self.gti, self.pointing])

        # NOTE: the IRF file is intentionally left open here — its HDU data
        # are lazy-loaded and are only read when the list is written in
        # finish().
        irf = fits.open(self.input_irf)
        self.log.info("Adding IRF HDUs")

        for irf_hdu in irf[1:]:
            self.hdulist.append(irf_hdu)

    def finish(self):
        """Write the DL3 file and record it in provenance."""
        self.hdulist.writeto(self.output_file, overwrite=self.overwrite)
        Provenance().add_output_file(self.output_file)
class CameraDemo(Tool):
    """Animate fake toymodel shower images in a chosen camera geometry."""

    name = "ctapipe-camdemo"
    description = "Display fake events in a demo camera"

    # animation frame delay in milliseconds
    delay = traits.Int(50, help="Frame delay in ms", min=20).tag(config=True)
    # after this many frames, cleaning is toggled on (and off again after 2x)
    cleanframes = traits.Int(
        20,
        help="Number of frames between turning on cleaning",
        min=0,
    ).tag(config=True)
    autoscale = traits.Bool(
        False, help='scale each frame to max if True'
    ).tag(config=True)
    blit = traits.Bool(
        False,
        help='use blit operation to draw on screen ('
             'much faster but may cause some draw artifacts)',
    ).tag(config=True)
    camera = traits.CaselessStrEnum(
        CameraDescription.get_known_camera_names(),
        default_value='NectarCam',
        help='Name of camera to display',
    ).tag(config=True)
    optics = traits.CaselessStrEnum(
        OpticsDescription.get_known_optics_names(),
        default_value='MST',
        help='Telescope optics description name',
    ).tag(config=True)
    num_events = traits.Int(
        0, help='events to show before exiting (0 for unlimited)'
    ).tag(config=True)
    # BUG FIX: the help text was passed as the second *positional* argument,
    # which traitlets interprets as ``allow_none`` (truthy string -> True),
    # not as the help string.  Pass it via the ``help`` keyword.
    display = traits.Bool(
        True, help="enable or disable display (for testing)"
    ).tag(config=True)

    aliases = traits.Dict({
        'delay': 'CameraDemo.delay',
        'cleanframes': 'CameraDemo.cleanframes',
        'autoscale': 'CameraDemo.autoscale',
        'blit': 'CameraDemo.blit',
        'camera': 'CameraDemo.camera',
        'optics': 'CameraDemo.optics',
        'num-events': 'CameraDemo.num_events',
        # consistency: every other configurable trait has an alias
        'display': 'CameraDemo.display',
    })

    def __init__(self):
        super().__init__()
        self._counter = 0      # frame counter, reset after each clean/raw cycle
        self.imclean = False   # True while cleaned images are being shown

    def start(self):
        self.log.info(f"Starting CameraDisplay for {self.camera}")
        self._display_camera_animation()

    def _display_camera_animation(self):
        """Build the camera display and run the FuncAnimation loop."""
        # plt.style.use("ggplot")
        fig = plt.figure(num="ctapipe Camera Demo", figsize=(7, 7))
        ax = plt.subplot(111)

        # load the camera
        tel = TelescopeDescription.from_name(optics_name=self.optics,
                                             camera_name=self.camera)
        geom = tel.camera.geometry

        # poor-man's coordinate transform from telescope to camera frame (it's
        # better to use ctapipe.coordinates when they are stable)
        foclen = tel.optics.equivalent_focal_length.to(geom.pix_x.unit).value
        fov = np.deg2rad(4.0)
        scale = foclen
        # NOTE(review): minwid is later added *unscaled* while the random part
        # is multiplied by scale — confirm this asymmetry is intended.
        minwid = np.deg2rad(0.1)
        maxwid = np.deg2rad(0.3)
        maxlen = np.deg2rad(0.5)

        self.log.debug(f"scale={scale} m, wid=({minwid}-{maxwid})")

        disp = CameraDisplay(
            geom, ax=ax, autoupdate=True,
            title=f"{tel}, f={tel.optics.equivalent_focal_length}"
        )
        disp.cmap = plt.cm.terrain

        def update(frame):
            """Generate one random toymodel shower and draw it."""
            x, y = np.random.uniform(-fov, fov, size=2) * scale
            width = np.random.uniform(0, maxwid - minwid) * scale + minwid
            length = np.random.uniform(0, maxlen) * scale + width
            angle = np.random.uniform(0, 360)
            intens = np.random.exponential(2) * 500
            model = toymodel.Gaussian(
                x=x * u.m,
                y=y * u.m,
                width=width * u.m,
                length=length * u.m,
                psi=angle * u.deg,
            )
            self.log.debug(
                "Frame=%d width=%03f length=%03f intens=%03d",
                frame, width, length, intens
            )

            image, _, _ = model.generate_image(
                geom,
                intensity=intens,
                nsb_level_pe=3,
            )

            # alternate between cleaned and raw images
            if self._counter == self.cleanframes:
                plt.suptitle("Image Cleaning ON")
                self.imclean = True
            if self._counter == self.cleanframes * 2:
                plt.suptitle("Image Cleaning OFF")
                self.imclean = False
                self._counter = 0
                disp.clear_overlays()

            if self.imclean:
                cleanmask = tailcuts_clean(geom, image,
                                           picture_thresh=10.0,
                                           boundary_thresh=5.0)
                for _ in range(2):
                    dilate(geom, cleanmask)
                image[cleanmask == 0] = 0  # zero noise pixels
                try:
                    hillas = hillas_parameters(geom, image)
                    disp.overlay_moments(hillas, with_label=False,
                                         color='red', alpha=0.7,
                                         linewidth=2, linestyle='dashed')
                except HillasParameterizationError:
                    disp.clear_overlays()

            self.log.debug("Frame=%d image_sum=%.3f max=%.3f",
                           self._counter, image.sum(), image.max())
            disp.image = image

            if self.autoscale:
                disp.set_limits_percent(95)
            else:
                disp.set_limits_minmax(-5, 200)

            disp.axes.figure.canvas.draw()
            self._counter += 1
            return [ax, ]

        # num_events == 0 means run forever (frames=None repeats indefinitely)
        frames = None if self.num_events == 0 else self.num_events
        repeat = self.num_events == 0
        self.log.info(f"Running for {frames} frames")
        self.anim = FuncAnimation(fig, update,
                                  interval=self.delay,
                                  frames=frames,
                                  repeat=repeat,
                                  blit=self.blit)
        if self.display:
            plt.show()
class MuonAnalysis(Tool):
    """
    Detect and extract muon ring parameters, and write the muon ring and
    intensity parameters to an output table.

    The resulting output can be read e.g. using for example
    `pandas.read_hdf(filename, 'dl1/event/telescope/parameters/muon')`
    """
    name = "ctapipe-reconstruct-muons"
    description = traits.Unicode(__doc__)

    output = traits.Path(directory_ok=False,
                         help="HDF5 output file name").tag(config=True)
    completeness_threshold = traits.FloatTelescopeParameter(
        default_value=30.0,
        help="Threshold for calculating the ``ring_completeness``",
    ).tag(config=True)
    ratio_width = traits.FloatTelescopeParameter(
        default_value=1.5,
        help=("Ring width for intensity ratio"
              " computation as multiple of pixel diameter"),
    ).tag(config=True)
    overwrite = traits.Bool(
        default_value=False,
        help="If true, overwrite outputfile without asking",
    ).tag(config=True)
    min_pixels = traits.IntTelescopeParameter(
        # TYPO FIX: text previously read "findingrequired" (missing space in
        # implicit string concatenation)
        help=("Minimum number of pixels after cleaning and ring finding"
              " required to process an event"),
        default_value=100,
    ).tag(config=True)
    pedestal = traits.FloatTelescopeParameter(
        help="Pedestal noise rms", default_value=1.1).tag(config=True)

    classes = [
        CameraCalibrator,
        TailcutsImageCleaner,
        EventSource,
        MuonRingFitter,
        MuonIntensityFitter,
    ]

    aliases = {
        "i": "EventSource.input_url",
        "input": "EventSource.input_url",
        "o": "MuonAnalysis.output",
        "output": "MuonAnalysis.output",
        "max-events": "EventSource.max_events",
        "allowed-tels": "EventSource.allowed_tels",
    }

    flags = {
        "overwrite": ({"MuonAnalysis": {"overwrite": True}},
                      "overwrite output file")
    }

    def setup(self):
        """Validate configuration, create sub-components and open the writer."""
        if self.output is None:
            raise ToolConfigurationError(
                "You need to provide an --output file")
        if self.output.exists() and not self.overwrite:
            # BUG FIX: message was missing the f-prefix and printed the
            # literal "{self.output}" instead of the actual path
            raise ToolConfigurationError(
                f"Outputfile {self.output} already exists,"
                " use `--overwrite` to overwrite"
            )

        self.source = EventSource(parent=self)
        subarray = self.source.subarray

        self.calib = CameraCalibrator(subarray=subarray, parent=self)
        self.ring_fitter = MuonRingFitter(parent=self)
        self.intensity_fitter = MuonIntensityFitter(subarray=subarray,
                                                    parent=self)
        self.cleaning = TailcutsImageCleaner(parent=self, subarray=subarray)
        self.writer = HDF5TableWriter(self.output, "", add_prefix=True,
                                      parent=self, mode="w")

        # per-telescope memoization caches, filled lazily
        self.pixels_in_tel_frame = {}
        self.field_of_view = {}
        self.pixel_widths = {}

        for p in ["min_pixels", "pedestal", "ratio_width",
                  "completeness_threshold"]:
            getattr(self, p).attach_subarray(self.source.subarray)

    def start(self):
        for event in tqdm(self.source, desc="Processing events: "):
            self.process_array_event(event)

    def process_array_event(self, event):
        """Calibrate one array event and process every telescope image."""
        self.calib(event)
        for tel_id, dl1 in event.dl1.tel.items():
            self.process_telescope_event(event.index, tel_id, dl1)
        self.writer.write("sim/event/subarray/shower",
                          [event.index, event.simulation.shower])

    def process_telescope_event(self, event_index, tel_id, dl1):
        """Fit and write the muon ring for a single telescope image."""
        event_id = event_index.event_id

        if self.source.subarray.tel[tel_id].optics.num_mirrors != 1:
            # BUG FIX: Logger.warn is a deprecated alias of Logger.warning
            self.log.warning(
                f"Skipping non-single mirror telescope {tel_id}"
                " set --allowed_tels to get rid of this warning"
            )
            return

        self.log.debug(f"Processing event {event_id}, telescope {tel_id}")
        image = dl1.image
        if dl1.image_mask is None:
            dl1.image_mask = self.cleaning(tel_id, image)

        if np.count_nonzero(dl1.image_mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}:"
                f" has less then {self.min_pixels.tel[tel_id]} pixels after cleaning"
            )
            return

        x, y = self.get_pixel_coords(tel_id)

        # iterative ring fit:
        # First use cleaning pixels, then only pixels close to the ring
        # three iterations seems to be enough for most rings
        mask = dl1.image_mask
        for _ in range(3):
            ring = self.ring_fitter(x, y, image, mask)
            dist = np.sqrt((x - ring.center_x)**2 + (y - ring.center_y)**2)
            mask = np.abs(dist - ring.radius) / ring.radius < 0.4

        if np.count_nonzero(mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}:"
                f" Less then {self.min_pixels.tel[tel_id]} pixels on ring")
            return

        if np.isnan([ring.radius.value, ring.center_x.value,
                     ring.center_y.value]).any():
            self.log.debug(
                f"Skipping event {event_id}-{tel_id}: Ring fit did not succeed"
            )
            return

        parameters = self.calculate_muon_parameters(tel_id, image,
                                                    dl1.image_mask, ring)

        # intensity_fitter does not support a mask yet, set ignored pixels to 0
        image[~mask] = 0

        result = self.intensity_fitter(
            tel_id,
            ring.center_x,
            ring.center_y,
            ring.radius,
            image,
            pedestal=self.pedestal.tel[tel_id],
        )

        self.log.info(f"Muon fit: r={ring.radius:.2f}"
                      f", width={result.width:.4f}"
                      f", efficiency={result.optical_efficiency:.2%}")

        tel_event_index = TelEventIndexContainer(**event_index, tel_id=tel_id)
        self.writer.write(
            "dl1/event/telescope/parameters/muons",
            [tel_event_index, ring, parameters, result],
        )

    def calculate_muon_parameters(self, tel_id, image, clean_mask, ring):
        """Compute containment, completeness, intensity ratio and MSE."""
        fov_radius = self.get_fov(tel_id)
        x, y = self.get_pixel_coords(tel_id)

        # add ring containment, not filled in fit
        containment = ring_containment(ring.radius, ring.center_x,
                                       ring.center_y, fov_radius)

        completeness = ring_completeness(
            x, y, image,
            ring.radius, ring.center_x, ring.center_y,
            threshold=self.completeness_threshold.tel[tel_id],
        )

        pixel_width = self.get_pixel_width(tel_id)
        intensity_ratio = intensity_ratio_inside_ring(
            x[clean_mask], y[clean_mask], image[clean_mask],
            ring.radius, ring.center_x, ring.center_y,
            width=self.ratio_width.tel[tel_id] * pixel_width,
        )

        mse = mean_squared_error(
            x[clean_mask], y[clean_mask], image[clean_mask],
            ring.radius, ring.center_x, ring.center_y,
        )

        return MuonParametersContainer(
            containment=containment,
            completeness=completeness,
            intensity_ratio=intensity_ratio,
            mean_squared_error=mse,
        )

    def get_fov(self, tel_id):
        """Guesstimate fov radius for telescope with id `tel_id`"""
        # memoize fov calculation
        if tel_id not in self.field_of_view:
            cam = self.source.subarray.tel[tel_id].camera.geometry
            border = cam.get_border_pixel_mask()
            x, y = self.get_pixel_coords(tel_id)
            self.field_of_view[tel_id] = np.sqrt(x[border]**2
                                                 + y[border]**2).mean()
        return self.field_of_view[tel_id]

    def get_pixel_width(self, tel_id):
        """Guesstimate pixel width for telescope with id `tel_id`"""
        # memoize pixel width calculation
        # (docstring fixed: it previously duplicated get_fov's text)
        if tel_id not in self.pixel_widths:
            x, y = self.get_pixel_coords(tel_id)
            self.pixel_widths[tel_id] = CameraGeometry.guess_pixel_width(x, y)
        return self.pixel_widths[tel_id]

    def get_pixel_coords(self, tel_id):
        """Get pixel coords in telescope frame for telescope with id `tel_id`"""
        # memoize transformation
        if tel_id not in self.pixels_in_tel_frame:
            telescope = self.source.subarray.tel[tel_id]
            cam = telescope.camera.geometry
            camera_frame = CameraFrame(
                focal_length=telescope.optics.equivalent_focal_length,
                rotation=cam.cam_rotation,
            )
            cam_coords = SkyCoord(x=cam.pix_x, y=cam.pix_y,
                                  frame=camera_frame)
            tel_coord = cam_coords.transform_to(TelescopeFrame())
            self.pixels_in_tel_frame[tel_id] = tel_coord

        coords = self.pixels_in_tel_frame[tel_id]
        return coords.fov_lon, coords.fov_lat

    def finish(self):
        Provenance().add_output_file(self.output,
                                     role="muon_efficiency_parameters")
        self.writer.close()
class IRFFITSWriter(Tool):
    name = "IRFFITSWriter"
    description = __doc__
    example = """
    To generate IRFs from MC gamma only, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --overwrite

    Or to generate all 4 IRFs, using default cuts/binning:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -p /path/to/DL2_MC_proton_file.h5
        -e /path/to/DL2_MC_electron_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)

    Or use a config file for cuts and binning information:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --config /path/to/config.json

    Or pass the selection cuts from command-line:
    > lstchain_create_irf_files
        -g /path/to/DL2_MC_gamma_file.h5
        -o /path/to/irf.fits.gz
        --point-like (Only for point_like IRFs)
        --fixed-gh-cut 0.9
        --fixed-theta-cut 0.2
        --irf-obs-time 50
    """

    input_gamma_dl2 = traits.Path(
        help="Input MC gamma DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True,
    ).tag(config=True)

    input_proton_dl2 = traits.Path(
        help="Input MC proton DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True,
    ).tag(config=True)

    input_electron_dl2 = traits.Path(
        help="Input MC electron DL2 file",
        exists=True,
        directory_ok=False,
        file_ok=True,
    ).tag(config=True)

    output_irf_file = traits.Path(
        help="IRF output file",
        directory_ok=False,
        file_ok=True,
        default_value="./irf.fits.gz",
    ).tag(config=True)

    irf_obs_time = traits.Float(
        help="Observation time for IRF in hours",
        default_value=50,
    ).tag(config=True)

    point_like = traits.Bool(
        help="True for point_like IRF, False for Full Enclosure",
        default_value=False,
    ).tag(config=True)

    overwrite = traits.Bool(
        help="If True, overwrites existing output file without asking",
        default_value=False,
    ).tag(config=True)

    classes = [EventSelector, DL3FixedCuts, DataBinning]

    aliases = {
        ("g", "input-gamma-dl2"): "IRFFITSWriter.input_gamma_dl2",
        ("p", "input-proton-dl2"): "IRFFITSWriter.input_proton_dl2",
        ("e", "input-electron-dl2"): "IRFFITSWriter.input_electron_dl2",
        ("o", "output-irf-file"): "IRFFITSWriter.output_irf_file",
        "irf-obs-time": "IRFFITSWriter.irf_obs_time",
        "fixed-gh-cut": "DL3FixedCuts.fixed_gh_cut",
        "fixed-theta-cut": "DL3FixedCuts.fixed_theta_cut",
        "allowed-tels": "DL3FixedCuts.allowed_tels",
        "overwrite": "IRFFITSWriter.overwrite",
    }

    flags = {
        "point-like": (
            {"IRFFITSWriter": {"point_like": True}},
            "Point like IRFs will be produced, otherwise Full Enclosure",
        ),
        "overwrite": (
            {"IRFFITSWriter": {"overwrite": True}},
            "overwrites output file",
        )
    }

    def setup(self):
        """Validate output path, build selection/binning helpers and read MC info."""
        if self.output_irf_file.absolute().exists():
            if self.overwrite:
                self.log.warning(f"Overwriting {self.output_irf_file}")
                self.output_irf_file.unlink()
            else:
                raise ToolConfigurationError(
                    f"Output file {self.output_irf_file} already exists,"
                    " use --overwrite to overwrite"
                )

        filename = self.output_irf_file.name
        # BUG FIX: the message was a mangled f-string and never interpolated
        # the filename; also use the tuple form of str.endswith
        if not filename.endswith((".fits", ".fits.gz")):
            raise ValueError(
                f"{filename} is not a correct compressed FITS file name "
                "(use .fits or .fits.gz)."
            )

        # BUG FIX: previous condition was
        # `self.input_proton_dl2 and self.input_electron_dl2 is not None`,
        # which mixes truthiness and identity tests due to precedence;
        # check both inputs explicitly
        if (self.input_proton_dl2 is not None
                and self.input_electron_dl2 is not None):
            self.only_gamma_irf = False
        else:
            self.only_gamma_irf = True

        self.event_sel = EventSelector(parent=self)
        self.fixed_cuts = DL3FixedCuts(parent=self)
        self.data_bin = DataBinning(parent=self)

        self.mc_particle = {
            "gamma": {
                "file": str(self.input_gamma_dl2),
                "target_spectrum": CRAB_MAGIC_JHEAP2015,
            },
        }
        Provenance().add_input_file(self.input_gamma_dl2)

        self.t_obs = self.irf_obs_time * u.hour

        # Read and update MC information
        if not self.only_gamma_irf:
            self.mc_particle["proton"] = {
                "file": str(self.input_proton_dl2),
                "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            }
            self.mc_particle["electron"] = {
                "file": str(self.input_electron_dl2),
                "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            }
            Provenance().add_input_file(self.input_proton_dl2)
            Provenance().add_input_file(self.input_electron_dl2)

        self.provenance_log = self.output_irf_file.parent / (
            self.name + ".provenance.log"
        )

    def start(self):
        """Apply cuts, compute the IRF components and collect them as HDUs."""
        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])

            # viewcone == 0 means the MC was simulated at a single offset
            if p["simulation_info"].viewcone.value == 0.0:
                p["mc_type"] = "point_like"
            else:
                p["mc_type"] = "diffuse"
            self.log.debug(
                f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs
                )
                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            for prefix in ("true", "reco"):
                k = f"{prefix}_source_fov_offset"
                p["events"][k] = calculate_source_fov_offset(p["events"],
                                                             prefix=prefix)

            # calculate theta / distance between reco and assumed source position
            p["events"]["theta"] = calculate_theta(
                p["events"],
                assumed_source_az=p["events"]["true_az"],
                assumed_source_alt=p["events"]["true_alt"],
            )
            self.log.debug(p["simulation_info"])

        gammas = self.mc_particle["gamma"]["events"]

        self.log.info(f"Using fixed G/H cut of {self.fixed_cuts.fixed_gh_cut}")
        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.fixed_cuts.allowed_tels_filter(gammas)
        gammas = self.fixed_cuts.gh_cut(gammas)

        if self.point_like:
            gammas = self.fixed_cuts.theta_cut(gammas)
            self.log.info('Theta cuts applied for point like IRF')

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        if self.mc_particle["gamma"]["mc_type"] == "point_like":
            mean_fov_offset = round(
                gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1,
                               mean_fov_offset + 0.1] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

        if not self.only_gamma_irf:
            background = table.vstack(
                [
                    self.mc_particle["proton"]["events"],
                    self.mc_particle["electron"]["events"],
                ]
            )
            background = self.event_sel.filter_cut(background)
            background = self.fixed_cuts.allowed_tels_filter(background)
            background = self.fixed_cuts.gh_cut(background)
            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a fixed gh/theta cut, only a header value is added.
        # For energy dependent cuts, a new HDU should be created
        # GH_CUT and FOV_CUT are temporary non-standard header data
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(
                map(str, self.fixed_cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
            "GH_CUT": self.fixed_cuts.fixed_gh_cut,
        }
        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
            extra_headers["RAD_MAX"] = str(
                self.fixed_cuts.fixed_theta_cut * u.deg)
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Write HDUs
        self.hdus = [fits.PrimaryHDU(), ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] == "point_like":
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )
            self.log.info("Effective Area HDU created")

            self.edisp = energy_dispersion(
                gammas,
                true_energy_bins,
                fov_offset_bins,
                migration_bins,
            )
            self.hdus.append(
                create_energy_dispersion_hdu(
                    self.edisp,
                    true_energy_bins,
                    migration_bins,
                    fov_offset_bins,
                    point_like=self.point_like,
                    extname="ENERGY DISPERSION",
                    **extra_headers,
                )
            )
            self.log.info("Energy Dispersion HDU created")

            if not self.only_gamma_irf:
                self.background = background_2d(
                    background,
                    reco_energy_bins=reco_energy_bins,
                    fov_offset_bins=background_offset_bins,
                    t_obs=self.t_obs,
                )
                self.hdus.append(
                    create_background_2d_hdu(
                        self.background.T,
                        reco_energy_bins,
                        background_offset_bins,
                        extname="BACKGROUND",
                        **extra_headers,
                    )
                )
                self.log.info("Background HDU created")

            if not self.point_like:
                self.psf = psf_table(
                    gammas,
                    true_energy_bins,
                    fov_offset_bins=fov_offset_bins,
                    source_offset_bins=source_offset_bins,
                )
                self.hdus.append(
                    create_psf_table_hdu(
                        self.psf,
                        true_energy_bins,
                        source_offset_bins,
                        fov_offset_bins,
                        extname="PSF",
                        **extra_headers,
                    )
                )
                self.log.info("PSF HDU created")

    def finish(self):
        """Write all collected HDUs to the output FITS file."""
        fits.HDUList(self.hdus).writeto(self.output_irf_file,
                                        overwrite=self.overwrite)
        Provenance().add_output_file(self.output_irf_file)
class MuonAnalysis(Tool):
    """
    Detect and extract muon ring parameters, and write the muon ring and
    intensity parameters to an output table.

    The resulting output can be read e.g. using for example
    `pandas.read_hdf(filename, 'dl1/event/telescope/parameters/muon')`
    """
    name = 'ctapipe-reconstruct-muons'
    description = traits.Unicode(__doc__)

    output = traits.Path(directory_ok=False,
                         help='HDF5 output file name').tag(config=True)
    completeness_threshold = traits.FloatTelescopeParameter(
        default_value=30.0,
        help='Threshold for calculating the ``ring_completeness``',
    ).tag(config=True)
    ratio_width = traits.FloatTelescopeParameter(
        default_value=1.5,
        help=('Ring width for intensity ratio'
              ' computation as multiple of pixel diameter'),
    ).tag(config=True)
    overwrite = traits.Bool(
        default_value=False,
        help='If true, overwrite outputfile without asking',
    ).tag(config=True)
    min_pixels = traits.IntTelescopeParameter(
        # TYPO FIX: text previously read "findingrequired" (missing space in
        # implicit string concatenation)
        help=('Minimum number of pixels after cleaning and ring finding'
              ' required to process an event'),
        default_value=100,
    ).tag(config=True)
    pedestal = traits.FloatTelescopeParameter(
        help='Pedestal noise rms',
        default_value=1.1,
    ).tag(config=True)
    extractor_name = traits.create_class_enum_trait(
        ImageExtractor,
        default_value='GlobalPeakWindowSum',
    ).tag(config=True)

    classes = [
        CameraCalibrator,
        TailcutsImageCleaner,
        EventSource,
        MuonRingFitter,
        MuonIntensityFitter,
    ] + traits.classes_with_traits(ImageExtractor)

    aliases = {
        'i': 'EventSource.input_url',
        'input': 'EventSource.input_url',
        'o': 'MuonAnalysis.output',
        'output': 'MuonAnalysis.output',
        'max-events': 'EventSource.max_events',
        'allowed-tels': 'EventSource.allowed_tels',
    }

    flags = {
        'overwrite': ({'MuonAnalysis': {'overwrite': True}},
                      'overwrite output file')
    }

    def setup(self):
        """Validate configuration, create sub-components and open the writer."""
        if self.output is None:
            raise ToolConfigurationError(
                'You need to provide an --output file')
        if self.output.exists() and not self.overwrite:
            # BUG FIX: message was missing the f-prefix and printed the
            # literal "{self.output}" instead of the actual path
            raise ToolConfigurationError(
                f'Outputfile {self.output} already exists,'
                ' use `--overwrite` to overwrite'
            )

        self.source = self.add_component(EventSource.from_config(parent=self))
        self.extractor = self.add_component(
            ImageExtractor.from_name(self.extractor_name,
                                     parent=self,
                                     subarray=self.source.subarray))
        self.calib = self.add_component(
            CameraCalibrator(
                subarray=self.source.subarray,
                parent=self,
                image_extractor=self.extractor,
            ))
        self.ring_fitter = self.add_component(MuonRingFitter(parent=self, ))
        self.intensity_fitter = self.add_component(
            MuonIntensityFitter(
                subarray=self.source.subarray,
                parent=self,
            ))
        self.cleaning = self.add_component(
            TailcutsImageCleaner(
                parent=self,
                subarray=self.source.subarray,
            ))
        self.writer = self.add_component(
            HDF5TableWriter(
                self.output,
                "",
                add_prefix=True,
                parent=self,
                mode='w',
            ))

        # per-telescope memoization caches, filled lazily
        self.pixels_in_tel_frame = {}
        self.field_of_view = {}
        self.pixel_widths = {}

        for p in ['min_pixels', 'pedestal', 'ratio_width',
                  'completeness_threshold']:
            getattr(self, p).attach_subarray(self.source.subarray)

    def start(self):
        for event in tqdm(self.source, desc='Processing events: '):
            self.process_array_event(event)

    def process_array_event(self, event):
        """Calibrate one array event and process every telescope image."""
        self.calib(event)
        for tel_id, dl1 in event.dl1.tel.items():
            self.process_telescope_event(event.index, tel_id, dl1)
        self.writer.write('sim/event/subarray/shower', [event.index, event.mc])

    def process_telescope_event(self, event_index, tel_id, dl1):
        """Fit and write the muon ring for a single telescope image."""
        event_id = event_index.event_id

        if self.source.subarray.tel[tel_id].optics.num_mirrors != 1:
            # BUG FIX: Logger.warn is a deprecated alias of Logger.warning
            self.log.warning(
                f'Skipping non-single mirror telescope {tel_id}'
                ' set --allowed_tels to get rid of this warning'
            )
            return

        self.log.debug(f'Processing event {event_id}, telescope {tel_id}')
        image = dl1.image
        clean_mask = self.cleaning(tel_id, image)

        if np.count_nonzero(clean_mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}:'
                f' has less then {self.min_pixels.tel[tel_id]} pixels after cleaning'
            )
            return

        x, y = self.get_pixel_coords(tel_id)

        # iterative ring fit:
        # First use cleaning pixels, then only pixels close to the ring
        # three iterations seems to be enough for most rings
        mask = clean_mask
        for _ in range(3):
            ring = self.ring_fitter(x, y, image, mask)
            dist = np.sqrt((x - ring.center_x)**2 + (y - ring.center_y)**2)
            mask = np.abs(dist - ring.radius) / ring.radius < 0.4

        if np.count_nonzero(mask) <= self.min_pixels.tel[tel_id]:
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}:'
                f' Less then {self.min_pixels.tel[tel_id]} pixels on ring')
            return

        if np.isnan([ring.radius.value, ring.center_x.value,
                     ring.center_y.value]).any():
            self.log.debug(
                f'Skipping event {event_id}-{tel_id}: Ring fit did not succeed'
            )
            return

        parameters = self.calculate_muon_parameters(tel_id, image,
                                                    clean_mask, ring)

        # intensity_fitter does not support a mask yet, set ignored pixels to 0
        image[~mask] = 0

        result = self.intensity_fitter(
            tel_id,
            ring.center_x,
            ring.center_y,
            ring.radius,
            image,
            pedestal=self.pedestal.tel[tel_id],
        )

        self.log.info(
            f'Muon fit: r={ring.radius:.2f}'
            f', width={result.width:.4f}'
            f', efficiency={result.optical_efficiency:.2%}',
        )

        tel_event_index = TelEventIndexContainer(
            **event_index,
            tel_id=tel_id,
        )

        self.writer.write('dl1/event/telescope/parameters/muons',
                          [tel_event_index, ring, parameters, result])

    def calculate_muon_parameters(self, tel_id, image, clean_mask, ring):
        """Compute containment, completeness, intensity ratio and MSE."""
        fov_radius = self.get_fov(tel_id)
        x, y = self.get_pixel_coords(tel_id)

        # add ring containment, not filled in fit
        containment = ring_containment(
            ring.radius,
            ring.center_x,
            ring.center_y,
            fov_radius,
        )

        completeness = ring_completeness(
            x, y, image,
            ring.radius, ring.center_x, ring.center_y,
            threshold=self.completeness_threshold.tel[tel_id],
        )

        pixel_width = self.get_pixel_width(tel_id)
        intensity_ratio = intensity_ratio_inside_ring(
            x[clean_mask], y[clean_mask], image[clean_mask],
            ring.radius, ring.center_x, ring.center_y,
            width=self.ratio_width.tel[tel_id] * pixel_width,
        )

        mse = mean_squared_error(x[clean_mask], y[clean_mask],
                                 image[clean_mask],
                                 ring.radius, ring.center_x, ring.center_y)

        return MuonParametersContainer(
            containment=containment,
            completeness=completeness,
            intensity_ratio=intensity_ratio,
            mean_squared_error=mse,
        )

    def get_fov(self, tel_id):
        '''Guesstimate fov radius for telescope with id `tel_id`'''
        # memoize fov calculation
        if tel_id not in self.field_of_view:
            cam = self.source.subarray.tel[tel_id].camera.geometry
            border = cam.get_border_pixel_mask()
            x, y = self.get_pixel_coords(tel_id)
            self.field_of_view[tel_id] = np.sqrt(x[border]**2
                                                 + y[border]**2).mean()
        return self.field_of_view[tel_id]

    def get_pixel_width(self, tel_id):
        '''Guesstimate pixel width for telescope with id `tel_id`'''
        # memoize pixel width calculation
        # (docstring fixed: it previously duplicated get_fov's text)
        if tel_id not in self.pixel_widths:
            x, y = self.get_pixel_coords(tel_id)
            self.pixel_widths[tel_id] = CameraGeometry.guess_pixel_width(x, y)
        return self.pixel_widths[tel_id]

    def get_pixel_coords(self, tel_id):
        '''Get pixel coords in telescope frame for telescope with id `tel_id`'''
        # memoize transformation
        if tel_id not in self.pixels_in_tel_frame:
            telescope = self.source.subarray.tel[tel_id]
            cam = telescope.camera.geometry
            camera_frame = CameraFrame(
                focal_length=telescope.optics.equivalent_focal_length,
                rotation=cam.cam_rotation,
            )
            cam_coords = SkyCoord(x=cam.pix_x, y=cam.pix_y,
                                  frame=camera_frame)
            tel_coord = cam_coords.transform_to(TelescopeFrame())
            self.pixels_in_tel_frame[tel_id] = tel_coord

        coords = self.pixels_in_tel_frame[tel_id]
        return coords.fov_lon, coords.fov_lat

    def finish(self):
        Provenance().add_output_file(
            self.output,
            role='muon_efficiency_parameters',
        )
        self.writer.close()
class MuonDisplayerTool(Tool):
    """Run muon ring analysis on a simtel file and tabulate the results."""

    name = 'ctapipe-display-muons'
    description = t.Unicode(__doc__)

    infile = t.Unicode(
        help='input file name',
        default=get_dataset('gamma_test_large.simtel.gz')
    ).tag(config=True)

    outfile = t.Unicode(help='output file name',
                        default=None).tag(config=True)

    display = t.Bool(
        help='display the camera events',
        default=False
    ).tag(config=True)

    classes = t.List([CameraCalibrator, ])

    aliases = t.Dict({
        'infile': 'MuonDisplayerTool.infile',
        'outfile': 'MuonDisplayerTool.outfile',
        'display': 'MuonDisplayerTool.display'
    })

    def setup(self):
        self.calib = CameraCalibrator(config=self.config, tool=self)

    def start(self):
        """Loop over events, analyze muon rings and collect their parameters."""
        output_parameters = {'MuonEff': [],
                             'ImpactP': [],
                             'RingWidth': []}

        numev = 0
        num_muons_found = 0

        for event in hessio_event_source(self.infile):

            self.log.info("Event Number: %d, found %d muons",
                          numev, num_muons_found)
            self.calib.calibrate(event)
            muon_evt = analyze_muon_event(event)

            numev += 1

            if not muon_evt['MuonIntensityParams']:
                # No telescopes contained a good muon
                continue

            if self.display:
                plot_muon_event(event, muon_evt)

            # idiom fix: enumerate instead of the O(n^2) list.index() lookup
            for idx, tid in enumerate(muon_evt['TelIds']):
                intensity_params = muon_evt['MuonIntensityParams'][idx]
                if not intensity_params:
                    continue

                # BUG FIX: previously logged muon_evt[idx] — muon_evt is a
                # dict keyed by strings, so integer indexing raised KeyError
                self.log.info("** Muon params: %s", intensity_params)

                output_parameters['MuonEff'].append(
                    intensity_params.optical_efficiency_muon
                )
                output_parameters['ImpactP'].append(
                    intensity_params.impact_parameter.value
                )
                output_parameters['RingWidth'].append(
                    intensity_params.ring_width.value
                )

                print_muon(muon_evt, printer=self.log.info)
                num_muons_found += 1

        t = Table(output_parameters)
        t['ImpactP'].unit = 'm'
        t['RingWidth'].unit = 'deg'
        if self.outfile:
            t.write(self.outfile)
class MuonDisplayerTool(Tool):
    """Reconstruct muon rings and write the fitted ring / efficiency
    parameters, together with MC impact information, to an output table.

    Optionally displays each muon event if ``display`` is enabled.
    """

    name = 'ctapipe-display-muons'
    description = t.Unicode(__doc__)

    infile = t.Unicode(
        help='input file name',
        default=get_dataset('gamma_test_large.simtel.gz')).tag(config=True)

    outfile = t.Unicode(help='output file name', default=None).tag(config=True)

    display = t.Bool(
        help='display the camera events', default=False).tag(config=True)

    classes = t.List([
        CameraCalibrator,
    ])

    aliases = t.Dict({
        'infile': 'MuonDisplayerTool.infile',
        'outfile': 'MuonDisplayerTool.outfile',
        'display': 'MuonDisplayerTool.display'
    })

    def setup(self):
        # calibrator that produces the images consumed by the muon analysis
        self.calib = CameraCalibrator()

    def start(self):
        # one list per output column; turned into an astropy Table at the end
        output_parameters = {
            'MuonEff': [],
            'ImpactP': [],
            'RingWidth': [],
            'RingCont': [],
            'RingComp': [],
            'RingPixComp': [],
            'Core_x': [],
            'Core_y': [],
            'Impact_x_arr': [],
            'Impact_y_arr': [],
            'MCImpactP': [],
            'ImpactDiff': [],
            'RingSize': [],
            'RingRadius': [],
            'NTels': []
        }

        numev = 0
        num_muons_found = 0

        for event in event_source(self.infile):

            self.log.info("Event Number: %d, found %d muons",
                          numev, num_muons_found)

            self.calib.calibrate(event)
            muon_evt = analyze_muon_event(event)

            numev += 1

            # skip events where no telescope contained a good muon ring
            if not muon_evt['MuonIntensityParams']:
                continue

            if self.display:
                plot_muon_event(event, muon_evt)

            ntels = len(event.r0.tels_with_data)

            for tid in muon_evt['TelIds']:
                idx = muon_evt['TelIds'].index(tid)
                # hoist the repeated container lookups; also turns the
                # original deeply nested `is not None` body into a guard
                intensity_params = muon_evt['MuonIntensityParams'][idx]
                if intensity_params is None:
                    continue
                ring_params = muon_evt['MuonRingParams'][idx]

                # CLEANUP: leftover debug print() calls converted to
                # lazy-formatted logger calls; commented-out code removed
                self.log.debug("pos=%s tid=%s",
                               event.inst.subarray.positions[tid], tid)

                tel_x = event.inst.subarray.positions[tid][0]
                tel_y = event.inst.subarray.positions[tid][1]
                core_x = event.mc.core_x  # MC Core x
                core_y = event.mc.core_y  # MC Core y

                rec_impact_x = intensity_params.impact_parameter_pos_x
                rec_impact_y = intensity_params.impact_parameter_pos_y
                self.log.debug("rec_impact_x=%s rec_impact_y=%s",
                               rec_impact_x, rec_impact_y)
                self.log.debug("event.mc.core_x=%s event.mc.core_y=%s",
                               core_x, core_y)

                # true (MC) impact distance of the muon w.r.t. this telescope
                impact_mc = np.sqrt(
                    np.power(core_x - tel_x, 2) +
                    np.power(core_y - tel_y, 2))
                self.log.debug("simulated impact=%s", impact_mc)

                # Coordinate transformation to move the impact point
                # to array coordinates
                rec_impact_x_arr = tel_x + rec_impact_x
                rec_impact_y_arr = tel_y + rec_impact_y
                self.log.debug("impact_x_arr=%s impact_y_arr=%s",
                               rec_impact_x_arr, rec_impact_y_arr)

                # Distance between core of the shower and impact parameter
                impact_diff = np.sqrt(
                    np.power(core_x - rec_impact_x_arr, 2) +
                    np.power(core_y - rec_impact_y_arr, 2))
                self.log.debug("impact_diff=%s", impact_diff)

                self.log.info("** Muon params: %s", intensity_params)

                output_parameters['MuonEff'].append(
                    intensity_params.optical_efficiency_muon)
                output_parameters['ImpactP'].append(
                    intensity_params.impact_parameter.value)
                output_parameters['RingWidth'].append(
                    intensity_params.ring_width.value)
                output_parameters['RingCont'].append(
                    ring_params.ring_containment)
                output_parameters['RingComp'].append(
                    intensity_params.ring_completeness)
                output_parameters['RingPixComp'].append(
                    intensity_params.ring_pix_completeness)
                output_parameters['Core_x'].append(event.mc.core_x.value)
                output_parameters['Core_y'].append(event.mc.core_y.value)
                output_parameters['Impact_x_arr'].append(
                    rec_impact_x_arr.value)
                output_parameters['Impact_y_arr'].append(
                    rec_impact_y_arr.value)
                output_parameters['MCImpactP'].append(impact_mc.value)
                output_parameters['ImpactDiff'].append(impact_diff.value)
                output_parameters['RingSize'].append(
                    intensity_params.ring_size)
                output_parameters['RingRadius'].append(
                    ring_params.ring_radius.value)
                output_parameters['NTels'].append(ntels)

                print_muon(muon_evt, printer=self.log.info)
                num_muons_found += 1

        # renamed from `t` to avoid shadowing the traitlets module alias `t`
        table = Table(output_parameters)
        table['ImpactP'].unit = 'm'
        table['RingWidth'].unit = 'deg'

        if self.outfile:
            table.write(self.outfile)