def _calculate_shift_factor(self, ix: int, spectrum: Spectrum1DEx) -> float:
    shift_factor = 0
    if self.y_shift != 0:
        if self.y_shift_by_delta_t:
            # Shift in proportion to the time since eruption of this spectrum
            mjd = spectrum.mjd
            delta_t = tm.delta_t_from_jd(tm.jd_from_mjd(mjd), self.eruption_jd)
            shift_factor = self.y_shift * delta_t
        else:
            # Otherwise apply a fixed shift per position in the plot sequence
            shift_factor = self.y_shift * ix
    return shift_factor
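# A minimal usage sketch (not from the codebase), assuming the plotter exposes
# _calculate_shift_factor() as above and that Spectrum1DEx carries specutils-style
# spectral_axis/flux attributes. `plotter`, `ax` and `spectra` are hypothetical names.
for ix, (spec_name, spectrum) in enumerate(spectra.items()):
    shift = plotter._calculate_shift_factor(ix, spectrum)
    ax.plot(spectrum.spectral_axis.value, spectrum.flux.value + shift, label=spec_name)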
def _draw_plot_data(self, ax: Axes, **kwargs):
    # The payload should be spectral line fitted models containing Gaussian fits to lines. It's a dictionary
    # keyed on spec_name (b_e_20190828_3 etc.). Each item is an array of astropy CompoundModels, one per
    # spectral line. Each model will have 1+ Gaussian1D models for the line and 1 Polynomial1D for the
    # continuum. Compound models are named H$\\alpha$ etc. and each Gaussian is named fit$_{1}$ (wide)
    # or fit$_{2}$ (narrow).
    spectra = kwargs["spectra"]
    all_line_fits = kwargs["line_fits"]
    reference_jd = self.eruption_jd

    # The data is in a slightly awkward form: dicts keyed on spectrum, with each item an array of line_fits
    # (as we would generally be interested in one spectrum & its associated data at a time). Best to
    # transform it into a more usable form; an array of dictionaries which can be loaded into a DataFrame.
    rows = list()
    columns = ["line", "fit", "delta_t", "velocity", "velocity_err"]
    for spec_key, line_fits in all_line_fits.items():
        mjd = spectra[spec_key].mjd if spec_key in spectra else None
        delta_t = tm.delta_t_from_jd(tm.jd_from_mjd(mjd), reference_jd=reference_jd)
        for line_fit in line_fits:
            if line_fit.name in self.lines:
                for sub_fit in line_fit:
                    if sub_fit.name in self.lines[line_fit.name] and isinstance(sub_fit, Gaussian1D):
                        lambda_0 = sub_fit.mean.quantity
                        v = fu.calculate_velocity_from_sigma(lambda_0, sub_fit.fwhm).to("km / s")
                        v_err = 0 * v.unit  # TODO: uncertainty
                        rows.append({
                            "line": line_fit.name.replace("\\", "_"),
                            "fit": sub_fit.name.replace("\\", "_"),
                            "delta_t": delta_t,
                            "velocity": v.value,
                            "velocity_err": v_err.value
                        })
    df = DataFrame.from_records(rows, columns=columns)

    # The line name / fit name will be used to look up the corresponding rows
    for line_name, line in self.lines.items():
        line_field = line_name.replace("\\", "_")
        for fit_name in line:
            fit_plot_params = line[fit_name]
            color = fit_plot_params["color"] if "color" in fit_plot_params else "k"
            label = f"{line_name} {fit_plot_params['label'] if 'label' in fit_plot_params else fit_name}"
            fit_field = fit_name.replace("\\", "_")
            df_line = df.query(f"line == '{line_field}' and fit == '{fit_field}'").sort_values(by="delta_t")
            if len(df_line) > 0:
                self._plot_points_to_error_bars_on_ax(ax, x_points=df_line["delta_t"],
                                                      y_points=df_line["velocity"],
                                                      y_err_points=df_line["velocity_err"],
                                                      color=color, label=label)
                self._plot_points_to_lines_on_ax(ax, x_points=df_line["delta_t"],
                                                 y_points=df_line["velocity"],
                                                 color=color, line_style="--", alpha=0.3)
    return
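# Hypothetical sketch of the width-to-velocity conversion assumed to sit behind
# fu.calculate_velocity_from_sigma(): a line width at centre wavelength lambda_0
# maps to a velocity via v/c = delta_lambda / lambda_0. The helper's real
# implementation (and whether it expects a sigma or an FWHM) may differ.
from astropy import units as u
from astropy.constants import c

def velocity_from_width(lambda_0: u.Quantity, width: u.Quantity) -> u.Quantity:
    return (c * width / lambda_0).to(u.km / u.s)

# e.g. a 30 Angstrom FWHM on H-alpha (6563 Angstrom) is roughly 1370 km/s
print(velocity_from_width(6563 * u.AA, 30 * u.AA))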
def _met_to_jd(cls, met: float) -> float:
    # The issue here is that uvotmaghist/uvotsource don't copy over the UTCFINIT & MJDREFI fields, either
    # in the HDU headers or in data fields (FFS why not?). There's also no easy way to tie an observation
    # back to its source file. However, MJDREFI doesn't change and the change in UTCFINIT amounts to
    # 6 seconds over the course of the observations, so for now I'll use a median value and, if time
    # allows, revisit this.
    # Reference values from the first and last observations;
    # sw00011558001uw1_sk.img.gz: UTCFINIT = -23.57402
    # sw00011558033uw2_sk.img.gz: UTCFINIT = -23.62186
    # sw00045788001uw2_sk.img.gz: UTCFINIT = -17.07584
    # sw00045788105um2_sk.img.gz: UTCFINIT = -24.88188
    MJDREFI = 51910
    UTCFINT = -20.97886
    return tm.jd_from_mjd(MJDREFI + ((met + UTCFINT) / 86400))
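# Hypothetical sanity check, assuming tm.jd_from_mjd(mjd) simply returns mjd + 2400000.5
# (the standard MJD -> JD offset). Swift MET counts seconds from the 2001-01-01 UTC
# epoch, which is MJD 51910, so a MET of 86400 s should land roughly one day later.
MJDREFI = 51910
UTCFINT = -20.97886

def met_to_jd(met: float) -> float:
    mjd = MJDREFI + (met + UTCFINT) / 86400
    return mjd + 2400000.5

print(met_to_jd(86400))  # ~2451911.4998, about one day after the MET reference epoch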
def _on_query(self, eruption_jd) -> DataFrame:
    # Make sure we work with a copy - we don't want to modify the underlying data
    df = self._data.copy()

    # rate_err == rate_err filters out NaN values (NaN != NaN); then drop non-positive errors
    df = df.query("rate_err == rate_err").query("rate_err > 0")
    print(f"\tafter filtering out rows where rate_err is NaN or not > 0, {len(df)} rows left")

    # We create the standard day and day_err fields, relative to the passed eruption jd
    if "jd" in df.columns:
        df["day"] = tm.delta_t_from_jd(df["jd"], eruption_jd)
        df["day_err"] = df["jd_plus_err"]
    elif "mjd" in df.columns:
        df["day"] = tm.delta_t_from_jd(tm.jd_from_mjd(df["mjd"]), eruption_jd)
        df["day_err"] = df["mjd_plus_err"]
    return df
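# Hypothetical usage sketch: `light_curve_ds` stands in for whatever data source
# exposes _on_query() above, and the eruption JD here is illustrative only.
df = light_curve_ds._on_query(eruption_jd=2458723.5)
# df now carries "day" (days since eruption) and "day_err" alongside the original
# rate / rate_err columns, ready for plotting a light curve against delta-t.
print(df[["day", "day_err", "rate_err"]].head())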
for plot_config in settings["plots"][plot_group_config]:
    spectra = {}
    spectral_lines = {}
    plot_line_fits = {}
    delta_t = None

    # Each plot will generally have 2 spectra (blue arm and red arm)
    for spec_match in plot_config["spectra"]:
        # Get all the data sources whose names start with the key value - mostly each is individually specified
        filtered_data_sources = {
            k: v for k, v in data_sources.items() if k.startswith(spec_match)
        }
        for spec_name, ds in filtered_data_sources.items():
            delta_t = tm.delta_t_from_jd(tm.jd_from_mjd(ds.header["MJD"]), eruption_jd)
            spectrum = ds.query()
            print(f"\tUsing spectrum '{spec_name}': Delta-t={delta_t:.2f} & max_flux={spectrum.max_flux}")
            spectra[spec_name] = spectrum
            flux_units = spectrum.flux.unit

    if "line_fits" in plot_config:
        # Pick up any fitted spectral lines configured for this plot
        for fit_match in plot_config["line_fits"]:
            plot_line_fits.update({
                k: v for k, v in line_fit_sets.items() if k.startswith(fit_match)
            })
def _get_spectrum_delta_t(self, spectrum: Spectrum1DEx) -> float:
    mjd = spectrum.mjd
    return tm.delta_t_from_jd(tm.jd_from_mjd(mjd), self.eruption_jd)