def test_reduced_arcs_contains_stable_wavelength_solution(config):
    """
    Make sure that the wavelength solution gives the same results on
    different runs.
    """
    output = os.path.join(config.output_dir, config.filename)
    reference = os.path.join(config.ref_dir, config.filename)

    if not os.path.exists(output):
        pytest.skip("Output file not found: {}".format(output))

    if not os.path.exists(reference):
        pytest.fail("Reference file not found: {}".format(reference))

    ad_out = config.ad
    ad_ref = astrodata.open(reference)

    for ext_out, ext_ref in zip(ad_out, ad_ref):
        model = astromodels.dict_to_chebyshev(
            dict(
                zip(ext_out.WAVECAL["name"],
                    ext_out.WAVECAL["coefficients"])))

        ref_model = astromodels.dict_to_chebyshev(
            dict(
                zip(ext_ref.WAVECAL["name"],
                    ext_ref.WAVECAL["coefficients"])))

        x = np.arange(ext_out.shape[1])
        y = model(x)
        ref_y = ref_model(x)

        np.testing.assert_allclose(y, ref_y, rtol=1)

    del ad_out, ad_ref
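For context, here is a minimal sketch of what rebuilding a wavelength model from name/coefficient pairs can look like with plain astropy. The key names ("degree", "domain_start", "domain_end", "c0", ...) are illustrative assumptions, not the actual WAVECAL layout consumed by `astromodels.dict_to_chebyshev`.

import numpy as np
from astropy.modeling import models


def chebyshev1d_from_dict(coeffs):
    """Rebuild a Chebyshev1D from a {name: value} mapping (illustrative only)."""
    degree = int(coeffs["degree"])
    model = models.Chebyshev1D(
        degree, domain=[coeffs["domain_start"], coeffs["domain_end"]])
    for k in range(degree + 1):
        # Missing coefficients default to zero
        setattr(model, "c{}".format(k), coeffs.get("c{}".format(k), 0.0))
    return model


# Two models rebuilt from identical tables evaluate to the same wavelengths
toy = {"degree": 2, "domain_start": 0, "domain_end": 3071,
       "c0": 650.0, "c1": 150.0, "c2": 0.5}
x = np.arange(3072)
np.testing.assert_allclose(chebyshev1d_from_dict(toy)(x),
                           chebyshev1d_from_dict(dict(toy))(x))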
Example #2
def assert_wavelength_solutions_are_close(ad, ad_ref):
    """
    Checks if two :class:`~astrodata.AstroData` (or any subclass) have the
    same wavelength solution.

    Parameters
    ----------
    ad : :class:`astrodata.AstroData` or any subclass
        AstroData object to be checked.
    ad_ref : :class:`astrodata.AstroData` or any subclass
        AstroData object used as reference

    """
    for ext, ext_ref in zip(ad, ad_ref):
        assert hasattr(ext, "WAVECAL")
        wcal = dict(zip(ext.WAVECAL["name"], ext.WAVECAL["coefficients"]))
        wcal = dict_to_chebyshev(wcal)

        assert hasattr(ext_ref, "WAVECAL")
        wcal_ref = dict(
            zip(ext_ref.WAVECAL["name"], ext_ref.WAVECAL["coefficients"]))
        wcal_ref = dict_to_chebyshev(wcal_ref)

        assert isinstance(wcal, type(wcal_ref))
        assert_allclose(wcal.parameters, wcal_ref.parameters)
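A hypothetical usage sketch for the helper above inside a pytest regression test; the filenames are placeholders.

import astrodata


def test_wavecal_matches_reference():
    ad = astrodata.open("arc_wavelengthSolutionDetermined.fits")        # new product
    ad_ref = astrodata.open("ref/arc_wavelengthSolutionDetermined.fits")  # trusted reference
    assert_wavelength_solutions_are_close(ad, ad_ref)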
def test_regression_determine_wavelength_solution(ad, fwidth, order, min_snr,
                                                  caplog, change_working_dir,
                                                  ref_ad_factory):
    """
    Make sure that the wavelength solution gives the same results on
    different runs.
    """
    caplog.set_level(logging.INFO, logger="geminidr")

    with change_working_dir():
        logutils.config(
            file_name='log_regress_{:s}.txt'.format(ad.data_label()))
        p = primitives_gmos_spect.GMOSSpect([ad])
        p.viewer = geminidr.dormantViewer(p, None)

        p.determineWavelengthSolution(
            order=order,
            min_snr=min_snr,
            fwidth=fwidth,
            **determine_wavelength_solution_parameters)

        wcalibrated_ad = p.writeOutputs().pop()

        for record in caplog.records:
            if record.levelname == "WARNING":
                assert "No acceptable wavelength solution found" not in record.message

    ref_ad = ref_ad_factory(wcalibrated_ad.filename)
    table = wcalibrated_ad[0].WAVECAL
    table_ref = ref_ad[0].WAVECAL

    model = astromodels.dict_to_chebyshev(
        dict(zip(table["name"], table["coefficients"])))

    ref_model = astromodels.dict_to_chebyshev(
        dict(zip(table_ref["name"], table_ref["coefficients"])))

    x = np.arange(wcalibrated_ad[0].shape[1])
    wavelength = model(x)
    ref_wavelength = ref_model(x)

    pixel_scale = wcalibrated_ad[0].pixel_scale()  # arcsec / px
    slit_size_in_arcsec = float(wcalibrated_ad[0].focal_plane_mask().replace(
        'arcsec', ''))
    slit_size_in_px = slit_size_in_arcsec / pixel_scale
    dispersion = abs(
        wcalibrated_ad[0].dispersion(asNanometers=True))  # nm / px

    tolerance = 0.5 * (slit_size_in_px * dispersion)
    np.testing.assert_allclose(wavelength, ref_wavelength, rtol=tolerance)
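A worked example of the tolerance used above, with illustrative (not instrument-specific) numbers: half a slit width, expressed in wavelength units.

pixel_scale = 0.16           # arcsec / px (illustrative)
slit_size_in_arcsec = 1.0    # e.g. focal_plane_mask() == "1.0arcsec"
dispersion = 0.1             # nm / px (illustrative)

slit_size_in_px = slit_size_in_arcsec / pixel_scale   # 6.25 px
tolerance = 0.5 * (slit_size_in_px * dispersion)      # 0.3125, passed as rtol above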
def test_determine_distortion_comparing_modeled_arrays(ad, ad_ref):
    """
    Runs the `determineDistorion` primitive on a preprocessed data and compare
    its model with the one in the reference file. The distortion model needs to
    be reconstructed because different coefficients might return same results.
    """
    assert ad.filename == ad_ref.filename

    table = ad[0].FITCOORD
    model_dict = dict(zip(table['name'], table['coefficients']))
    model = astromodels.dict_to_chebyshev(model_dict)

    ref_table = ad_ref[0].FITCOORD
    ref_model_dict = dict(zip(ref_table['name'], ref_table['coefficients']))
    ref_model = astromodels.dict_to_chebyshev(ref_model_dict)

    X, Y = np.mgrid[:ad[0].shape[0], :ad[0].shape[1]]

    np.testing.assert_allclose(model(X, Y), ref_model(X, Y), atol=1)
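The comparison above works on evaluated arrays rather than raw coefficients, since two Chebyshev parameterizations can describe nearly the same surface. A toy sketch of evaluating a Chebyshev2D on a pixel grid (the model here is made up, not one read from a FITCOORD table):

import numpy as np
from astropy.modeling import models

toy_model = models.Chebyshev2D(x_degree=2, y_degree=2,
                               x_domain=[0, 511], y_domain=[0, 511])
toy_model.c1_0 = 1.0                      # roughly the identity along x

X, Y = np.mgrid[:512, :512]               # full pixel grid
mapped = toy_model(X, Y)                  # same shape as X; compare with assert_allclose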
Example #5
def test_regression_for_determine_distortion_using_fitcoord_table(
        ad, change_working_dir, ref_ad_factory):
    """
    Runs the `determineDistortion` primitive on a preprocessed data and compare
    its model with the one in the reference file. The distortion model needs to
    be reconstructed because different coefficients might return same results.

    Parameters
    ----------
    ad : pytest.fixture (AstroData)
        Fixture that reads the filename and loads as an AstroData object.
    change_working_dir : pytest.fixture
        Fixture that changes the working directory
        (see :mod:`astrodata.testing`).
    ref_ad_factory : pytest.fixture
        Fixture that contains a function used to load the reference AstroData
        object (see :mod:`recipe_system.testing`).
    """
    with change_working_dir():
        logutils.config(file_name='log_fitcoord_{:s}.txt'.format(ad.data_label()))
        p = primitives_gmos_spect.GMOSSpect([ad])
        p.viewer = geminidr.dormantViewer(p, None)
        p.determineDistortion(**fixed_parameters_for_determine_distortion)
        distortion_determined_ad = p.writeOutputs().pop()

    ref_ad = ref_ad_factory(distortion_determined_ad.filename)

    table = ad[0].FITCOORD
    model_dict = dict(zip(table['name'], table['coefficients']))
    model = astromodels.dict_to_chebyshev(model_dict)

    ref_table = ref_ad[0].FITCOORD
    ref_model_dict = dict(zip(ref_table['name'], ref_table['coefficients']))
    ref_model = astromodels.dict_to_chebyshev(ref_model_dict)

    X, Y = np.mgrid[:ad[0].shape[0], :ad[0].shape[1]]

    np.testing.assert_allclose(model(X, Y), ref_model(X, Y), atol=1)
Example #6
def assert_have_same_distortion(ad, ad_ref):
    """
    Checks if two :class:`~astrodata.AstroData` (or any subclass) have the
    same distortion.

    Parameters
    ----------
    ad : :class:`astrodata.AstroData`
        AstroData object to be checked.
    ad_ref : :class:`astrodata.AstroData`
        AstroData object used as reference

    """
    for ext, ext_ref in zip(ad, ad_ref):
        distortion = dict(
            zip(ext.FITCOORD["name"], ext.FITCOORD["coefficients"]))
        distortion = dict_to_chebyshev(distortion)

        distortion_ref = dict(
            zip(ext_ref.FITCOORD["name"], ext_ref.FITCOORD["coefficients"]))
        distortion_ref = dict_to_chebyshev(distortion_ref)

        assert isinstance(distortion, type(distortion_ref))
        assert_allclose(distortion.parameters, distortion_ref.parameters)
Example #7
def rebuild_distortion_model(ext):
    """
    Helper function to recover the distortion model from the coefficients stored
    in the `ext.FITCOORD` attribute.

    Parameters
    ----------
    ext : astrodata extension
        Input astrodata extension which contains a `.FITCOORD` with the
        coefficients that can be used to reconstruct the distortion model.

    Returns
    -------
    :class:`~astropy.modeling.models.Model`
        Model that receives 2D data and returns a 1D array.
    """
    model = astromodels.dict_to_chebyshev(
        dict(zip(ext.FITCOORD["name"], ext.FITCOORD["coefficients"]))
    )

    return model
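A hypothetical usage of `rebuild_distortion_model`, comparing a freshly processed arc against its reference on the full pixel grid; the filenames are placeholders.

import numpy as np
import astrodata

ad = astrodata.open("arc_distortionDetermined.fits")
ad_ref = astrodata.open("ref/arc_distortionDetermined.fits")

for ext, ext_ref in zip(ad, ad_ref):
    model = rebuild_distortion_model(ext)
    ref_model = rebuild_distortion_model(ext_ref)
    X, Y = np.mgrid[:ext.shape[0], :ext.shape[1]]
    np.testing.assert_allclose(model(X, Y), ref_model(X, Y), atol=1)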
def do_plots(ad):
    """
    Generate diagnostic plots.

    Parameters
    ----------
    ad : AstroData
        Wavelength-calibrated arc whose extensions carry `WAVECAL` tables.
    """
    output_dir = ("./plots/geminidr/gmos/"
                  "test_gmos_spect_ls_determine_wavelength_solution")
    os.makedirs(output_dir, exist_ok=True)

    name, _ = os.path.splitext(ad.filename)
    grating = ad.disperser(pretty=True)
    bin_x = ad.detector_x_bin()
    bin_y = ad.detector_y_bin()
    central_wavelength = ad.central_wavelength() * 1e9  # in nanometers

    package_dir = os.path.dirname(primitives_gmos_spect.__file__)
    arc_table = os.path.join(package_dir, "lookups", "CuAr_GMOS.dat")
    arc_lines = np.loadtxt(arc_table, usecols=[0]) / 10.0

    for ext_num, ext in enumerate(ad):

        if not hasattr(ext, "WAVECAL"):
            continue

        peaks = ext.WAVECAL["peaks"] - 1  # ToDo: Refactor peaks to be 0-indexed
        wavelengths = ext.WAVECAL["wavelengths"]

        wavecal_model = astromodels.dict_to_chebyshev(
            dict(zip(ext.WAVECAL["name"], ext.WAVECAL["coefficients"])))

        middle = ext.data.shape[0] // 2
        sum_size = 10
        r1 = middle - sum_size // 2
        r2 = middle + sum_size // 2

        mask = np.round(np.average(ext.mask[r1:r2], axis=0)).astype(int)
        data = np.ma.masked_where(mask > 0, np.sum(ext.data[r1:r2], axis=0))
        data = (data - data.min()) / data.ptp()

        # -- Plot lines --
        fig, ax = plt.subplots(dpi=150,
                               num="{:s}_{:d}_{:s}_{:.0f}".format(
                                   name, ext_num, grating, central_wavelength))

        w = wavecal_model(np.arange(data.size))

        arcs = [
            ax.vlines(line, 0, 1, color="k", alpha=0.25) for line in arc_lines
        ]
        wavs = [
            ax.vlines(peak, 0, 1, color="r", ls="--", alpha=0.25)
            for peak in wavecal_model(peaks)
        ]

        plot, = ax.plot(w, data, "k-", lw=0.75)

        ax.legend((plot, arcs[0], wavs[0]),
                  ("Normalized Data", "Reference Lines", "Matched Lines"))

        x0, x1 = wavecal_model([0, data.size])
        ax.grid(alpha=0.1)
        ax.set_xlim(x0, x1)
        ax.set_xlabel("Wavelength [nm]")
        ax.set_ylabel("Normalized intensity")
        ax.set_title("Wavelength Calibrated Spectrum for\n"
                     "{:s}\n obtained with {:s} at {:.0f} nm".format(
                         name, grating, central_wavelength))

        if x0 > x1:
            ax.invert_xaxis()

        fig_name = os.path.join(
            output_dir,
            "{:s}_{:d}_{:s}_{:.0f}.png".format(name, ext_num, grating,
                                               central_wavelength))

        fig.savefig(fig_name)
        del fig, ax

        # -- Plot non-linear components ---
        fig, ax = plt.subplots(
            dpi=150,
            num="{:s}_{:d}_{:s}_{:.0f}_non_linear_comps".format(
                name, ext_num, grating, central_wavelength))

        non_linear_model = wavecal_model.copy()
        _ = [setattr(non_linear_model, "c{}".format(k), 0) for k in [0, 1]]
        residuals = wavelengths - wavecal_model(peaks)

        p = np.linspace(min(peaks), max(peaks), 1000)
        ax.plot(wavecal_model(p),
                non_linear_model(p),
                "C0-",
                label="Generic Representation")
        ax.plot(wavecal_model(peaks),
                non_linear_model(peaks) + residuals,
                "ko",
                label="Non linear components and residuals")

        ax.legend()
        ax.grid(alpha=0.25)
        ax.set_xlabel("Wavelength [nm]")
        ax.set_title("Non-linear components for\n"
                     "{:s} obtained with {:s} at {:.0f}".format(
                         name, grating, central_wavelength))

        fig_name = os.path.join(
            output_dir, "{:s}_{:d}_{:s}_{:.0f}_non_linear_comps.png".format(
                name, ext_num, grating, central_wavelength))

        fig.savefig(fig_name)
        del fig, ax

        # -- Plot Wavelength Solution Residuals ---
        fig, ax = plt.subplots(dpi=150,
                               num="{:s}_{:d}_{:s}_{:.0f}_residuals".format(
                                   name, ext_num, grating, central_wavelength))

        ax.plot(wavelengths, wavelengths - wavecal_model(peaks), "ko")

        ax.grid(alpha=0.25)
        ax.set_xlabel("Wavelength [nm]")
        ax.set_ylabel("Residuum [nm]")
        ax.set_title("Wavelength Calibrated Residuum for\n"
                     "{:s} obtained with {:s} at {:.0f}".format(
                         name, grating, central_wavelength))

        fig_name = os.path.join(
            output_dir, "{:s}_{:d}_{:s}_{:.0f}_residuals.png".format(
                name, ext_num, grating, central_wavelength))

        fig.savefig(fig_name)

    # -- Create artifacts ---
    if "BUILD_ID" in os.environ:
        branch_name = os.environ["BRANCH_NAME"].replace("/", "_")
        build_number = int(os.environ["BUILD_NUMBER"])

        tar_name = os.path.join(
            output_dir,
            "plots_{:s}_b{:03d}.tar.gz".format(branch_name, build_number))

        with tarfile.open(tar_name, "w:gz") as tar:
            for _file in glob.glob(os.path.join(output_dir, "*.png")):
                tar.add(name=_file, arcname=os.path.basename(_file))

        target_dir = "./plots/"
        target_file = os.path.join(target_dir, os.path.basename(tar_name))

        os.makedirs(target_dir, exist_ok=True)
        os.rename(tar_name, target_file)
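The non-linear panel in `do_plots` isolates the higher-order terms by zeroing the constant and linear Chebyshev coefficients on a copy of the model. A self-contained sketch of that trick with a toy model standing in for the real wavelength solution:

import numpy as np
from astropy.modeling import models

wavecal_model = models.Chebyshev1D(3, domain=[0, 3071],
                                   c0=650.0, c1=150.0, c2=0.5, c3=0.05)

non_linear_model = wavecal_model.copy()
for k in (0, 1):
    setattr(non_linear_model, "c{}".format(k), 0)   # drop constant and linear terms

p = np.linspace(0, 3071, 5)
print(non_linear_model(p))   # only the c2/c3 contributions remain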
Example #9
    def plotSpectraForQA(self, adinputs=None, **params):
        """
        Converts AstroData objects containing extracted spectra into a JSON
        object, then pushes it to the Automated Dataflow Coordination Center
        (ADCC) server (see notes below) using a POST request.

        This will allow the spectra to be visualized using the QAP SpecViewer
        web browser client.

        Notes
        -----
        This primitive only works if the ADCC server is running locally.

        Parameters
        ----------
        adinputs : list of :class:`~astrodata.AstroData`
            Input data containing extracted spectra.
        url : str
            URL address to the ADCC server.

        Returns
        -------
        list of :class:`~astrodata.AstroData`
            Data used for plotting.
        """
        url = params["url"]

        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        log.stdinfo('Number of input file(s): {}'.format(len(adinputs)))

        spec_packs = []

        for ad in adinputs:

            log.stdinfo('Reading {} aperture(s) from file: {}'.format(
                len(ad), ad.filename))

            timestamp = time.time()

            if 'NCOMBINE' in ad.phu:
                is_stack = ad.phu['NCOMBINE'] > 1
                stack_size = ad.phu['NCOMBINE']
            else:
                is_stack = False
                stack_size = 1

            group_id = ad.group_id().split('_[')[0]
            group_id += ad.group_id().split(']')[1]

            spec_pack = {
                "apertures": [],
                "data_label": ad.data_label(),
                "filename": ad.filename,
                "group_id": group_id,
                "is_stack": is_stack,
                "stack_size": stack_size,
                "metadata": [],
                "msgtype": "specjson",
                "pixel_scale": ad.pixel_scale(),
                "program_id": ad.program_id(),
                "timestamp": timestamp,
            }

            for i, ext in enumerate(ad):
                data = ext.data
                stddev = np.sqrt(ext.variance)

                if data.ndim > 1:
                    raise TypeError(
                        "Expected 1D data. Found {:d}D data: {:s}".format(
                            data.ndim, ad.filename))

                if hasattr(ext, 'WAVECAL'):

                    wcal_model = astromodels.dict_to_chebyshev(
                        dict(
                            zip(
                                ext.WAVECAL["name"],
                                ext.WAVECAL["coefficients"]
                            )
                        )
                    )

                    wavelength = wcal_model(np.arange(data.size, dtype=float))
                    w_dispersion = ext.hdr["CDELT1"]
                    w_units = ext.hdr["CUNIT1"]

                elif "CDELT1" in ext.hdr:

                    wavelength = (
                        ext.hdr["CRVAL1"] + ext.hdr["CDELT1"] * (
                            np.arange(data.size, dtype=float)
                            + 1 - ext.hdr["CRPIX1"]))

                    w_dispersion = ext.hdr["CDELT1"]
                    w_units = ext.hdr["CUNIT1"]

                else:
                    # No wavelength solution at all: fall back to pixel units
                    wavelength = np.arange(data.size, dtype=float)
                    w_units = "px"
                    w_dispersion = 1

                # Clean up bad data
                mask = np.logical_not(np.ma.masked_invalid(data).mask)

                wavelength = wavelength[mask]
                data = data[mask]
                stddev = stddev[mask]

                # Round and convert data/stddev to int to minimize data transfer load
                wavelength = np.round(wavelength, decimals=3)
                data = np.round(data)
                stddev = np.round(stddev)

                _intensity = [[w, int(d)] for w, d in zip(wavelength, data)]
                _stddev = [[w, int(s)] for w, s in zip(wavelength, stddev)]

                center = np.round(ext.hdr["XTRACTED"])
                lower = np.round(ext.hdr["XTRACTLO"])
                upper = np.round(ext.hdr["XTRACTHI"])

                aperture = {
                    "center": center,
                    "lower": lower,
                    "upper": upper,
                    "dispersion": w_dispersion,
                    "wavelength_units": w_units,
                    "intensity": _intensity,
                    "stddev": _stddev,
                }

                spec_pack["apertures"].append(aperture)

                log.stdinfo(' Aperture center: {}, Lower: {}, Upper: {}'.format(
                    center, lower, upper))

            spec_packs.append(spec_pack)

            spec_packs_json = json.dumps(spec_packs)

            with open("spec_data.json", 'w') as json_buffer:
                json.dump(spec_packs, json_buffer)

            # Convert string to bytes
            spec_packs_json = spec_packs_json.encode("utf-8")

            try:
                log.stdinfo('Sending data to QA SpecViewer')
                post_request = urllib.request.Request(url)
                postr = urllib.request.urlopen(post_request, spec_packs_json)
                postr.read()
                postr.close()
                log.stdinfo('Success.')

            except urllib.error.URLError:
                log.warning('Failed to connect to ADCC Server.\n'
                            'Make sure it is up and running.')

        return adinputs
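A minimal, hand-written sketch of the POST performed above. The payload carries a subset of the fields of the `spec_pack` dictionary built in the primitive, but all values and the URL are placeholders; the real endpoint comes from `params["url"]`.

import json
import urllib.error
import urllib.request

spec_pack = {
    "msgtype": "specjson",
    "filename": "S20200101S0001.fits",         # placeholder
    "data_label": "GS-2020A-Q-1-1-001",        # placeholder
    "is_stack": False,
    "stack_size": 1,
    "pixel_scale": 0.16,
    "apertures": [{
        "center": 512, "lower": 508, "upper": 516,
        "dispersion": 0.1, "wavelength_units": "nm",
        "intensity": [[650.0, 100], [650.1, 102]],
        "stddev": [[650.0, 10], [650.1, 11]],
    }],
}

payload = json.dumps([spec_pack]).encode("utf-8")
try:
    with urllib.request.urlopen("http://localhost:8777/spec_report", payload) as resp:
        resp.read()
except urllib.error.URLError:
    pass  # ADCC server not running; the primitive only logs a warning in this case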
Example #10
    def applyQECorrection(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            Suffix to be added to output files.
        arc: str/AstroData/None
            Processed arc providing the wavelength and distortion solutions;
            if None, one is requested from the calibration system.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            self.getProcessedArc(adinputs, refresh=False)
            arc_list = self._get_cal(adinputs, 'processed_arc')
        else:
            arc_list = arc

        distort_model = models.Identity(2)

        for ad, arc in zip(*gt.make_lists(adinputs, arc_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    "No changes will be made to {}, since it has "
                    "already been processed by applyQECorrection".format(
                        ad.filename))
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.warning("{} has the e2v CCDs, so no QE correction "
                            "is necessary".format(ad.filename))
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin
                                    or arc.detector_y_bin() != ybin):
                log.warning(
                    "Science frame {} and arc {} have different binnings,"
                    "so cannot use arc".format(ad.filename, arc.filename))
                arc = None

            # OK, we definitely want to try to do this, get a wavelength solution
            try:
                wavecal = arc[0].WAVECAL
            except (TypeError, AttributeError):
                wave_model = None
            else:
                model_dict = dict(zip(wavecal['name'],
                                      wavecal['coefficients']))
                wave_model = astromodels.dict_to_chebyshev(model_dict)
                if not isinstance(wave_model, models.Chebyshev1D):
                    log.warning("Problem reading wavelength solution from arc "
                                "{}".format(arc.filename))

            if wave_model is None:
                if 'sq' in self.mode:
                    raise OSError("No wavelength solution for {}".format(
                        ad.filename))
                else:
                    log.warning("Using approximate wavelength solution for "
                                "{}".format(ad.filename))

            try:
                fitcoord = arc[0].FITCOORD
            except (TypeError, AttributeError):
                # distort_model already has Identity inverse so nothing required
                pass
            else:
                # TODO: This is copied from determineDistortion() and will need
                # to be refactored out. Or we might be able to simply replace it
                # with a gWCS.pixel_to_world() call
                model_dict = dict(
                    zip(fitcoord['inv_name'], fitcoord['inv_coefficients']))
                m_inverse = astromodels.dict_to_chebyshev(model_dict)
                if not isinstance(m_inverse, models.Chebyshev2D):
                    log.warning("Problem reading distortion model from arc "
                                "{}".format(arc.filename))
                else:
                    distort_model.inverse = models.Mapping(
                        (0, 1, 1)) | (m_inverse & models.Identity(1))

            if distort_model.inverse == distort_model:  # Identity(2)
                if 'sq' in self.mode:
                    raise OSError("No distortion model for {}".format(
                        ad.filename))
                else:
                    log.warning(
                        "Proceeding without a disortion correction for "
                        "{}".format(ad.filename))

            ad_detsec = ad.detector_section()
            adg = transform.create_mosaic_transform(ad, geotable)
            if arc is not None:
                arc_detsec = arc.detector_section()[0]
                shifts = [
                    c1 - c2 for c1, c2 in zip(
                        np.array(ad_detsec).min(axis=0), arc_detsec)
                ]
                xshift, yshift = shifts[0] / xbin, shifts[2] / ybin  # x1, y1
                if xshift or yshift:
                    log.stdinfo("Found a shift of ({},{}) pixels between "
                                "{} and the calibration.".format(
                                    xshift, yshift, ad.filename))
                add_shapes, add_transforms = [], []
                for (arr, trans) in adg:
                    # Try to work out shape of this Block in the unmosaicked
                    # arc, and then apply a shift to align it with the
                    # science Block before applying the same transform.
                    if xshift == 0:
                        add_shapes.append(
                            ((arc_detsec.y2 - arc_detsec.y1) // ybin,
                             arr.shape[1]))
                    else:
                        add_shapes.append(
                            (arr.shape[0],
                             (arc_detsec.x2 - arc_detsec.x1) // xbin))
                    t = transform.Transform(
                        models.Shift(-xshift) & models.Shift(-yshift))
                    t.append(trans)
                    add_transforms.append(t)
                adg.calculate_output_shape(
                    additional_array_shapes=add_shapes,
                    additional_transforms=add_transforms)
                origin_shift = models.Shift(-adg.origin[1]) & models.Shift(
                    -adg.origin[0])
                for t in adg.transforms:
                    t.append(origin_shift)

            # Irrespective of arc or not, apply the distortion model (it may
            # be Identity), recalculate output_shape and reset the origin
            for t in adg.transforms:
                t.append(distort_model.copy())
            adg.calculate_output_shape()
            adg.reset_origin()

            # Now we know the shape of the output, we can construct the
            # approximate wavelength solution; ad.dispersion() returns a list!
            if wave_model is None:
                wave_model = (
                    models.Shift(-0.5 * adg.output_shape[1])
                    | models.Scale(ad.dispersion(asNanometers=True)[0])
                    | models.Shift(ad.central_wavelength(asNanometers=True)))

            for ccd, (block, trans) in enumerate(adg, start=1):
                if ccd == 2:
                    continue
                for ext, corner in zip(block, block.corners):
                    ygrid, xgrid = np.indices(ext.shape)
                    xgrid += corner[1]  # No need for ygrid
                    xnew = trans(xgrid, ygrid)[0]
                    # Some unit-based stuff here to prepare for gWCS
                    waves = wave_model(xnew) * u.nm
                    try:
                        qe_correction = qeModel(ext)(
                            (waves / u.nm).to(u.dimensionless_unscaled).value)
                    except TypeError:  # qeModel() returns None
                        msg = "No QE correction found for {}:{}".format(
                            ad.filename, ext.hdr['EXTVER'])
                        if 'sq' in self.mode:
                            raise ValueError(msg)
                        else:
                            log.warning(msg)
                            # No QE model: skip this extension rather than
                            # use an undefined qe_correction below
                            continue
                    log.fullinfo(
                        "Mean relative QE of EXTVER {} is {:.5f}".format(
                            ext.hdr['EXTVER'], qe_correction.mean()))
                    if not is_flat:
                        qe_correction = 1. / qe_correction
                    qe_correction[qe_correction < 0] = 0
                    qe_correction[qe_correction > 10] = 0
                    ext.multiply(qe_correction)

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
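When no WAVECAL is available, the primitive above falls back to a linear wavelength model built from compound astropy models. A self-contained sketch with illustrative numbers (not instrument values):

import numpy as np
from astropy.modeling import models

output_width = 3072            # pixels along the dispersion axis (illustrative)
dispersion = 0.1               # nm / px (illustrative)
central_wavelength = 650.0     # nm (illustrative)

# Centre the pixel axis, scale by the dispersion, shift to the central wavelength
wave_model = (models.Shift(-0.5 * output_width)
              | models.Scale(dispersion)
              | models.Shift(central_wavelength))

waves = wave_model(np.arange(output_width))   # ~496 nm to ~804 nm across the detector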