def test_lin_spline_ax0_ord1(self):
    """
    Fit linear sky background along the slit with an order-1 (linear)
    spline, rejecting the object spectrum. Check that the pixel
    rejection is as expected, as well as the values.
    """
    fit1d = fit_1D(self.data, weights=self.weights, function='spline1',
                   order=1, axis=0, sigma_lower=2.5, sigma_upper=2.5,
                   niter=5, plot=debug)
    fit_vals = fit1d.evaluate()

    assert_allclose(fit_vals, self.sky, atol=20., rtol=0.02)

    # Also check that the rejected pixels were as expected (from previous
    # runs) for the central column:
    assert_equal(fit1d.mask[:11, 70], False)
    assert_equal(fit1d.mask[11:21, 70], True)
    assert_equal(fit1d.mask[21:, 70], False)

    assert fit1d.regions_pix == ((1, 30),)
def test_chebyshev_ax0_lin(self):
    """
    Fit linear sky background along the slit, rejecting the object
    spectrum. Require the resulting fit to match the true sky model
    within tolerances that roughly allow for model noise & fitting
    systematics, and check that the expected pixels are rejected.
    """
    fit1d = fit_1D(self.data, weights=self.weights,
                   function='chebyshev', order=1, axis=0,
                   sigma_lower=2.5, sigma_upper=2.5, niter=5,
                   plot=debug)
    fit_vals = fit1d.evaluate()

    # diff = abs(fit_vals - self.sky)
    # tol = 20 + 0.015 * abs(self.sky)
    # fits.writeto('diff.fits', diff)
    # fits.writeto('std.fits', 1.5*self.std)
    # fits.writeto('tol.fits', tol)
    # fits.writeto('where.fits', (diff > tol).astype(np.int16))

    # Stick to numpy.testing for comparing results, even if it gets a
    # bit convoluted, because it performs extra checks and reports
    # useful information in case of failure. All of the following
    # comparison methods work, but I'm still a bit undecided on the
    # optimal criterion for success/failure:
    assert_allclose(fit_vals, self.sky, atol=20., rtol=0.015)
    # assert not np.any(np.abs(fit_vals-self.sky) > 2.*self.std)
    # assert_array_less(np.abs(fit_vals-self.sky), 2.*self.std)

    # Also check that the rejected pixels were as expected (from previous
    # runs) for the central column:
    assert_equal(fit1d.mask[:12, 70], False)
    assert_equal(fit1d.mask[12:21, 70], True)
    assert_equal(fit1d.mask[21:, 70], False)
def test_chebyshev_def_ax_quartic(self):
    """
    Fit object spectrum with a quartic Chebyshev polynomial, rejecting
    the sky.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='chebyshev', order=4, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=15.,
                    rtol=0.015)
def test_legendre_ax1_quartic(self):
    """
    Fit object spectrum with a quartic Legendre polynomial, rejecting
    the sky.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='legendre', order=4, axis=1,
                      sigma_lower=2.5, sigma_upper=2.5, niter=5,
                      plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=20.,
                    rtol=0.01)
def test_chebyshev_1_model_ax0_lin(self):
    """
    Fit linear sky background along a single Nx1 column, rejecting the
    object spectrum.
    """
    fit_vals = fit_1D(self.data[:, 70:71],
                      weights=self.weights[:, 70:71],
                      function='chebyshev', order=1, axis=0,
                      sigma_lower=2.5, sigma_upper=2.5, niter=5,
                      plot=debug).evaluate()

    assert_allclose(fit_vals, self.sky[:, 70:71], atol=10., rtol=0.)
def test_cubic_spline_def_ax_ord3(self):
    """
    Fit object spectrum in transposed lambda-y-x cube with a cubic
    spline, rejecting the sky.
    """
    fit_vals = fit_1D(self.data.T, weights=self.weights.T,
                      function='spline3', order=3, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj.T + self.bglev, atol=20.,
                    rtol=0.01)
def test_cubic_spline_ax0_ord3(self):
    """
    Fit object spectrum in x-y-lambda cube with a cubic spline,
    rejecting the sky.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='spline3', order=3, axis=0,
                      sigma_lower=2.5, sigma_upper=2.5, niter=5,
                      plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=20.,
                    rtol=0.01)
def test_chebyshev_1_model_def_ax_quartic(self):
    """
    Fit object spectrum in a single 1xN row, rejecting the sky.
    """
    fit_vals = fit_1D(self.data[16:17], weights=self.weights[16:17],
                      function='chebyshev', order=4, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj[16:17] + self.bglev, atol=5.,
                    rtol=0.015)
def test_cubic_spline_def_ax_ord3_grow1(self):
    """
    Fit object spectrum with a low-order cubic spline, rejecting the
    sky with grow=1.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='spline3', order=3, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, grow=1,
                      plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=20.,
                    rtol=0.01)
def test_chebyshev_ax1_quartic_grow2(self):
    """
    Fit object spectrum using higher thresholds than the last test to
    reject sky lines and grow=2 to compensate. Specify axis=1
    explicitly, rather than the default of -1 (last axis).
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='chebyshev', order=4, axis=1,
                      sigma_lower=3.7, sigma_upper=3.7, niter=5,
                      grow=2, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=15.,
                    rtol=0.015)
def test_chebyshev_ax0_lin_grow2(self):
    """
    Fit background along the slit, rejecting the object spectrum with
    grow=2, which for these parameters produces a slightly closer
    match than grow=0.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='chebyshev', order=1, axis=0,
                      sigma_lower=3., sigma_upper=2.3, niter=5,
                      grow=2, plot=debug).evaluate()

    assert_allclose(fit_vals, self.sky, atol=15., rtol=0.015)
def test_chebyshev_ax0_lin_slices_noiter(self):
    """
    Fit linear sky background along the slit with the object spectrum
    region excluded by the user and no other rejection (same as the
    regions-string test below, but passing a list of slice objects
    rather than a regions string).
    """
    fit1d = fit_1D(self.data, weights=self.weights,
                   function='chebyshev', order=1, axis=0, niter=0,
                   regions=[slice(0, 10), slice(22, 30)], plot=debug)
    fit_vals = fit1d.evaluate()

    assert_allclose(fit_vals, self.sky, atol=20., rtol=0.02)

    assert fit1d.regions_pix == ((1, 10), (23, 30))
def test_chebyshev_ax0_lin_regions_noiter(self):
    """
    Fit linear sky background along the slit with the object spectrum
    region excluded by the user and no other rejection.
    """
    fit1d = fit_1D(self.data, weights=self.weights,
                   function='chebyshev', order=1, axis=0, niter=0,
                   regions="1:10,23:30", plot=debug)
    fit_vals = fit1d.evaluate()

    assert_allclose(fit_vals, self.sky, atol=20., rtol=0.02)

    assert fit1d.regions_pix == ((1, 10), (23, 30))
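The two tests above exercise the same exclusion expressed two ways: a 1-indexed, inclusive `regions` string and a list of 0-indexed, end-exclusive `slice` objects. A minimal sketch of a hypothetical consistency check (test name and tolerances are illustrative; the `fit_1D` keywords match those used throughout these tests) that asserts the equivalence directly:

def test_regions_string_vs_slices_equivalent(self):
    # Hypothetical check: both region parametrizations should select
    # the same pixels and therefore produce identical fits.
    fit_str = fit_1D(self.data, weights=self.weights,
                     function='chebyshev', order=1, axis=0, niter=0,
                     regions="1:10,23:30", plot=debug)
    fit_slc = fit_1D(self.data, weights=self.weights,
                     function='chebyshev', order=1, axis=0, niter=0,
                     regions=[slice(0, 10), slice(22, 30)], plot=debug)

    assert fit_str.regions_pix == fit_slc.regions_pix
    assert_allclose(fit_str.evaluate(), fit_slc.evaluate(),
                    rtol=1e-10)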
def test_cubic_spline_ax1_ord3_grow1(self):
    """
    Fit object spectrum in transposed x-lambda-y cube with a cubic
    spline, rejecting the sky with grow=1.
    """
    fit_vals = fit_1D(np.rollaxis(self.data, 0, 2),
                      weights=np.rollaxis(self.weights, 0, 2),
                      function='spline3', order=3, axis=1,
                      sigma_lower=3.5, sigma_upper=3.5, niter=5,
                      grow=1, plot=debug).evaluate()

    assert_allclose(fit_vals, np.rollaxis(self.obj, 0, 2) + self.bglev,
                    atol=20., rtol=0.01)
def test_chebyshev_single_quartic(self):
    """
    Fit object spectrum, rejecting the sky, in a single 1D array.
    """
    fit_vals = fit_1D(self.data[16], weights=self.weights[16],
                      function='chebyshev', order=4, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj[16], atol=30., rtol=0.02)
def test_spline(self):
    # Fit the more-coarsely-sampled Gaussian model:
    fit1d = fit_1D(self.data_coarse, function='spline3', order=15,
                   niter=0, plot=debug)

    # Evaluate the fits onto 5x finer sampling:
    fit_vals = fit1d.evaluate(
        np.arange(0., self.data_coarse.shape[-1], 0.2)
    )

    # Compare fit values with the 5x-sampled version of the original
    # model (ignoring the equivalent of the end 3 pixels from the
    # input, where the fit diverges a bit):
    assert_allclose(fit_vals[:, 15:-15], self.data_fine[:, 15:-15],
                    atol=0.1)
def test_chebyshev_def_ax_quartic(self):
    """
    Fit object spectrum in transposed lambda-y-x cube with a quartic
    Chebyshev polynomial, rejecting the sky.
    """
    # Here we transpose the input cube before fitting object spectra
    # along the default last axis, just because that makes more sense
    # than trying to fit the background with rejection along one
    # spatial axis that is too short to have clean sky regions.
    fit_vals = fit_1D(self.data.T, weights=self.weights.T,
                      function='chebyshev', order=4, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj.T + self.bglev, atol=20.,
                    rtol=0.01)
def test_chebyshev_ax0_quartic(self):
    """
    Fit object spectrum in x-y-lambda cube with a quartic Chebyshev
    polynomial, rejecting the sky.
    """
    fit_vals = fit_1D(self.data, weights=self.weights,
                      function='chebyshev', order=4, axis=0,
                      sigma_lower=2.5, sigma_upper=2.5, niter=5,
                      plot=debug).evaluate()

    assert_allclose(fit_vals, self.obj, atol=45., rtol=0.015)
def test_cubic_spline_def_ax_ord3_masked(self):
    """
    Fit masked object spectrum with a low-order cubic spline,
    rejecting the sky with grow=1.
    """
    fit1d = fit_1D(self.masked_data, weights=self.weights,
                   function='spline3', order=3, sigma_lower=2.5,
                   sigma_upper=2.5, niter=5, grow=1, plot=debug)
    fit_vals = fit1d.evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=20.,
                    rtol=0.01)

    # Ensure that masked input values have been passed through to the
    # output mask by the fitter:
    assert_equal(fit1d.mask[4:6, 80:93], True)
    assert_equal(fit1d.mask[24:27, 24:27], True)
def test_chebyshev_def_ax_quartic_masked(self):
    """
    Fit masked object spectrum with a quartic Chebyshev polynomial,
    rejecting the sky.
    """
    fit1d = fit_1D(self.masked_data, weights=self.weights,
                   function='chebyshev', order=4, sigma_lower=2.5,
                   sigma_upper=2.5, niter=5, plot=debug)
    fit_vals = fit1d.evaluate()

    assert_allclose(fit_vals, self.obj + self.bglev, atol=20.,
                    rtol=0.01)

    # Ensure that masked input values have been passed through to the
    # output mask by the fitter:
    assert_equal(fit1d.mask[4:6, 80:93], True)
    assert_equal(fit1d.mask[24:27, 24:27], True)

    assert fit1d.regions_pix == ((1, 140),)
def test_chebyshev_1_model_def_ax_quartic(self):
    """
    Fit object spectrum in a single 1xN row, rejecting the sky.
    """
    fit_vals = fit_1D(self.data[16:17], weights=self.weights[16:17],
                      function='chebyshev', order=4, sigma_lower=2.5,
                      sigma_upper=2.5, niter=5, plot=debug).evaluate()

    # This should work, but currently fails because fit_1D is
    # returning a result with shape (140, 1) from (1, 140) inputs.
    assert_allclose(fit_vals, self.obj[16:17], atol=30., rtol=0.02)
def build_fit_1D(fit1d_params, data, points, weights):
    """
    Create a fit_1D from the given parameter dictionary and
    x/y/weights.

    Parameters
    ----------
    fit1d_params : dict
        Dictionary of parameters for the fit_1D.
    data : list
        Y values to be fitted.
    points : list
        X coordinates of the data.
    weights : list
        Weights of the data points.

    Returns
    -------
    :class:`~gempy.library.fitting.fit_1D`
        The resulting fitter.
    """
    return fit_1D(data, points=points, weights=weights, **fit1d_params)
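A minimal usage sketch for this helper (the data and parameter values are illustrative; the keywords in the dictionary are the same `fit_1D` options exercised by the tests above):

# Hypothetical example: fit a quadratic Chebyshev to noisy 1D data.
import numpy as np

x = np.arange(100, dtype=float)
y = 0.01 * (x - 50.)**2 + np.random.normal(scale=0.5, size=x.size)
w = np.ones_like(y)

fit1d = build_fit_1D({'function': 'chebyshev', 'order': 2, 'niter': 3,
                      'sigma_lower': 3., 'sigma_upper': 3.},
                     data=y, points=x, weights=w)
model_vals = fit1d.evaluate()  # evaluated at the input points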
def subtractOverscan(self, adinputs=None, **params):
    """
    This primitive subtracts the overscan level from the image. The
    level for each row (currently the primitive requires that the
    overscan region be a vertical strip) is determined in one of the
    following ways, according to the *function* and *order* parameters:

    "poly":   a polynomial of degree *order* (1=linear, etc)
    "spline": using *order* equally-sized cubic spline pieces or, if
              order=None or 0, a spline that provides a reduced
              chi^2=1
    "none":   no function is fit, and the value for each row is
              determined by the overscan pixels in that row

    The fitting is done iteratively but, in the first instance, a
    running median of the rows is calculated and rows that deviate
    from this median are rejected (and used in place of the actual
    value if function="none").

    Parameters
    ----------
    suffix: str
        suffix to be added to output files
    niterate: int
        number of rejection iterations
    high_reject: float/None
        number of standard deviations above which to reject high
        pixels
    low_reject: float/None
        number of standard deviations below which to reject low pixels
    nbiascontam: int/None
        number of columns adjacent to the illuminated region to reject
    function: str/None
        function to fit ("chebyshev" | "spline" | "none")
    order: int/None
        order of polynomial fit or number of spline pieces
    """
    log = self.log
    log.debug(gt.log_message("primitive", self.myself(), "starting"))
    timestamp_key = self.timestamp_keys[self.myself()]
    sfx = params["suffix"]

    fit1d_params = fit_1D.translate_params(params)
    # We need some of these parameters for pre-processing
    function = (fit1d_params.pop("function") or "none").lower()
    lsigma = params["lsigma"]
    hsigma = params["hsigma"]
    order = params["order"]
    nbiascontam = params["nbiascontam"]

    for ad in adinputs:
        if ad.phu.get(timestamp_key):
            log.warning("No changes will be made to {}, since it has "
                        "already been processed by subtractOverscan".
                        format(ad.filename))
            continue

        osec_list = ad.overscan_section()
        dsec_list = ad.data_section()
        for ext, osec, dsec in zip(ad, osec_list, dsec_list):
            x1, x2, y1, y2 = osec.x1, osec.x2, osec.y1, osec.y2
            axis = np.argmin([y2 - y1, x2 - x1])
            if axis == 1:
                if x1 > dsec.x1:  # Bias on right
                    x1 += nbiascontam
                    x2 -= 1
                else:  # Bias on left
                    x1 += 1
                    x2 -= nbiascontam
                pixels = np.arange(y1, y2)
                sigma = ext.read_noise() / np.sqrt(x2 - x1)
            else:
                if y1 > dsec.y1:  # Bias on top
                    y1 += nbiascontam
                    y2 -= 1
                else:  # Bias on bottom
                    y1 += 1
                    y2 -= nbiascontam
                pixels = np.arange(x1, x2)
                sigma = ext.read_noise() / np.sqrt(y2 - y1)

            data = np.mean(ext.data[y1:y2, x1:x2], axis=axis)
            if ext.is_in_adu():
                sigma /= ext.gain()

            # The UnivariateSpline will make reduced-chi^2=1 so it
            # will fit bad rows. Need to mask these before starting,
            # so use a running median. Probably a good starting point
            # for all fits.
            runmed = at.boxcar(data, operation=np.median, size=2)
            residuals = data - runmed
            mask = np.logical_or(residuals > hsigma * sigma
                                 if hsigma is not None else False,
                                 residuals < -lsigma * sigma
                                 if lsigma is not None else False)
            if "spline" in function and order is None:
                data = np.where(mask, runmed, data)

            if function == "none":
                bias = data
            else:
                fit1d = fit_1D(np.ma.masked_array(data, mask=mask),
                               points=pixels,
                               weights=np.full_like(data, 1. / sigma),
                               function=function, **fit1d_params)
                bias = fit1d.evaluate(np.arange(
                    ext.data.shape[1 - axis]))
                sigma = fit1d.rms

            # using "-=" won't change from int to float
            if axis == 1:
                ext.data = ext.data - bias[:, np.newaxis].astype(
                    np.float32)
            else:
                ext.data = ext.data - bias.astype(np.float32)

            ext.hdr.set('OVERSEC',
                        '[{}:{},{}:{}]'.format(x1 + 1, x2, y1 + 1, y2),
                        self.keyword_comments['OVERSEC'])
            ext.hdr.set('OVERSCAN', np.mean(data),
                        self.keyword_comments['OVERSCAN'])
            ext.hdr.set('OVERRMS', sigma,
                        self.keyword_comments['OVERRMS'])

        # Timestamp, and update filename
        gt.mark_history(ad, primname=self.myself(),
                        keyword=timestamp_key)
        ad.update_filename(suffix=sfx, strip=True)

    return adinputs
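The running-median pre-rejection described in the docstring above can be illustrated in isolation. A minimal NumPy sketch (a stand-in for the internal `at.boxcar` step, using `scipy.ndimage.median_filter` instead; the function name and window size are illustrative):

import numpy as np
from scipy.ndimage import median_filter

def prereject(data, sigma, lsigma=3., hsigma=3., size=5):
    """Mask values deviating from a running median by more than
    lsigma/hsigma standard deviations (illustrative stand-in for the
    pre-processing step in subtractOverscan)."""
    runmed = median_filter(data, size=size, mode='nearest')
    residuals = data - runmed
    mask = np.zeros(data.shape, dtype=bool)
    if hsigma is not None:
        mask |= residuals > hsigma * sigma   # reject high outliers
    if lsigma is not None:
        mask |= residuals < -lsigma * sigma  # reject low outliers
    return mask, runmed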
def normalizeFlat(self, adinputs=None, **params):
    """
    This primitive normalizes a GMOS Longslit spectroscopic flatfield
    in a manner similar to that performed by gsflat in Gemini-IRAF.
    A cubic spline is fitted along the dispersion direction of each
    row, separately for each CCD.

    As this primitive is GMOS-specific, we know the dispersion
    direction will be along the rows, and there will be 3 CCDs.

    For Hamamatsu CCDs, the 21 unbinned columns at each CCD edge are
    masked out, following the procedure in gsflat.
    TODO: Should we add these in the BPM?

    Parameters
    ----------
    suffix : str/None
        suffix to be added to output files
    center : int/None
        central row/column for 1D extraction (None => use middle)
    nsum : int
        number of rows/columns around center to combine
    function : str
        type of function to fit (splineN or polynomial types)
    order : int/str
        order of the spline fit to be performed (can be 3 ints,
        separated by commas)
    lsigma : float/None
        lower rejection limit in standard deviations
    hsigma : float/None
        upper rejection limit in standard deviations
    niter : int
        maximum number of rejection iterations
    grow : float/False
        growth radius for rejected pixels
    threshold : float
        threshold (relative to peak) for flagging unilluminated pixels
    interactive : bool
        set to activate an interactive preview to fine-tune the input
        parameters
    """
    log = self.log
    log.debug(gt.log_message("primitive", self.myself(), "starting"))
    timestamp_key = self.timestamp_keys[self.myself()]

    # For flexibility, the code is going to pass whatever validated
    # parameters it gets (apart from suffix and spectral_order) to
    # the spline fitter.
    suffix = params["suffix"]
    threshold = params["threshold"]
    spectral_order = params["order"]
    interactive_reduce = params["interactive"]

    for ad in adinputs:
        xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
        array_info = gt.array_information(ad)
        is_hamamatsu = 'Hamamatsu' in ad.detector_name(pretty=True)
        ad_tiled = self.tileArrays([ad], tile_all=False)[0]
        ad_fitted = astrodata.create(ad.phu)
        all_fp_init = []

        # If the entire row is unilluminated, we want to fit the
        # pixels but still keep the edges masked
        for ext in ad_tiled:
            try:
                ext.mask ^= (np.bitwise_and.reduce(ext.mask, axis=1)
                             & DQ.unilluminated)[:, None]
            except TypeError:  # ext.mask is None
                pass
            else:
                if is_hamamatsu:
                    ext.mask[:, :21 // xbin] = 1
                    ext.mask[:, -21 // xbin:] = 1
            all_fp_init.append(fit_1D.translate_params(params))

        # Parameter validation should ensure we get an int or a list
        # of 3 ints
        try:
            orders = [int(x) for x in spectral_order]
        except TypeError:
            orders = [spectral_order] * 3

        # Capture the per-extension order into the fit parameters
        for order, fp_init in zip(orders, all_fp_init):
            fp_init["order"] = order

        # Interactive or not
        if interactive_reduce:
            # all_X arrays are used to track appropriate inputs for
            # each of the N extensions
            all_pixels = []
            all_domains = []
            nrows = ad_tiled[0].shape[0]
            for ext, order, indices in zip(ad_tiled, orders,
                                           array_info.extensions):
                pixels = np.arange(ext.shape[1])
                all_pixels.append(pixels)
                dispaxis = 2 - ext.dispersion_axis()
                all_domains.append([0, ext.shape[dispaxis] - 1])

            config = self.params[self.myself()]
            config.update(**params)

            # Create a 'row' parameter to add to the UI so the user
            # can select the row they want to fit.
            reinit_params = ["row", ]
            reinit_extras = {
                "row": RangeField("Row of data to operate on", int,
                                  int(nrows / 2), min=1, max=nrows)
            }

            # This function is used by the interactive fitter to
            # generate the x, y, weights to use for each fit. We only
            # want to fit a single row of data interactively, so that
            # we can be responsive in the UI. The 'row' extra
            # parameter defined above will create a slider for the
            # user and we will have access to the selected value in
            # the 'extras' dictionary passed in here.
            def reconstruct_points(conf, extras):
                r = max(0, extras['row'] - 1)
                all_coords = []
                for rppixels, rpext in zip(all_pixels, ad_tiled):
                    masked_data = np.ma.masked_array(
                        rpext.data[r],
                        mask=None if rpext.mask is None
                        else rpext.mask[r])
                    if rpext.variance is None:
                        weights = None
                    else:
                        weights = np.sqrt(at.divide0(
                            1., rpext.variance[r]))
                    all_coords.append([rppixels, masked_data,
                                       weights])
                return all_coords

            visualizer = fit1d.Fit1DVisualizer(
                reconstruct_points, all_fp_init, config=config,
                reinit_params=reinit_params,
                reinit_extras=reinit_extras,
                tab_name_fmt="CCD {}", xlabel='x', ylabel='y',
                reinit_live=True, domains=all_domains,
                title="Normalize Flat", enable_user_masking=False)
            geminidr.interactive.server.interactive_fitter(visualizer)

            # The fit models were done on a single row, so we need to
            # get the parameters that were used in the final fit for
            # each one, and then rerun it on the full data for that
            # extension.
            all_m_final = visualizer.results()
            for m_final, ext in zip(all_m_final, ad_tiled):
                masked_data = np.ma.masked_array(ext.data,
                                                 mask=ext.mask)
                weights = np.sqrt(at.divide0(1., ext.variance))
                fit1d_params = m_final.extract_params()
                fitted_data = fit_1D(masked_data, weights=weights,
                                     **fit1d_params,
                                     axis=1).evaluate()
                # Copy header so we have the _section() descriptors
                ad_fitted.append(fitted_data, header=ext.hdr)
        else:
            for ext, indices, fit1d_params in zip(
                    ad_tiled, array_info.extensions, all_fp_init):
                masked_data = np.ma.masked_array(ext.data,
                                                 mask=ext.mask)
                weights = np.sqrt(at.divide0(1., ext.variance))
                fitted_data = fit_1D(masked_data, weights=weights,
                                     **fit1d_params,
                                     axis=1).evaluate()
                # Copy header so we have the _section() descriptors
                ad_fitted.append(fitted_data, header=ext.hdr)

        # Find the largest spline value for each row across all
        # extensions and mask pixels below the requested fraction of
        # the peak
        row_max = np.array([ext_fitted.data.max(axis=1)
                            for ext_fitted in ad_fitted]).max(axis=0)

        # Prevent runtime error in division
        row_max[row_max == 0] = np.inf

        for ext_fitted in ad_fitted:
            ext_fitted.mask = np.where(
                (ext_fitted.data.T / row_max).T < threshold,
                DQ.unilluminated, DQ.good).astype(DQ.datatype)

        for ext_fitted, indices in zip(ad_fitted,
                                       array_info.extensions):
            tiled_arrsec = ext_fitted.array_section()
            for i in indices:
                ext = ad[i]
                arrsec = ext.array_section()
                slice_ = (slice((arrsec.y1 - tiled_arrsec.y1) // ybin,
                                (arrsec.y2 - tiled_arrsec.y1) // ybin),
                          slice((arrsec.x1 - tiled_arrsec.x1) // xbin,
                                (arrsec.x2 - tiled_arrsec.x1) // xbin))

                # Suppress warnings to do with fitted_data==0
                # (which create NaNs in variance)
                with np.errstate(invalid='ignore', divide='ignore'):
                    ext.divide(ext_fitted.nddata[slice_])
                np.nan_to_num(ext.data, copy=False, posinf=0,
                              neginf=0)
                np.nan_to_num(ext.variance, copy=False)

        # Timestamp and update filename
        gt.mark_history(ad, primname=self.myself(),
                        keyword=timestamp_key)
        ad.update_filename(suffix=suffix, strip=True)

    return adinputs
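The peak-relative thresholding near the end of normalizeFlat can be sketched independently. A minimal NumPy version (the function name is hypothetical, and a plain integer stands in for the real `DQ.unilluminated` bitmask constant):

import numpy as np

UNILLUMINATED = 64  # stand-in for DQ.unilluminated

def flag_unilluminated(fitted_arrays, threshold=0.01):
    """Flag pixels whose fitted flat value falls below `threshold`
    times the per-row peak across all arrays (CCDs)."""
    row_max = np.array([a.max(axis=1)
                        for a in fitted_arrays]).max(axis=0)
    row_max[row_max == 0] = np.inf  # avoid division by zero
    return [np.where((a.T / row_max).T < threshold,
                     UNILLUMINATED, 0).astype(np.uint16)
            for a in fitted_arrays]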
def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None,
                     shift=None, max_shift=20):
    """
    Adds an illumination mask to each AD object. This is only done for
    full-frame (not Central Spectrum) GMOS spectra, and is calculated
    by making a model illumination pattern from the attached MDF and
    cross-correlating it with the spatial profile of the data.

    Parameters
    ----------
    suffix : str
        suffix to be added to output files
    illum_mask : str/None
        name of illumination mask (None -> use default)
    shift : int/None
        user-defined shift to apply to illumination mask
    max_shift : int
        maximum shift (in unbinned pixels) allowable for the cross-
        correlation
    """
    offset_dict = {("GMOS-N", "Hamamatsu-N"): 1.5,
                   ("GMOS-N", "e2vDD"): -0.2,
                   ("GMOS-N", "EEV"): 0.7,
                   ("GMOS-S", "Hamamatsu-S"): 5.5,
                   ("GMOS-S", "EEV"): 3.8}
    edges = 50  # try to eliminate issues at the very edges

    log = self.log
    log.debug(gt.log_message("primitive", self.myself(), "starting"))
    timestamp_key = self.timestamp_keys[self.myself()]

    # Do this now for memory management reasons. We'll be creating
    # large arrays temporarily and don't want the permanent mask
    # arrays to fragment the free memory.
    for ad in adinputs:
        for ext in ad:
            if ext.mask is None:
                ext.mask = np.zeros_like(ext.data).astype(DQ.datatype)

    for ad, illum in zip(*gt.make_lists(adinputs, illum_mask,
                                        force_ad=True)):
        if ad.phu.get(timestamp_key):
            log.warning('No changes will be made to {}, since it has '
                        'already been processed by addIllumMaskToDQ'.
                        format(ad.filename))
            continue

        ybin = ad.detector_y_bin()
        ad_detsec = ad.detector_section()
        no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                         for detsec in ad_detsec)
        has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec)
                      and 'Hamamatsu' in ad.detector_name(pretty=True))

        if illum:
            log.fullinfo("Using {} as illumination mask".format(
                illum.filename))
            final_illum = gt.clip_auxiliary_data(
                ad, aux=illum, aux_type='bpm',
                return_dtype=DQ.datatype)
            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0,
                                    DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask |= iext
        elif not no_bridges:  # i.e. there are bridges
            try:
                mdf = ad.MDF
            except AttributeError:
                log.warning(f"MDF not found for {ad.filename} - "
                            "cannot add illumination mask.")
                continue

            # Default operation for GMOS full-frame LS.
            # Sadly, we cannot do this reliably without concatenating
            # the arrays and using a big chunk of memory.
            row_medians = np.percentile(np.concatenate(
                [ext.data for ext in ad], axis=1), 95, axis=1)
            row_medians -= at.boxcar(row_medians, size=50 // ybin)

            # Construct a model of the slit illumination from the MDF;
            # coefficients are from G-IRAF except c0, approximated
            # from data
            model = np.zeros_like(row_medians, dtype=int)
            for ypos, ysize in mdf['slitpos_my', 'slitsize_my']:
                y = ypos + np.array([-0.5, 0.5]) * ysize
                c0 = offset_dict[ad.instrument(),
                                 ad.detector_name(pretty=True)]
                if ad.instrument() == "GMOS-S":
                    c1, c2, c3 = (0.99911, -1.7465e-5, 3.0494e-7)
                else:
                    c1, c2, c3 = (0.99591859227, 5.3042211333437e-8,
                                  1.7447902551997e-7)
                yccd = ((c0 + y * (c1 + y * (c2 + y * c3))) *
                        1.611444 / ad.pixel_scale() +
                        0.5 * model.size).astype(int)
                model[yccd[0]:yccd[1] + 1] = 1
                log.stdinfo("Expected slit location from pixels "
                            f"{yccd[0] + 1} to {yccd[1] + 1}")

            if shift is None:
                max_shift = 50
                mshift = max_shift // ybin + 2
                mshift2 = mshift + edges
                # model[] indexing avoids reduction in signal as slit
                # is shifted off the top of the image
                cntr = model.size - edges - mshift2 - 1
                xcorr = correlate(row_medians[edges:-edges],
                                  model[mshift2:-mshift2],
                                  mode='full')[cntr - mshift:
                                               cntr + mshift]
                # This line avoids numerical errors in the spline fit
                xcorr -= np.median(xcorr)
                # This calculates the offsets of each point from the
                # straight line between its neighbours
                std = (xcorr[1:-1] - 0.5 *
                       (xcorr + np.roll(xcorr, 2))[2:]).std()
                xspline = fit_1D(xcorr, function="spline3", order=None,
                                 weights=np.full(len(xcorr),
                                                 1. / std)).evaluate()
                yshift = xspline.argmax() - mshift
                maxima = xspline[1:-1][np.logical_and(
                    np.diff(xspline[:-1]) > 0,
                    np.diff(xspline[1:]) < 0)]
                significant_maxima = (maxima > xspline.max() -
                                      3 * std).sum()
                if (significant_maxima > 1 or
                        abs(yshift // ybin) > max_shift):
                    log.warning(f"{ad.filename}: cross-correlation "
                                "peak is untrustworthy so not adding "
                                "illumination mask. Please re-run "
                                "with a specified shift.")
                    yshift = None
            else:
                yshift = shift

            if yshift is not None:
                log.stdinfo(f"{ad.filename}: Shifting mask by "
                            f"{yshift} pixels")
                row_mask = np.ones_like(model, dtype=int)
                if yshift < 0:
                    row_mask[:yshift] = 1 - model[-yshift:]
                elif yshift > 0:
                    row_mask[yshift:] = 1 - model[:-yshift]
                else:
                    row_mask[:] = 1 - model
                for ext in ad:
                    ext.mask |= (row_mask * DQ.unilluminated).astype(
                        DQ.datatype)[:, np.newaxis]

        if has_48rows:
            actual_rows = 48 // ybin
            for ext in ad:
                ext.mask[:actual_rows] |= DQ.unilluminated

        # Timestamp and update filename
        gt.mark_history(ad, primname=self.myself(),
                        keyword=timestamp_key)
        ad.update_filename(suffix=suffix, strip=True)

    return adinputs
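The shift determination above boils down to locating the peak of a cross-correlation between the measured spatial profile and a binary slit model. A simplified sketch of that core step (the function name is hypothetical, and it omits the edge trimming, spline smoothing and multiple-peak sanity checks; it assumes `profile` and `model` have equal length):

import numpy as np
from scipy.signal import correlate

def find_mask_shift(profile, model, max_shift=50):
    """Return the offset (within +/- max_shift pixels) that best
    aligns a binary slit model with the measured row profile
    (simplified version of the logic in addIllumMaskToDQ)."""
    xcorr = correlate(profile, model, mode='full')
    center = model.size - 1  # zero-lag index for 'full' correlation
    window = xcorr[center - max_shift:center + max_shift + 1]
    return int(np.argmax(window)) - max_shift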