Example #1
    def get_specobjs(self):
        """Get the updated version of SpecObjs

        Returns:
            SpecObjs: The current SpecObjs instance, or None if updates are not in use
        """
        if self._use_updates:
            msgs.work("Have not updated SpecObjs yet")
            return self.specobjs
        else:
            return None
Example #2
def flexure_obj_oldbuggyversion(specobjs,
                                maskslits,
                                method,
                                sky_spectrum,
                                sky_file=None,
                                mxshft=None):
    """Correct wavelengths for flexure, object by object

    Parameters
    ----------
    method : str
        Options are: 'boxcar' (recommended) or 'slitpix'.

    Returns
    -------
    flex_list: list
        list of dicts containing flexure results.  Aligned with
        specobjs.  Filled with a basically empty dict if the slit is
        skipped or there is no object

    """
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")
    # Load Archive
    #    skyspec_fil, arx_sky = flexure_archive(spectrograph=spectrograph, skyspec_fil=skyspec_fil)

    # Loop on objects
    flex_list = []

    gdslits = np.where(~maskslits)[0]
    for sl in range(len(specobjs)):
        # Reset
        flex_dict = dict(polyfit=[],
                         shift=[],
                         subpix=[],
                         corr=[],
                         corr_cen=[],
                         spec_file=sky_file,
                         smooth=[],
                         arx_spec=[],
                         sky_spec=[])
        if sl not in gdslits:
            flex_list.append(flex_dict.copy())
            continue
        msgs.info(
            "Working on flexure in slit (if an object was detected): {:d}".
            format(sl))
        for specobj in specobjs[sl]:  # for convenience
            if specobj is None:
                continue

            # Using boxcar
            if method in ['boxcar', 'slitcen']:
                sky_wave = specobj.BOX_WAVE  #.to('AA').value
                sky_flux = specobj.BOX_COUNTS_SKY
            else:
                msgs.error(
                    "Not ready for this flexure method: {}".format(method))

            # Generate 1D spectrum for object
            obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))

            # Calculate the shift
            fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)

            # Simple interpolation to apply
            npix = len(sky_wave)
            x = np.linspace(0., 1., npix)
            # Apply
            for attr in ['boxcar', 'optimal']:
                if not hasattr(specobj, attr):
                    continue
                if 'WAVE' in getattr(specobj, attr).keys():
                    msgs.info(
                        "Applying flexure correction to {0:s} extraction for object:"
                        .format(attr) + msgs.newline() +
                        "{0:s}".format(str(specobj)))
                    f = interpolate.interp1d(x,
                                             sky_wave,
                                             bounds_error=False,
                                             fill_value="extrapolate")
                    getattr(specobj, attr)['WAVE'] = f(x + fdict['shift'] /
                                                       (npix - 1)) * units.AA
            # Shift sky spec too
            cut_sky = fdict['sky_spec']
            x = np.linspace(0., 1., cut_sky.npix)
            f = interpolate.interp1d(x,
                                     cut_sky.wavelength.value,
                                     bounds_error=False,
                                     fill_value="extrapolate")
            twave = f(x + fdict['shift'] / (cut_sky.npix - 1)) * units.AA
            new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux))

            # Update dict
            for key in [
                    'polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth',
                    'arx_spec'
            ]:
                flex_dict[key].append(fdict[key])
            flex_dict['sky_spec'].append(new_sky)
        flex_list.append(flex_dict.copy())
    return flex_list
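
A minimal, self-contained sketch of the sub-pixel remapping trick used above: the wavelength array is parameterized on a normalized [0, 1] grid, and a shift measured in pixels is applied as the fraction shift/(npix - 1) of that grid. The wavelength array and shift value below are toy inputs for illustration only.

import numpy as np
from scipy import interpolate

npix = 1000
wave = np.linspace(6000., 7000., npix)   # toy wavelength array [Angstrom]
shift = 1.7                              # flexure shift in pixels (toy value)

# Re-evaluate the wavelengths at coordinates offset by the pixel shift
x = np.linspace(0., 1., npix)
f = interpolate.interp1d(x, wave, bounds_error=False, fill_value="extrapolate")
wave_shifted = f(x + shift / (npix - 1))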
Example #3
def flex_shift(obj_skyspec, arx_skyspec, mxshft=20):
    """ Calculate shift between object sky spectrum and archive sky spectrum

    Parameters
    ----------
    obj_skyspec : XSpectrum1D
        Sky spectrum extracted at the object position
    arx_skyspec : XSpectrum1D
        Archival sky spectrum
    mxshft : int, optional
        Maximum allowed shift in pixels

    Returns
    -------
    flex_dict : dict
      Contains flexure info (shift, polynomial fit, correlation, etc.);
      None if the calculation fails
    """

    # TODO None of these routines should have dependencies on XSpectrum1d!

    # Determine the brightest emission lines
    msgs.warn("If we use Paranal, cut down on wavelength early on")
    arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(
        arx_skyspec.flux.value)
    obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj = arc.detect_lines(
        obj_skyspec.flux.value)

    # Keep only 5 brightest amplitude lines (xxx_keep is array of
    # indices within arx_w of the 5 brightest)
    arx_keep = np.argsort(arx_amp[arx_w])[-5:]
    obj_keep = np.argsort(obj_amp[obj_w])[-5:]

    # Calculate wavelength (Angstrom per pixel)
    arx_disp = np.append(
        arx_skyspec.wavelength.value[1] - arx_skyspec.wavelength.value[0],
        arx_skyspec.wavelength.value[1:] - arx_skyspec.wavelength.value[:-1])
    #arx_disp = (np.amax(arx_sky.wavelength.value)-np.amin(arx_sky.wavelength.value))/arx_sky.wavelength.size
    obj_disp = np.append(
        obj_skyspec.wavelength.value[1] - obj_skyspec.wavelength.value[0],
        obj_skyspec.wavelength.value[1:] - obj_skyspec.wavelength.value[:-1])
    #obj_disp = (np.amax(obj_sky.wavelength.value)-np.amin(obj_sky.wavelength.value))/obj_sky.wavelength.size

    # Calculate resolution (lambda/delta lambda_FWHM) ... maybe we don't need
    # this? We could just use the sigmas
    arx_idx = (arx_cent + 0.5).astype(int)[arx_w][arx_keep]  # The +0.5 is for rounding
    arx_res = arx_skyspec.wavelength.value[arx_idx]/\
              (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
    obj_idx = (obj_cent + 0.5).astype(int)[obj_w][obj_keep]  # The +0.5 is for rounding
    obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
              (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])
    #obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/(
    #    obj_disp*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])

    if not np.all(np.isfinite(obj_res)):
        msgs.warn(
            'Failed to measure the resolution of the object spectrum, likely due to error '
            'in the wavelength image.')
        return None
    msgs.info("Resolution of Archive={0} and Observation={1}".format(
        np.median(arx_res), np.median(obj_res)))

    # Determine sigma of gaussian for smoothing
    arx_sig2 = np.power(arx_disp[arx_idx] * arx_wid[arx_w][arx_keep], 2)
    obj_sig2 = np.power(obj_disp[obj_idx] * obj_wid[obj_w][obj_keep], 2)

    arx_med_sig2 = np.median(arx_sig2)
    obj_med_sig2 = np.median(obj_sig2)

    if obj_med_sig2 >= arx_med_sig2:
        smooth_sig = np.sqrt(obj_med_sig2 - arx_med_sig2)  # Ang
        smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix * 2 *
                                               np.sqrt(2 * np.log(2)))
    else:
        msgs.warn("New sky spectrum has higher resolution than the archive; not smoothing. "
                  "Prefer an archival sky spectrum with higher resolution.")
        smooth_sig_pix = 0.
        #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)

    #Determine region of wavelength overlap
    min_wave = max(np.amin(arx_skyspec.wavelength.value),
                   np.amin(obj_skyspec.wavelength.value))
    max_wave = min(np.amax(arx_skyspec.wavelength.value),
                   np.amax(obj_skyspec.wavelength.value))

    #Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
    #    if np.median(obj_res) >= np.median(arx_res):
    #        msgs.warn("New Sky has higher resolution than Archive.  Not smoothing")
    #obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)
    #    else:
    #tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
    #        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
    #arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)

    # Define wavelengths of overlapping spectra
    keep_idx = np.where((obj_skyspec.wavelength.value >= min_wave)
                        & (obj_skyspec.wavelength.value <= max_wave))[0]
    #keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]

    #Rebin both spectra onto overlapped wavelength range
    if len(keep_idx) <= 50:
        msgs.warn("Not enough overlap between sky spectra")
        return None
    else:  #rebin onto object ALWAYS
        keep_wave = obj_skyspec.wavelength[keep_idx]
        arx_skyspec = arx_skyspec.rebin(keep_wave)
        obj_skyspec = obj_skyspec.rebin(keep_wave)
        # Trim edges (rebinning is junk there)
        arx_skyspec.data['flux'][0, :2] = 0.
        arx_skyspec.data['flux'][0, -2:] = 0.
        obj_skyspec.data['flux'][0, :2] = 0.
        obj_skyspec.data['flux'][0, -2:] = 0.

    # Normalize spectra to unit average sky count
    norm = np.sum(obj_skyspec.flux.value) / obj_skyspec.npix
    obj_skyspec.flux = obj_skyspec.flux / norm
    norm2 = np.sum(arx_skyspec.flux.value) / arx_skyspec.npix
    arx_skyspec.flux = arx_skyspec.flux / norm2
    if (norm < 0.):
        msgs.warn("Bad normalization of object in flexure algorithm")
        msgs.warn("Will try the median")
        norm = np.median(obj_skyspec.flux.value)
        if (norm < 0.):
            msgs.warn("Improper sky spectrum for flexure.  Is it too faint??")
            return None
    if (norm2 < 0.):
        msgs.warn(
            'Bad normalization of archive in flexure. You are probably using wavelengths '
            'well beyond the archive.')
        return None

    # Deal with bad pixels
    msgs.work("Need to mask bad pixels")

    # Deal with underlying continuum
    msgs.work("Consider taking median first [5 pixel]")
    everyn = obj_skyspec.npix // 20
    bspline_par = dict(everyn=everyn)
    mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value,
                                    obj_skyspec.flux.value,
                                    3,
                                    function='bspline',
                                    sigma=3.,
                                    bspline_par=bspline_par)
    obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
    obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
    mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value,
                                        arx_skyspec.flux.value,
                                        3,
                                        function='bspline',
                                        sigma=3.,
                                        bspline_par=bspline_par)
    arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value,
                                  'bspline')
    arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont

    # Consider sharpness filtering (e.g. LowRedux)
    msgs.work("Consider taking median first [5 pixel]")

    #Cross correlation of spectra
    #corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
    corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")

    #Create array around the max of the correlation function for fitting for subpixel max
    # Restrict to pixels within maxshift of zero lag
    lag0 = corr.size // 2
    #mxshft = settings.argflag['reduce']['flexure']['maxshift']
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix_grid = np.linspace(max_corr - 3., max_corr + 3., 7)

    #Fit a 2-degree polynomial to peak of correlation function. JFH added this if/else to not crash for bad slits
    if np.any(np.isfinite(corr[subpix_grid.astype(int)])):
        fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(int)],
                             'polynomial', 2)
        success = True
        max_fit = -0.5 * fit[1] / fit[2]
    else:
        fit = utils.func_fit(subpix_grid, 0.0 * subpix_grid, 'polynomial', 2)
        success = False
        max_fit = 0.0
        msgs.warn('Flexure compensation failed for one of your objects')

    #Calculate and apply shift in wavelength
    shift = float(max_fit) - lag0
    msgs.info("Flexure correction of {:g} pixels".format(shift))
    #model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]

    flex_dict = dict(polyfit=fit,
                     shift=shift,
                     subpix=subpix_grid,
                     corr=corr[subpix_grid.astype(int)],
                     sky_spec=obj_skyspec,
                     arx_spec=arx_skyspec,
                     corr_cen=corr.size / 2,
                     smooth=smooth_sig_pix,
                     success=success)
    # Return
    return flex_dict
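
The core of flex_shift is a cross-correlation followed by a parabolic fit to the correlation peak. Below is a hedged, self-contained sketch of just that step, with numpy's polynomial fit standing in for utils.func_fit (an assumption); the two Gaussian "sky lines" are synthetic.

import numpy as np

def subpixel_shift(arx_flux, obj_flux, mxshft=20):
    # Cross-correlate the two continuum-subtracted sky spectra
    corr = np.correlate(arx_flux, obj_flux, "same")
    lag0 = corr.size // 2
    # Locate the peak within +/- mxshft pixels of zero lag
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    # Fit a parabola through 7 pixels around the peak; its vertex gives the
    # sub-pixel location of the maximum
    grid = np.arange(max_corr - 3, max_corr + 4, dtype=int)
    c = np.polynomial.polynomial.polyfit(grid, corr[grid], 2)
    max_fit = -0.5 * c[1] / c[2]
    return float(max_fit) - lag0

x = np.arange(500, dtype=float)
arx = np.exp(-0.5 * ((x - 250.0) / 3.0) ** 2)
obj = np.exp(-0.5 * ((x - 252.4) / 3.0) ** 2)
print(subpixel_shift(arx, obj))  # recovers the 2.4-pixel offset (sign per np.correlate's convention)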
Example #4
def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None):
    """Correct wavelengths for flexure, object by object

    Args:
        specobjs (pypeit.specobjs.SpecObjs):
            Extracted spectra to correct
        maskslits (ndarray):
            True = masked slit
        method (str):
            Options are: 'boxcar' (recommended) or 'slitcen'
        sky_file (str):
            Sky file
        mxshft (int, optional):
            Passed to flex_shift()

    Returns:
        list: list of dicts containing flexure results.  Aligned with
        specobjs.  Filled with a basically empty dict if the slit is
        skipped or there is no object
    """
    sv_fdict = None
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")
    # Load Archive
    sky_spectrum = load_sky_spectrum(sky_file)

    nslits = len(maskslits)
    gdslits = np.where(~maskslits)[0]

    # Loop on objects
    flex_list = []

    # Slit/objects to come back to
    return_later_sobjs = []

    # Loop over slits, and then over objects here
    for slit in range(nslits):
        msgs.info(
            "Working on flexure in slit (if an object was detected): {:d}".
            format(slit))
        # TODO -- This only will work for MultiSlit
        indx = specobjs.slitorder_indices(slit)
        this_specobjs = specobjs[indx]
        # Reset
        flex_dict = dict(polyfit=[],
                         shift=[],
                         subpix=[],
                         corr=[],
                         corr_cen=[],
                         spec_file=sky_file,
                         smooth=[],
                         arx_spec=[],
                         sky_spec=[])
        # If no objects on this slit append an empty dictionary
        if slit not in gdslits:
            flex_list.append(flex_dict.copy())
            continue
        for ss, specobj in enumerate(this_specobjs):
            if specobj is None:
                continue
            if len(specobj._data.keys()) == 1:  # Nothing extracted; only the trace exists
                continue
            msgs.info(
                "Working on flexure for object # {:d}".format(specobj.OBJID) +
                " in slit # {:d}".format(specobj.SLITID))
            # Using boxcar
            if method in ['boxcar', 'slitcen']:
                sky_wave = specobj.BOX_WAVE  #.to('AA').value
                sky_flux = specobj.BOX_COUNTS_SKY
            else:
                msgs.error(
                    "Not ready for this flexure method: {}".format(method))

            # Generate 1D spectrum for object
            obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))

            # Calculate the shift
            fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
            punt = False
            if fdict is None:
                msgs.warn(
                    "Flexure shift calculation failed for this spectrum.")
                if sv_fdict is not None:
                    msgs.warn(
                        "Will use the saved estimate from a previous slit/object")
                    fdict = copy.deepcopy(sv_fdict)
                else:
                    # One does not exist yet
                    # Save it for later
                    return_later_sobjs.append([slit, ss])
                    punt = True
            else:
                sv_fdict = copy.deepcopy(fdict)

            # Punt?
            if punt:
                break

            # Interpolate
            new_sky = specobj.flexure_interp(sky_wave, fdict)
            # Update dict
            for key in [
                    'polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth',
                    'arx_spec'
            ]:
                flex_dict[key].append(fdict[key])
            flex_dict['sky_spec'].append(new_sky)

        flex_list.append(flex_dict.copy())

    # Do we need to go back?  Handle the objects punted earlier, now that all
    # slits have been processed.
    for items in return_later_sobjs:
        if sv_fdict is None:
            msgs.info("No flexure corrections could be made")
            break
        # Setup
        slit, ss = items
        flex_dict = flex_list[slit]
        specobj = specobjs[ss]
        sky_wave = specobj.BOX_WAVE  #.to('AA').value
        # Copy me
        fdict = copy.deepcopy(sv_fdict)
        # Interpolate
        new_sky = specobj.flexure_interp(sky_wave, fdict)
        # Update dict
        for key in [
                'polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth',
                'arx_spec'
        ]:
            flex_dict[key].append(fdict[key])
        flex_dict['sky_spec'].append(new_sky)

    return flex_list
Example #5
def standard_sensfunc(wave, flux, ivar, mask_bad, flux_std, mask_balm=None, mask_tell=None,
                 maxiter=35, upper=2.0, lower=2.0, polyorder=5, balm_mask_wid=50., nresln=20., telluric=True,
                 resolution=2700., polycorrect=True, debug=False, polyfunc=False, show_QA=False):
    """
    Generate a sensitivity function based on observed flux and standard spectrum.

    Parameters
    ----------
    wave : ndarray
      wavelength as observed
    flux : ndarray
      counts/s as observed
    ivar : ndarray
      inverse variance
    flux_std : Quantity array
      standard star true flux (erg/s/cm^2/A)
    msk_bad : ndarray
      mask for bad pixels. True is good.
    msk_star: ndarray
      mask for hydrogen recombination lines. True is good.
    msk_tell:ndarray
      mask for telluric regions. True is good.
    maxiter : integer
      maximum number of iterations for polynomial fit
    upper : integer
      number of sigma for rejection in polynomial
    lower : integer
      number of sigma for rejection in polynomial
    polyorder : integer
      order of polynomial fit
    balm_mask_wid: float
      in units of angstrom
      Mask parameter for Balmer absorption. A region equal to
      balm_mask_wid is masked.
    resolution: integer/float.
      spectra resolution
      This paramters should be removed in the future. The resolution should be estimated from spectra directly.
    debug : bool
      if True shows some dubugging plots

    Returns
    -------
    sensfunc
    """
    # Create copy of the arrays to avoid modification
    wave_obs = wave.copy()
    flux_obs = flux.copy()
    ivar_obs = ivar.copy()
    # preparing arrays
    if np.any(np.invert(np.isfinite(ivar_obs))):
        msgs.warn("NaN are present in the inverse variance")

    # check masks
    if mask_tell is None:
        mask_tell = np.ones_like(wave_obs,dtype=bool)
    if mask_balm is None:
        mask_balm = np.ones_like(wave_obs, dtype=bool)

    # Removing outliers
    # Calculate log of flux_obs setting a floor at TINY
    logflux_obs = 2.5 * np.log10(np.maximum(flux_obs, TINY))
    # Set a fixed value for the variance of logflux
    logivar_obs = np.ones_like(logflux_obs) * (10.0 ** 2)
    # Calculate log of flux_std model setting a floor at TINY
    logflux_std = 2.5 * np.log10(np.maximum(flux_std, TINY))
    # Calculate ratio setting a floor at MAGFUNC_MIN and a ceiling at
    # MAGFUNC_MAX
    magfunc = logflux_std - logflux_obs
    magfunc = np.maximum(np.minimum(magfunc, MAGFUNC_MAX), MAGFUNC_MIN)
    msk_magfunc = (magfunc < 0.99 * MAGFUNC_MAX) & (magfunc > 0.99 * MAGFUNC_MIN)

    # Define two new masks, True is good and False is masked pixel
    # mask for all bad pixels on sensfunc
    masktot = mask_bad & msk_magfunc & np.isfinite(ivar_obs) & np.isfinite(logflux_obs) & np.isfinite(logflux_std)
    logivar_obs[np.invert(masktot)] = 0.0
    # mask used for polynomial fit
    msk_fit_sens = masktot & mask_tell & mask_balm

    # Polynomial fitting to derive a smooth sensfunc (i.e. without telluric)
    pypeitFit = fitting.robust_fit(wave_obs[msk_fit_sens], magfunc[msk_fit_sens], polyorder,
                                             function='polynomial', maxiter=maxiter,
                                             lower=lower, upper=upper,
                                             groupbadpix=False,
                                             grow=0, sticky=True, use_mad=True)
    magfunc_poly = pypeitFit.eval(wave_obs)

    # Polynomial corrections on Hydrogen Recombination lines
    if ((sum(msk_fit_sens) > 0.5 * len(msk_fit_sens)) & polycorrect):
        ## Only correct Hydrogen Recombination lines with polyfit in the telluric free region
        balmer_clean = np.zeros_like(wave_obs, dtype=bool)
        # Commented out the bluest recombination lines since they are weak for spectroscopic standard stars.
        #836.4, 3969.6, 3890.1, 4102.8, 4102.8, 4341.6, 4862.7,   \
        lines_hydrogen = np.array([5407.0, 6564.6, 8224.8, 8239.2, 8203.6, 8440.3, 8469.6, 8504.8, 8547.7, 8600.8, \
                                   8667.4, 8752.9, 8865.2, 9017.4, 9229.0, 10049.4, 10938.1, 12818.1, 21655.0])
        for line_hydrogen in lines_hydrogen:
            ihydrogen = np.abs(wave_obs - line_hydrogen) <= balm_mask_wid
            balmer_clean[ihydrogen] = True
        msk_clean = ((balmer_clean) | (magfunc == MAGFUNC_MAX) | (magfunc == MAGFUNC_MIN)) & \
                    (magfunc_poly > MAGFUNC_MIN) & (magfunc_poly < MAGFUNC_MAX)
        magfunc[msk_clean] = magfunc_poly[msk_clean]
        msk_badpix = np.isfinite(ivar_obs) & (ivar_obs > 0)
        magfunc[np.invert(msk_badpix)] = magfunc_poly[np.invert(msk_badpix)]
    else:
        ## If more than half of your spectrum is masked (or polycorrect=False), do not correct it with the polyfit
        msgs.warn('No polynomial corrections performed on Hydrogen Recombination line regions')

    if not telluric:
        # Apply mask to ivar
        #logivar_obs[~msk_fit_sens] = 0.

        # ToDo
        # Compute an effective resolution for the standard. This could be improved
        # to setup an array of breakpoints based on the resolution. At the
        # moment we are using only one number
        msgs.work("Should pull resolution from arc line analysis")
        msgs.work("At the moment the resolution is taken as the PixelScale")
        msgs.work("This needs to be changed!")
        std_pix = np.median(np.abs(wave_obs - np.roll(wave_obs, 1)))
        std_res = np.median(wave_obs/resolution) # median resolution in units of Angstrom.
        #std_res = std_pix
        #resln = std_res
        if (nresln * std_res) < std_pix:
            msgs.warn("Bspline breakpoints spacing shoud be larger than 1pixel")
            msgs.warn("Changing input nresln to fix this")
            nresln = std_res / std_pix

        # Fit magfunc with bspline
        kwargs_bspline = {'bkspace': std_res * nresln}
        kwargs_reject = {'maxrej': 5}
        msgs.info("Initialize bspline for flux calibration")
#        init_bspline = pydl.bspline(wave_obs, bkspace=kwargs_bspline['bkspace'])
        init_bspline = bspline.bspline(wave_obs, bkspace=kwargs_bspline['bkspace'])
        fullbkpt = init_bspline.breakpoints

        # TESTING turning off masking for now
        # remove masked regions from breakpoints
        msk_obs = np.ones_like(wave_obs).astype(bool)
        msk_obs[np.invert(masktot)] = False
        msk_bkpt = interpolate.interp1d(wave_obs, msk_obs, kind='nearest',
                                        fill_value='extrapolate')(fullbkpt)
        init_breakpoints = fullbkpt[msk_bkpt > 0.999]

        # init_breakpoints = fullbkpt
        msgs.info("Bspline fit on magfunc. ")
        bset1, bmask = fitting.iterfit(wave_obs, magfunc, invvar=logivar_obs, inmask=msk_fit_sens, upper=upper, lower=lower,
                                    fullbkpt=init_breakpoints, maxiter=maxiter, kwargs_bspline=kwargs_bspline,
                                    kwargs_reject=kwargs_reject)
        logfit1, _ = bset1.value(wave_obs)
        logfit_bkpt, _ = bset1.value(init_breakpoints)

        if debug:
            # Check for calibration
            plt.figure(1)
            plt.plot(wave_obs, magfunc, drawstyle='steps-mid', color='black', label='magfunc')
            plt.plot(wave_obs, logfit1, color='cornflowerblue', label='logfit1')
            plt.plot(wave_obs[np.invert(msk_fit_sens)], magfunc[np.invert(msk_fit_sens)], '+', color='red', markersize=5.0,
                     label='masked magfunc')
            plt.plot(wave_obs[np.invert(msk_fit_sens)], logfit1[np.invert(msk_fit_sens)], '+', color='red', markersize=5.0,
                     label='masked logfit1')
            plt.plot(init_breakpoints, logfit_bkpt, '.', color='green', markersize=4.0, label='breakpoints')
            plt.plot(init_breakpoints, np.interp(init_breakpoints, wave_obs, magfunc), '.', color='green',
                     markersize=4.0,
                     label='breakpoints')
            plt.plot(wave_obs, 1.0 / np.sqrt(logivar_obs), color='orange', label='sigma')
            plt.legend()
            plt.xlabel('Wavelength [ang]')
            plt.ylim(0.0, 1.2 * MAGFUNC_MAX)
            plt.title('1st Bspline fit')
            plt.show()
        # Create sensitivity function
        magfunc = np.maximum(np.minimum(logfit1, MAGFUNC_MAX), MAGFUNC_MIN)
        if ((sum(msk_fit_sens) > 0.5 * len(msk_fit_sens)) & polycorrect):
            msk_clean = ((magfunc == MAGFUNC_MAX) | (magfunc == MAGFUNC_MIN)) & \
                        (magfunc_poly > MAGFUNC_MIN) & (magfunc_poly<MAGFUNC_MAX)
            magfunc[msk_clean] = magfunc_poly[msk_clean]
            msk_badpix = np.isfinite(ivar_obs) & (ivar_obs>0)
            magfunc[np.invert(msk_badpix)] = magfunc_poly[np.invert(msk_badpix)]
        else:
            ## If more than half of your spectrum is masked (or polycorrect=False), do not correct it with the polyfit
            msgs.warn('No polynomial corrections performed on Hydrogen Recombination line regions')

    # Calculate sensfunc
    if polyfunc:
        sensfunc = 10.0 ** (0.4 * magfunc_poly)
        magfunc = magfunc_poly
    else:
        sensfunc = 10.0 ** (0.4 * magfunc)

    if debug:
        plt.figure()
        magfunc_raw = logflux_std - logflux_obs
        plt.plot(wave_obs[masktot],magfunc_raw[masktot] , 'k-',lw=3,label='Raw Magfunc')
        plt.plot(wave_obs[masktot],magfunc_poly[masktot] , 'c-',lw=3,label='Polynomial Fit')
        plt.plot(wave_obs[np.invert(mask_tell)], magfunc_raw[np.invert(mask_tell)], 's',
                 color='0.7',label='Telluric Region')
        plt.plot(wave_obs[np.invert(mask_balm)], magfunc_raw[np.invert(mask_balm)], 'r+',label='Recombination Line region')
        plt.plot(wave_obs[masktot], magfunc[masktot],'b-',label='Final Magfunc')
        plt.legend(fancybox=True, shadow=True)
        plt.xlim([0.995*np.min(wave_obs[masktot]),1.005*np.max(wave_obs[masktot])])
        plt.ylim([0.,1.2*np.max(magfunc[masktot])])
        plt.show()
        plt.close()

    return sensfunc, masktot
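
As a compact illustration of the magnitude-function bookkeeping above: the sensitivity function is 10**(0.4 * magfunc), where magfunc is the log ratio of the true standard-star flux to the observed counts. A plain numpy polynomial fit stands in for fitting.robust_fit here (an assumption for sketch purposes), and all input arrays are synthetic.

import numpy as np

TINY = 1e-15
wave = np.linspace(4000., 9000., 2000)                        # toy wavelength grid [Angstrom]
flux_obs = np.exp(-0.5 * ((wave - 6500.) / 800.)**2) + 0.1    # toy observed counts/s
flux_std = 3.0 * flux_obs * (wave / 6500.)**0.5               # toy "true" standard flux

# magfunc = 2.5 log10(F_std) - 2.5 log10(F_obs), floored at TINY
magfunc = 2.5 * np.log10(np.maximum(flux_std, TINY)) \
          - 2.5 * np.log10(np.maximum(flux_obs, TINY))
# Smooth polynomial fit in place of the iterative rejection fit
coef = np.polynomial.polynomial.polyfit(wave, magfunc, 5)
magfunc_poly = np.polynomial.polynomial.polyval(wave, coef)
sensfunc = 10.0 ** (0.4 * magfunc_poly)                       # multiply counts/s by this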
Example #6
def lacosmic(det,
             sciframe,
             saturation,
             nonlinear,
             varframe=None,
             maxiter=1,
             grow=1.5,
             remove_compact_obj=True,
             sigclip=5.0,
             sigfrac=0.3,
             objlim=5.0):
    """
    Identify cosmic rays using the L.A.Cosmic algorithm
    U{http://www.astro.yale.edu/dokkum/lacosmic/}
    (article : U{http://arxiv.org/abs/astro-ph/0108003})
    This routine is mostly courtesy of Malte Tewes

    Args:
        det (int): Detector index
        sciframe (ndarray): Science frame to search for cosmic rays
        saturation (float): Saturation level of the detector
        nonlinear (float): Fraction of the saturation level at which the
            detector response becomes non-linear
        varframe (ndarray, optional): Variance frame; if None, a noise model
            is built from a median-filtered science frame
        maxiter (int): Maximum number of iterations
        grow (float): Radius by which to grow the final cosmic-ray mask
        remove_compact_obj (bool): Reject candidates that look like compact
            bright objects
        sigclip (float): Laplacian S/N threshold for candidate pixels
        sigfrac (float): Fraction of sigclip used for neighboring pixels
        objlim (float): Contrast limit between the Laplacian image and the
            fine-structure image

    Returns:
        ndarray: mask of cosmic rays (0=no CR, 1=CR)

    """

    dnum = parse.get_dnum(det)

    msgs.info("Detecting cosmic rays with the L.A.Cosmic algorithm")
    #    msgs.work("Include these parameters in the settings files to be adjusted by the user")
    # Set the settings
    scicopy = sciframe.copy()
    crmask = np.zeros(sciframe.shape, dtype=bool)
    sigcliplow = sigclip * sigfrac

    # Determine if there are saturated pixels
    satpix = np.zeros_like(sciframe)
    #    satlev = settings_det['saturation']*settings_det['nonlinear']
    satlev = saturation * nonlinear
    wsat = np.where(sciframe >= satlev)
    if wsat[0].size == 0:
        satpix = None
    else:
        satpix[wsat] = 1.0
        satpix = satpix.astype(bool)

    # Define the kernels
    laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0],
                           [0.0, -1.0, 0.0]])  # Laplacian kernel
    growkernel = np.ones((3, 3))
    for i in range(1, maxiter + 1):
        msgs.info("Convolving image with Laplacian kernel")
        # Subsample, convolve, clip negative values, and rebin to original size
        subsam = utils.subsample(scicopy)
        conved = signal.convolve2d(subsam,
                                   laplkernel,
                                   mode="same",
                                   boundary="symm")
        clipped = conved.clip(min=0.0)
        lplus = utils.rebin_evlist(clipped, np.array(clipped.shape) / 2.0)

        msgs.info("Creating noise model")
        # Build a custom noise map and compare it to the Laplacian
        m5 = ndimage.filters.median_filter(scicopy, size=5, mode='mirror')
        if varframe is None:
            noise = np.sqrt(np.abs(m5))
        else:
            noise = np.sqrt(varframe)
        msgs.info("Calculating Laplacian signal to noise ratio")

        # Laplacian S/N; the 2.0 comes from the 2x2 subsampling
        s = lplus / (2.0 * noise)

        # Remove the large structures
        sp = s - ndimage.filters.median_filter(s, size=5, mode='mirror')

        msgs.info("Selecting candidate cosmic rays")
        # Candidate cosmic rays (this will include HII regions)
        candidates = sp > sigclip
        nbcandidates = np.sum(candidates)

        msgs.info("{0:5d} candidate pixels".format(nbcandidates))

        # At this stage we use the saturated stars to mask the candidates, if available :
        if satpix is not None:
            msgs.info("Masking saturated pixels")
            candidates = np.logical_and(np.logical_not(satpix), candidates)
            nbcandidates = np.sum(candidates)

            msgs.info(
                "{0:5d} candidate pixels not part of saturated stars".format(
                    nbcandidates))

        msgs.info("Building fine structure image")

        # We build the fine structure image :
        m3 = ndimage.filters.median_filter(scicopy, size=3, mode='mirror')
        m37 = ndimage.filters.median_filter(m3, size=7, mode='mirror')
        f = m3 - m37
        f /= noise
        f = f.clip(min=0.01)

        msgs.info("Removing suspected compact bright objects")

        # Now we have our better selection of cosmics :

        if remove_compact_obj:
            cosmics = np.logical_and(candidates, sp / f > objlim)
        else:
            cosmics = candidates
        nbcosmics = np.sum(cosmics)

        msgs.info("{0:5d} remaining candidate pixels".format(nbcosmics))

        # What follows is a special treatment for neighbors, with more relaxed constraints.

        msgs.info("Finding neighboring pixels affected by cosmic rays")

        # We grow these cosmics a first time to determine the immediate neighborhood :
        growcosmics = signal.convolve2d(cosmics.astype(np.float32),
                                        growkernel,
                                        mode="same",
                                        boundary="symm").astype(bool)

        # From this grown set, we keep those that have sp > sigmalim
        # so obviously not requiring sp/f > objlim, otherwise it would be pointless
        growcosmics = np.logical_and(sp > sigclip, growcosmics)

        # Now we repeat this procedure, but lower the detection limit to sigmalimlow :

        finalsel = signal.convolve2d(growcosmics.astype(np.float32),
                                     growkernel,
                                     mode="same",
                                     boundary="symm").astype(bool)
        finalsel = np.logical_and(sp > sigcliplow, finalsel)

        # Unmask saturated pixels:
        if satpix is not None:
            msgs.info("Masking saturated stars")
            finalsel = np.logical_and(np.logical_not(satpix), finalsel)

        ncrp = np.sum(finalsel)

        msgs.info("{0:5d} pixels detected as cosmics".format(ncrp))

        # We find how many cosmics are not yet known :
        newmask = np.logical_and(np.logical_not(crmask), finalsel)
        nnew = np.sum(newmask)

        # We update the mask with the cosmics we have found :
        crmask = np.logical_or(crmask, finalsel)

        msgs.info(
            "Iteration {0:d} -- {1:d} pixels identified as cosmic rays ({2:d} new)"
            .format(i, ncrp, nnew))
        if ncrp == 0: break
    # Additional algorithms (not traditionally implemented by LA cosmic) to remove some false positives.
    msgs.work(
        "The following algorithm would be better on the rectified, tilts-corrected image"
    )
    filt = ndimage.sobel(sciframe, axis=1, mode='constant')
    filty = ndimage.sobel(filt / np.sqrt(np.abs(sciframe)),
                          axis=0,
                          mode='constant')
    filty[np.where(np.isnan(filty))] = 0.0

    sigimg = cr_screen(filty)

    sigsmth = ndimage.filters.gaussian_filter(sigimg, 1.5)
    sigsmth[np.where(np.isnan(sigsmth))] = 0.0
    sigmask = np.zeros(sciframe.shape, dtype=bool)
    sigmask[np.where(sigsmth > sigclip)] = True
    crmask = np.logical_and(crmask, sigmask)
    msgs.info("Growing cosmic ray mask by 1 pixel")
    crmask = grow_masked(crmask.astype(float), grow, 1.0)

    return crmask.astype(bool)
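
The heart of the loop above is the Laplacian signal-to-noise statistic from van Dokkum's L.A.Cosmic algorithm. Here is a self-contained sketch of that statistic, with np.repeat and a block-average reshape standing in for utils.subsample and utils.rebin_evlist (an assumption about what those helpers do):

import numpy as np
from scipy import ndimage, signal

def laplacian_snr(image, varframe=None):
    # 2x2 subsample, convolve with the Laplacian kernel, clip negatives,
    # and block-average back to the original sampling
    laplkernel = np.array([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]])
    subsam = np.repeat(np.repeat(image, 2, axis=0), 2, axis=1)
    conved = signal.convolve2d(subsam, laplkernel, mode="same", boundary="symm")
    clipped = conved.clip(min=0.0)
    lplus = clipped.reshape(image.shape[0], 2, image.shape[1], 2).mean(axis=(1, 3))
    # Noise model: sqrt of a 5x5 median-filtered image unless a variance frame is given
    if varframe is None:
        noise = np.sqrt(np.abs(ndimage.median_filter(image, size=5, mode='mirror')))
    else:
        noise = np.sqrt(varframe)
    noise = np.maximum(noise, 1e-10)   # guard against division by zero
    s = lplus / (2.0 * noise)          # the 2.0 comes from the 2x2 subsampling
    # Remove large-scale structure so only sharp features remain
    return s - ndimage.median_filter(s, size=5, mode='mirror')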
Example #7
def apply_sens_tell_specobjs(specobjs, sens_meta, sens_table, airmass, exptime,
                             extinct_correct=True, tell_correct=False,
                             longitude=None, latitude=None, debug=False, show=False):
    """Apply the sensitivity function (and optionally a telluric correction) to
    each extraction in specobjs, filling the FLAM, FLAM_SIG, FLAM_IVAR, and
    MASK entries in place.
    """
    # TODO This function should operate on a single object
    func = sens_meta['FUNC'][0]
    polyorder_vec = sens_meta['POLYORDER_VEC'][0]
    nimgs = len(specobjs)

    if show:
        fig = plt.figure(figsize=(12, 8))
        xmin, xmax = [], []
        ymin, ymax = [], []

    for ispec in range(nimgs):
        # get the ECH_ORDER, ECH_ORDERINDX, WAVELENGTH from your science
        sobj_ispec = specobjs[ispec]
        ## TODO Comment on the logic here. Hard to follow
        try:
            ech_order, ech_orderindx, idx = sobj_ispec.ech_order, sobj_ispec.ech_orderindx, sobj_ispec.idx
            msgs.info('Applying sensfunc to Echelle data')
        except AttributeError:
            ech_orderindx = 0
            idx = sobj_ispec.idx
            msgs.info('Applying sensfunc to Longslit/Multislit data')

        for extract_type in ['boxcar', 'optimal']:
            extract = getattr(sobj_ispec, extract_type)

            if len(extract) == 0:
                continue
            msgs.info("Fluxing {:s} extraction for:".format(extract_type) + msgs.newline() + "{}".format(idx))
            wave = extract['WAVE'].value.copy()
            wave_mask = wave > 1.0
            counts = extract['COUNTS'].copy()
            counts_ivar = extract['COUNTS_IVAR'].copy()
            mask = extract['MASK'].copy()

            # get sensfunc from the sens_table
            coeff = sens_table[ech_orderindx]['OBJ_THETA'][0:polyorder_vec[ech_orderindx] + 2]
            wave_min = sens_table[ech_orderindx]['WAVE_MIN']
            wave_max = sens_table[ech_orderindx]['WAVE_MAX']
            sensfunc = np.zeros_like(wave)
            sensfunc[wave_mask] = np.exp(utils.func_val(coeff, wave[wave_mask], func,
                                             minx=wave_min, maxx=wave_max))

            # get telluric from the sens_table
            if tell_correct:
                msgs.work('Evaluate telluric!')
                telluric = None
            else:
                telluric = None

            flam, flam_ivar, outmask = apply_sens_tell_spec(wave, counts, counts_ivar, sensfunc, airmass, exptime,
                                                      mask=mask, extinct_correct=extinct_correct, telluric=telluric,
                                                      longitude=longitude, latitude=latitude, debug=debug)
            flam_sig = np.sqrt(utils.inverse(flam_ivar))
            # The following will be changed directly in the specobjs, so do not need to return anything.
            extract['MASK'] = outmask
            extract['FLAM'] = flam
            extract['FLAM_SIG'] = flam_sig
            extract['FLAM_IVAR'] = flam_ivar

            if show:
                xmin_ispec = wave[wave_mask].min()
                xmax_ispec = wave[wave_mask].max()
                xmin.append(xmin_ispec)
                xmax.append(xmax_ispec)
                ymin_ispec, ymax_ispec = coadd1d.get_ylim(flam, flam_ivar, outmask)
                ymin.append(ymin_ispec)
                ymax.append(ymax_ispec)

                med_width = (2.0 * np.ceil(0.1 / 10.0 * np.size(wave[outmask])) + 1).astype(int)
                flam_med, flam_ivar_med = coadd1d.median_filt_spec(flam, flam_ivar, outmask, med_width)
                if extract_type == 'boxcar':
                    plt.plot(wave[wave_mask], flam_med[wave_mask], color='black', drawstyle='steps-mid', zorder=1, alpha=0.8)
                    #plt.plot(wave[wave_mask], np.sqrt(utils.calc_ivar(flam_ivar_med[wave_mask])), zorder=2, color='m',
                    #         alpha=0.7, drawstyle='steps-mid', linestyle=':')
                else:
                    plt.plot(wave[wave_mask], flam_med[wave_mask], color='dodgerblue', drawstyle='steps-mid', zorder=1, alpha=0.8)
                    #plt.plot(wave[wave_mask], np.sqrt(utils.calc_ivar(flam_ivar_med[wave_mask])), zorder=2, color='red',
                    #         alpha=0.7, drawstyle='steps-mid', linestyle=':')
    if show:
        xmin_final, xmax_final = np.min(xmin), np.max(xmax)
        ymax_final = 1.3*np.median(ymax)
        ymin_final = -0.15*ymax_final
        plt.xlim([xmin_final, xmax_final])
        plt.ylim([ymin_final, ymax_final])
        plt.title('Blue is Optimal extraction and Black is Boxcar extraction',fontsize=16)
        plt.xlabel('Wavelength (Angstrom)')
        plt.ylabel('Flux')
        plt.show()
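
apply_sens_tell_spec itself is not shown in this listing. A hedged sketch of the flux-calibration step it presumably performs, ignoring the extinction and telluric handling: calibrated flux is counts/s times the sensitivity function, and the inverse variance scales by the inverse square of the same factor. The function name and simplifications below are illustrative, not PypeIt's API.

import numpy as np

def flux_calibrate_sketch(counts, counts_ivar, sensfunc, exptime):
    # counts are total counts; divide by exptime for counts/s, then scale
    factor = sensfunc / exptime
    flam = counts * factor
    with np.errstate(divide='ignore', invalid='ignore'):
        flam_ivar = np.where(factor > 0, counts_ivar / factor**2, 0.0)
    return flam, flam_ivar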
Example #8
    def operations(self, key, axisID):
        """Canvas operations

        Args:
            key (str): Which key has been pressed
            axisID (int): The index of the axis where the key has been pressed (see get_axisID)
        """
        # Check if the user really wants to quit
        if key == 'q' and self._qconf:
            if self._changes:
                self.update_infobox(
                    message=
                    "WARNING: There are unsaved changes!!\nPress q again to exit",
                    yesno=False)
                self._qconf = True
            else:
                msgs.bug(
                    "Need to change this to kill and return the results to PypeIt"
                )
                plt.close()
        elif self._qconf:
            self.update_infobox(default=True)
            self._qconf = False

        # Manage responses from questions posed to the user.
        if self._respreq[0]:
            if key != "y" and key != "n":
                return
            else:
                # Switch off the required response
                self._respreq[0] = False
                # Deal with the response
                if self._respreq[1] == "write":
                    # First remove the old file, and save the new one
                    msgs.work("Not implemented yet!")
                    self.write()
                else:
                    return
            # Reset the info box
            self.update_infobox(default=True)
            return

        if key == '?':
            self.print_help()
        elif key == 'a':
            if self._fitdict['coeff'] is not None:
                self.auto_id()
            else:
                msgs.info("You must identify a few lines first")
        elif key == 'c':
            wclr = np.where((self._lineflg == 2) | (self._lineflg == 3))
            self._lineflg[wclr] = 0
            self.replot()
        elif key == 'd':
            self._lineflg *= 0
            self._lineids *= 0.0
            self._fitdict['coeff'] = None
            self.replot()
        elif key == 'f':
            self.fitsol_fit()
            self.replot()
        elif key == 'l':
            self.load_IDs()
        elif key == 'q':
            if self._changes:
                self.update_infobox(
                    message=
                    "WARNING: There are unsaved changes!!\nPress q again to exit",
                    yesno=False)
                self._qconf = True
            else:
                plt.close()
        elif key == 'r':
            if self._detns_idx == -1:
                msgs.info("You must select a line first")
            elif self._fitr is None:
                msgs.info("You must select a fitting region first")
            else:
                msgs.work("Feature not yet implemented")
        elif key == 's':
            self.save_IDs()
        elif key == 'w':
            self.toggle_wavepix(toggled=True)
            self.replot()
        elif key == 'z':
            self.delete_line_id()
        elif key == '+':
            if self._fitdict["polyorder"] < 10:
                self._fitdict["polyorder"] += 1
                self.update_infobox(message="Polynomial order = {0:d}".format(
                    self._fitdict["polyorder"]),
                                    yesno=False)
                self.fitsol_fit()
                self.replot()
            else:
                self.update_infobox(message="Polynomial order must be <= 10",
                                    yesno=False)
        elif key == '-':
            if self._fitdict["polyorder"] > 1:
                self._fitdict["polyorder"] -= 1
                self.update_infobox(message="Polynomial order = {0:d}".format(
                    self._fitdict["polyorder"]),
                                    yesno=False)
                self.fitsol_fit()
                self.replot()
            else:
                self.update_infobox(message="Polynomial order must be >= 1",
                                    yesno=False)
        self.canvas.draw()
Example #9
def comb_frames(frames_arr, saturation=None,
                     maskvalue=1048577, method='weightmean', satpix='reject', cosmics=None,
                     n_lohi=[0,0], sig_lohi=[3.,3.], replace='maxnonsat'):
    """
    Combine several frames

    .. todo::
        - Make better use of np.ma.MaskedArray objects throughout?
        - More testing of replacement code necessary?
        - Improve docstring...

    Parameters
    ----------
    frames_arr : ndarray (3D)
      Array of frames to be combined
    weights : str, or None (optional)
      How should the frame combination by weighted (not currently
      implemented)
    maskvalue : int (optional)
      What should the masked values be set to (should be greater than
      the detector's saturation value -- Default = 1 + 2**20)
    reject : dict, optional
      Set the rejection parameters:  cosmics, lowhigh, level, replace
      Perhaps these should be called out separately
    satpix : str, optional
      Method for handling saturated pixels
    saturation : float, optional
      Saturation value;  only required for some choices of reject['replace']

    Returns
    -------
    comb_frame : ndarray
    """
    ###########
    # FIRST DO SOME CHECKS ON THE INPUT
    ###########
    # Check that frames were provided
    if frames_arr is None:
        msgs.error("No frames were given to comb_frames to combine")
    (sz_x, sz_y, num_frames) = np.shape(frames_arr)
    if num_frames == 1:
        msgs.info("Only one frame to combine!")
        msgs.info("Returning input frame")
        return frames_arr[:, :, 0]
    else:
        msgs.info("Combining {0:d} frames".format(num_frames))

    # Check if the user has allowed the combination of long and short
    # frames (e.g. different exposure times)
    msgs.work("lscomb feature has not been included here yet...")
    # Check the user hasn't requested to reject more frames than available
    if n_lohi[0] > 0 and n_lohi[1] > 0 and n_lohi[0] + n_lohi[1] >= num_frames:
        msgs.error('You cannot reject more frames than are available with \'n_lohi\'.'
                   + msgs.newline() + 'There are {0:d} frames '.format(num_frames)
                   + 'and n_lohi will reject {0:d} low and {1:d} high values.'.format(
                                                                n_lohi[0], n_lohi[1]))

    # Calculate the values to be used if all frames are rejected in some pixels
    if replace == 'min':
        allrej_arr = np.amin(frames_arr, axis=2)
    elif replace == 'max':
        allrej_arr = np.amax(frames_arr, axis=2)
    elif replace == 'mean':
        allrej_arr = np.mean(frames_arr, axis=2)
    elif replace == 'median':
        allrej_arr = np.median(frames_arr, axis=2)
    elif replace == 'weightmean':
        msgs.work("No weights are implemented yet")
        allrej_arr = frames_arr.copy()
        allrej_arr = masked_weightmean(allrej_arr, maskvalue)
    elif replace == 'maxnonsat':
        allrej_arr = frames_arr.copy()
        allrej_arr = maxnonsat(allrej_arr, saturation)
    else:
        msgs.error("You must specify what to do in case all pixels are rejected")

    ################
    # Saturated Pixels
    msgs.info("Finding saturated and non-linear pixels")
    if satpix == 'force':
        # If a saturated pixel is in one of the frames, force them to
        # all have saturated pixels
#		satw = np.zeros_like(frames_arr)
#		satw[np.where(frames_arr > settings.spect['det']['saturation']*settings.spect['det']['nonlinear'])] = 1.0
#		satw = np.any(satw,axis=2)
#		del satw
        setsat = np.zeros_like(frames_arr)
        setsat[frames_arr > saturation] = 1
    elif satpix == 'reject':
        # Ignore saturated pixels in frames if possible
        frames_arr[frames_arr > saturation] = maskvalue
    elif satpix == 'nothing':
        # Don't do anything special for saturated pixels (Hopefully the
        # user has specified how to deal with them below!)
        pass
    else:
        msgs.error('Option \'{0}\' '.format(satpix)
                   + 'for dealing with saturated pixels was not recognised.')

    ################
    # Cosmic Rays
    if cosmics is not None and cosmics > 0.0:
        msgs.info("Rejecting cosmic rays")  # Use a robust statistic
        masked_fa = np.ma.MaskedArray(frames_arr, mask=frames_arr==maskvalue)
        medarr = np.ma.median(masked_fa, axis=2)
        stdarr = 1.4826*np.ma.median(np.ma.absolute(masked_fa - medarr[:,:,None]), axis=2)
        indx = (frames_arr != maskvalue) \
                    & (frames_arr > (medarr.data + cosmics * stdarr.data)[:,:,None])
        frames_arr[indx] = maskvalue
        # Delete unnecessary arrays
        del medarr, stdarr
    else:
        msgs.info("Not rejecting cosmic rays")

    ################
    # Low and High pixel rejection --- Masks *additional* pixels
    rejlo, rejhi = n_lohi
    if n_lohi[0] > 0 or n_lohi[1] > 0:

        # First reject low pixels
        frames_arr = np.sort(frames_arr, axis=2)
        if n_lohi[0] > 0:
            msgs.info("Rejecting {0:d} deviant low pixels".format(n_lohi[0]))
            while rejlo > 0:
                xi, yi = np.indices((sz_x, sz_y))
                frames_arr[xi, yi, np.argmin(frames_arr, axis=2)] = maskvalue
                del xi, yi
                rejlo -= 1

        # Now reject high pixels
        if n_lohi[1] > 0:
            msgs.info("Rejecting {0:d} deviant high pixels".format(n_lohi[1]))
            frames_arr[np.where(frames_arr == maskvalue)] *= -1
            while rejhi > 0:
                xi, yi = np.indices((sz_x, sz_y))
                frames_arr[xi, yi, np.argmax(frames_arr, axis=2)] = -maskvalue
                del xi, yi
                rejhi -= 1
            frames_arr[np.where(frames_arr == -maskvalue)] *= -1

# TODO: Do we need this?
# The following is an example of *not* masking additional pixels
#		if reject['lowhigh'][1] > 0:
#			msgs.info("Rejecting {0:d} deviant high pixels".format(reject['lowhigh'][1]))
#			masktemp[:,:,-reject['lowhigh'][0]:] = True
    else:
        msgs.info("Not rejecting any low/high pixels")

    ################
    # Deviant Pixels
    # TODO: sig_lohi (what was level) is not actually used, instead this
    # just selects if cosmics should be used.  Is this intentional?  Why
    # not just do: `if cosmics > 0:`?
    if sig_lohi[0] > 0.0 or sig_lohi[1] > 0.0:
        msgs.info("Rejecting deviant pixels")  # Use a robust statistic

        masked_fa = np.ma.MaskedArray(frames_arr, mask=frames_arr==maskvalue)
        medarr = np.ma.median(masked_fa, axis=2)
        stdarr = 1.4826*np.ma.median(np.ma.absolute(masked_fa - medarr[:,:,None]), axis=2)
        indx = (frames_arr != maskvalue) \
                    & ( (frames_arr > (medarr.data + cosmics*stdarr.data)[:,:,None])
                        | (frames_arr < (medarr.data - cosmics*stdarr.data)[:,:,None]))
        frames_arr[indx] = maskvalue

        # Delete unnecessary arrays
        del medarr, stdarr
    else:
        msgs.info("Not rejecting deviant pixels")

    ##############
    # Combine the arrays
    msgs.info("Combining frames with a {0:s} operation".format(method))
    if method == 'mean':
        comb_frame = np.ma.mean(np.ma.MaskedArray(frames_arr, mask=frames_arr==maskvalue), axis=2)
    elif method == 'median':
        comb_frame = np.ma.median(np.ma.MaskedArray(frames_arr, mask=frames_arr==maskvalue), axis=2)
    elif method == 'weightmean':
        comb_frame = frames_arr.copy()
        comb_frame = masked_weightmean(comb_frame, maskvalue)
    else:
        msgs.error("Combination type '{0:s}' is unknown".format(method))

    ##############
    # If any pixels are completely masked, apply user-specified function
    msgs.info("Replacing completely masked pixels with the {0:s} value of the input frames".format(replace))
    indx = comb_frame == maskvalue
    comb_frame[indx] = allrej_arr[indx]
    # Delete unnecessary arrays
    del allrej_arr

    ##############
    # Apply the saturated pixels:
    if satpix == 'force':
        msgs.info("Applying saturated pixels to final combined image")
        comb_frame[setsat] = saturation # settings.spect[dnum]['saturation']

    ##############
    # And return a 2D numpy array
    msgs.info("{0:d} frames combined successfully!".format(num_frames))
    # Make sure the returned array is the correct type
    comb_frame = np.array(comb_frame, dtype=float)
    return comb_frame
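
The rejection steps above share one pattern: flag deviant pixels with a robust (MAD-based) sigma clip along the stack axis, then combine with a masked statistic. A self-contained sketch of that pattern using np.ma directly, in the spirit of the ``.. todo::`` in the docstring:

import numpy as np

def sigma_clip_combine(stack, sigma=5.0):
    # stack : (nx, ny, nframes) array
    med = np.median(stack, axis=2)
    # 1.4826 * MAD is a robust estimate of the standard deviation
    mad = 1.4826 * np.median(np.abs(stack - med[:, :, None]), axis=2)
    bad = np.abs(stack - med[:, :, None]) > (sigma * np.maximum(mad, 1e-10))[:, :, None]
    comb = np.ma.median(np.ma.MaskedArray(stack, mask=bad), axis=2)
    # Fall back to the straight median wherever every frame was rejected
    return np.where(np.ma.getmaskarray(comb), med, comb.filled(0.0))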
Example #10
    def reduce_all(self):
        """
        Main driver of the entire reduction

        Calibration and extraction via a series of calls to reduce_exposure()

        """
        # Validate the parameter set
        self.par.validate_keys(required=[
            'rdx', 'calibrations', 'scienceframe', 'reduce', 'flexure'
        ])
        self.tstart = time.time()

        # Find the standard frames
        is_standard = self.fitstbl.find_frames('standard')

        # Find the science frames
        is_science = self.fitstbl.find_frames('science')

        # Frame indices
        frame_indx = np.arange(len(self.fitstbl))

        # Iterate over each calibration group and reduce the standards
        for i in range(self.fitstbl.n_calib_groups):

            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)

            # Find the indices of the standard frames in this calibration group:
            grp_standards = frame_indx[is_standard & in_grp]

            # Reduce all the standard frames, loop on unique comb_id
            u_combid_std = np.unique(self.fitstbl['comb_id'][grp_standards])
            for j, comb_id in enumerate(u_combid_std):
                frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
                bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
                if not self.outfile_exists(frames[0]) or self.overwrite:
                    std_spec2d, std_sobjs = self.reduce_exposure(
                        frames, bg_frames=bg_frames)
                    # TODO come up with sensible naming convention for save_exposure for combined files
                    self.save_exposure(frames[0], std_spec2d, std_sobjs,
                                       self.basename)
                else:
                    msgs.info(
                        'Output file: {:s} already exists'.format(
                            self.fitstbl.construct_basename(frames[0])) +
                        '. Set overwrite=True to recreate and overwrite.')

        # Iterate over each calibration group again and reduce the science frames
        for i in range(self.fitstbl.n_calib_groups):
            # Find all the frames in this calibration group
            in_grp = self.fitstbl.find_calib_group(i)

            # Find the indices of the science frames in this calibration group:
            grp_science = frame_indx[is_science & in_grp]
            # Associate standards (previously reduced above) for this setup
            std_outfile = self.get_std_outfile(frame_indx[is_standard])
            # Reduce all the science frames; keep the basenames of the science frames for use in flux calibration
            science_basename = [None] * len(grp_science)
            # Loop on unique comb_id
            u_combid = np.unique(self.fitstbl['comb_id'][grp_science])
            for j, comb_id in enumerate(u_combid):
                frames = np.where(self.fitstbl['comb_id'] == comb_id)[0]
                # Find all frames whose comb_id matches the current frame's bkg_id.
                bg_frames = np.where((self.fitstbl['comb_id'] ==
                                      self.fitstbl['bkg_id'][frames][0])
                                     & (self.fitstbl['comb_id'] >= 0))[0]
                # JFH changed the syntax below to that above, which allows frames to be used more than once
                # as a background image. The syntax below would require that we could somehow list multiple
                # numbers for the bkg_id which is impossible without a comma separated list
                #                bg_frames = np.where(self.fitstbl['bkg_id'] == comb_id)[0]
                if not self.outfile_exists(frames[0]) or self.overwrite:
                    # TODO -- Should we reset/regenerate self.slits.mask for a new exposure
                    sci_spec2d, sci_sobjs = self.reduce_exposure(
                        frames, bg_frames=bg_frames, std_outfile=std_outfile)
                    science_basename[j] = self.basename
                    # TODO come up with sensible naming convention for save_exposure for combined files
                    self.save_exposure(frames[0], sci_spec2d, sci_sobjs,
                                       self.basename)
                else:
                    msgs.warn(
                        'Output file: {:s} already exists'.format(
                            self.fitstbl.construct_basename(frames[0])) +
                        '. Set overwrite=True to recreate and overwrite.')

            msgs.info('Finished calibration group {0}'.format(i))

        # Check if this is an IFU reduction. If so, make a datacube
        if self.spectrograph.pypeline == "IFU" and self.par['reduce']['cube'][
                'make_cube']:
            msgs.work("Generate datacube")

        # Finish
        self.print_end_time()
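
For reference, the background-frame matching above reduces to a few lines of numpy. The following is a minimal, self-contained sketch; the comb_id/bkg_id values are hypothetical stand-ins for the columns of self.fitstbl:

import numpy as np

# Hypothetical metadata columns: comb_id groups frames to be combined;
# bkg_id holds the comb_id of the frames to use as background.
comb_id = np.array([1, 1, 2, 2])
bkg_id = np.array([2, 2, 1, 1])

frames = np.where(comb_id == 1)[0]  # frames in the current combination group
# All frames whose comb_id matches this group's bkg_id (ignoring unset ids < 0)
bg_frames = np.where((comb_id == bkg_id[frames][0]) & (comb_id >= 0))[0]
print(frames, bg_frames)  # [0 1] [2 3]
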
Example #11
def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None):
    """Correct wavelengths for flexure, object by object

    Parameters:
    ----------
    method : str
      'boxcar' -- Recommneded
      'slitpix' --
    sky_file: str

    Returns:
    ----------
    flex_list: list
      list of dicts containing flexure results
        Aligned with specobjs
        Filled with a basically empty dict if the slit is skipped or there is no object

    """
    sv_fdict = None
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")
    # Load Archive
    sky_spectrum = load_sky_spectrum(sky_file)

    nslits = len(maskslits)
    gdslits = np.where(~maskslits)[0]

    # Loop on objects
    flex_list = []
    # Loop over slits, and then over objects here
    for slit in range(nslits):
        msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(slit))
        indx = specobjs.slitid == slit
        this_specobjs = specobjs[indx]
        # Reset
        flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
                         corr_cen=[], spec_file=sky_file, smooth=[],
                         arx_spec=[], sky_spec=[])
        # If no objects on this slit append an empty dictionary
        if slit not in gdslits:
            flex_list.append(flex_dict.copy())
            continue
        for specobj in this_specobjs:
            if specobj is None:
                continue
            msgs.info("Working on flexure for object # {:d}".format(specobj.objid) + "in slit # {:d}".format(specobj.slitid))
            # Using boxcar
            if method in ['boxcar', 'slitcen']:
                sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
                sky_flux = specobj.boxcar['COUNTS_SKY']
            else:
                msgs.error("Not ready for this flexure method: {}".format(method))

            # Generate 1D spectrum for object
            obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))

            # Calculate the shift
            fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
            if fdict is None:
                msgs.warn("Flexure shift calculation failed for this spectrum.")
                if sv_fdict is not None:
                    msgs.warn("Will used saved estimate from a previous slit/object")
                    fdict = copy.deepcopy(sv_fdict)
                else:
                    msgs.warn("No previous good solution.  Punting on this object")
                    continue
            else:
                sv_fdict = copy.deepcopy(fdict)

            # Simple interpolation to apply
            npix = len(sky_wave)
            x = np.linspace(0., 1., npix)
            # Apply
            for attr in ['boxcar', 'optimal']:
                if not hasattr(specobj, attr):
                    continue
                if 'WAVE' in getattr(specobj, attr).keys():
                    msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) +
                              msgs.newline() + "{0:s}".format(str(specobj)))
                    f = interpolate.interp1d(x, sky_wave, bounds_error=False, fill_value="extrapolate")
                    getattr(specobj, attr)['WAVE'] = f(x+fdict['shift']/(npix-1))*units.AA
            # Shift sky spec too
            cut_sky = fdict['sky_spec']
            x = np.linspace(0., 1., cut_sky.npix)
            f = interpolate.interp1d(x, cut_sky.wavelength.value, bounds_error=False, fill_value="extrapolate")
            twave = f(x + fdict['shift']/(cut_sky.npix-1))*units.AA
            new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux))

            # Update dict
            for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
                flex_dict[key].append(fdict[key])
            flex_dict['sky_spec'].append(new_sky)
        flex_list.append(flex_dict.copy())
    return flex_list
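
The "Simple interpolation to apply" step above resamples the wavelength solution at fractionally shifted pixel positions. A minimal sketch of that operation, with illustrative numbers:

import numpy as np
from scipy import interpolate

npix = 100
wave = np.linspace(5000., 6000., npix)  # wavelength solution on the pixel grid
shift = 2.3                             # flexure shift in pixels (illustrative)

x = np.linspace(0., 1., npix)           # normalized pixel coordinate
f = interpolate.interp1d(x, wave, bounds_error=False, fill_value="extrapolate")
wave_shifted = f(x + shift / (npix - 1))  # wavelengths after applying the shift
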
Example #12
def spec_flexure_slit(slits,
                      slitord,
                      slit_bpm,
                      sky_file,
                      method="boxcar",
                      specobjs=None,
                      slit_specs=None,
                      mxshft=None):
    """Calculate the spectral flexure for every slit (global) or object (local)

    Args:
        slits (:class:`~pypeit.slittrace.SlitTraceSet`):
            Slit trace set
        slitord (`numpy.ndarray`_):
            Array of slit/order numbers
        slit_bpm (`numpy.ndarray`_):
            True = masked slit
        sky_file (str):
            Sky file
        method (:obj:`str`, optional):
            Two methods are available:
                - 'boxcar': Recommended for object extractions. This
                  method uses the boxcar extracted sky and wavelength
                  spectra from the input specobjs
                - 'slitcen': Recommended when no objects are being
                  extracted. This method uses a spectrum (stored in
                  slit_specs) that is extracted from the center of
                  each slit.
        specobjs (:class:`~pypeit.specobjs.SpecObjs`, optional):
            Spectral extractions
        slit_specs (list, optional):
            A list of :class:`~linetools.spectra.xspectrum1d.XSpectrum1D` spectra,
            one for each slit. The spectra stored in this list are sky spectra,
            extracted from the center of each slit.
        mxshft (int, optional):
            Passed to spec_flex_shift()

    Returns:
        :obj:`list`: A list of :obj:`dict` objects containing flexure
        results of each slit. This is filled with a basically empty
        dict if the slit is skipped.
    """
    sv_fdict = None
    msgs.work("Consider doing 2 passes in flexure as in LowRedux")

    # Determine the method
    slit_cen = (specobjs is None) or (method == "slitcen")

    # Load Archive. Save the line information to avoid the performance hit from calling it on the archive sky spectrum
    # multiple times
    sky_spectrum = load_sky_spectrum(sky_file)
    sky_lines = arc.detect_lines(sky_spectrum.flux.value)

    nslits = slits.nslits
    gpm = np.logical_not(slit_bpm)
    gdslits = np.where(gpm)[0]

    # Initialise the flexure list for each slit
    flex_list = []
    # Slit/objects to come back to
    return_later_sobjs = []

    # Loop over slits, and then over objects
    for islit in range(nslits):
        msgs.info("Working on spectral flexure of slit: {:d}".format(islit))

        # Reset
        flex_dict = dict(polyfit=[],
                         shift=[],
                         subpix=[],
                         corr=[],
                         corr_cen=[],
                         spec_file=sky_file,
                         smooth=[],
                         arx_spec=[],
                         sky_spec=[],
                         method=[])

        # If no objects on this slit append an empty dictionary
        if islit not in gdslits:
            flex_list.append(flex_dict.copy())
            continue

        if slit_cen:
            sky_wave = slit_specs[islit].wavelength.value
            sky_flux = slit_specs[islit].flux.value

            # Calculate the shift
            fdict = spec_flex_shift(slit_specs[islit],
                                    sky_spectrum,
                                    sky_lines,
                                    mxshft=mxshft)
            # Failed?
            if fdict is not None:
                # Update dict
                for key in [
                        'polyfit', 'shift', 'subpix', 'corr', 'corr_cen',
                        'smooth', 'arx_spec'
                ]:
                    flex_dict[key].append(fdict[key])
                # Interpolate
                sky_wave_new = flexure_interp(fdict['shift'], sky_wave)
                flex_dict['sky_spec'].append(
                    xspectrum1d.XSpectrum1D.from_tuple(
                        (sky_wave_new, sky_flux)))
                flex_dict['method'].append("slitcen")
        else:
            i_slitord = slitord[islit]
            indx = specobjs.slitorder_indices(i_slitord)
            this_specobjs = specobjs[indx]
            # Loop through objects
            for ss, sobj in enumerate(this_specobjs):
                if sobj is None:
                    continue
                if sobj['BOX_WAVE'] is None:  # Nothing extracted; only the trace exists
                    continue
                msgs.info(
                    "Working on flexure for object # {:d}".format(sobj.OBJID) +
                    " in slit # {:d}".format(islit))

                # Using boxcar
                sky_wave = sobj.BOX_WAVE
                sky_flux = sobj.BOX_COUNTS_SKY

                # Generate 1D spectrum for object
                obj_sky = xspectrum1d.XSpectrum1D.from_tuple(
                    (sky_wave, sky_flux))

                # Calculate the shift
                fdict = spec_flex_shift(obj_sky,
                                        sky_spectrum,
                                        sky_lines,
                                        mxshft=mxshft)
                punt = False
                if fdict is None:
                    msgs.warn(
                        "Flexure shift calculation failed for this spectrum.")
                    if sv_fdict is not None:
                        msgs.warn(
                            "Will use the saved estimate from a previous slit/object"
                        )
                        fdict = copy.deepcopy(sv_fdict)
                    else:
                        # One does not exist yet
                        # Save it for later
                        return_later_sobjs.append([islit, ss])
                        punt = True
                else:
                    sv_fdict = copy.deepcopy(fdict)

                # Punt?
                if punt:
                    break

                # Update dict
                for key in [
                        'polyfit', 'shift', 'subpix', 'corr', 'corr_cen',
                        'smooth', 'arx_spec', 'sky_spec'
                ]:
                    flex_dict[key].append(fdict[key])
                flex_dict['method'].append("boxcar")

        # Check if we need to go back
        # TODO :: This code just throws an error... probably need to delete or fix this "local" spectral flexure code
        if not slit_cen:
            # Do we need to go back?
            for items in return_later_sobjs:
                if sv_fdict is None:
                    msgs.info("No flexure corrections could be made")
                    break
                # Setup
                msgs.error("This probably needs to be updated")
                slit, ss = items
                flex_dict = flex_list[slit]
                sobj = specobjs[ss]
                # Copy me
                fdict = copy.deepcopy(sv_fdict)
                # Update dict
                for key in [
                        'polyfit', 'shift', 'subpix', 'corr', 'corr_cen',
                        'smooth', 'arx_spec', 'sky_spec'
                ]:
                    flex_dict[key].append(fdict[key])
                flex_dict['method'].append("boxcar")

        # Append; this will be an empty dictionary if the flexure failed
        flex_list.append(flex_dict.copy())

    return flex_list
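
When no objects are extracted (the slit_cen branch above), the caller must supply slit_specs. A hypothetical construction using the same XSpectrum1D container the function expects; the wavelength and flux arrays here are stand-ins for real slit-center sky extractions:

import numpy as np
from linetools.spectra import xspectrum1d

wave = np.linspace(5500., 9000., 1000)       # slit-center wavelengths (stand-in)
flux = 1. + 0.1 * np.random.rand(wave.size)  # slit-center sky counts (stand-in)
slit_specs = [xspectrum1d.XSpectrum1D.from_tuple((wave, flux))]  # one per slit
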
Example #13
def spec_flex_shift(obj_skyspec, arx_skyspec, arx_lines, mxshft=20):
    """ Calculate shift between object sky spectrum and archive sky spectrum

    Args:
        obj_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1D`):
            Spectrum of the sky related to our object
        arx_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1D`):
            Archived sky spectrum
        arx_lines (tuple):
            Line information returned by arc.detect_lines for the
            archived sky spectrum
        mxshft (int, optional):
            Maximum allowed shift from flexure; note there are cases that
            have been known to exceed even 30 pixels.

    Returns:
        dict: Contains flexure info; None if the shift calculation fails.
    """

    # TODO None of these routines should have dependencies on XSpectrum1D!

    # Determine the brightest emission lines
    msgs.warn("If we use Paranal, cut down on wavelength early on")
    arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig \
            = arx_lines
    obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj \
            = arc.detect_lines(obj_skyspec.flux.value)

    # Keep only 5 brightest amplitude lines (xxx_keep is array of
    # indices within arx_w of the 5 brightest)
    arx_keep = np.argsort(arx_amp[arx_w])[-5:]
    obj_keep = np.argsort(obj_amp[obj_w])[-5:]

    # Calculate wavelength (Angstrom per pixel)
    arx_disp = np.append(
        arx_skyspec.wavelength.value[1] - arx_skyspec.wavelength.value[0],
        arx_skyspec.wavelength.value[1:] - arx_skyspec.wavelength.value[:-1])
    obj_disp = np.append(
        obj_skyspec.wavelength.value[1] - obj_skyspec.wavelength.value[0],
        obj_skyspec.wavelength.value[1:] - obj_skyspec.wavelength.value[:-1])

    # Calculate resolution (lambda/delta lambda_FWHM); we may not need
    # this and could just use the sigmas
    arx_idx = (arx_cent + 0.5).astype(int)[arx_w][arx_keep]  # The +0.5 is for rounding
    arx_res = arx_skyspec.wavelength.value[arx_idx]/\
              (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
    obj_idx = (obj_cent + 0.5).astype(int)[obj_w][obj_keep]  # The +0.5 is for rounding
    obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
              (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])

    if not np.all(np.isfinite(obj_res)):
        msgs.warn(
            'Failed to measure the resolution of the object spectrum, likely due to error '
            'in the wavelength image.')
        return None
    msgs.info("Resolution of Archive={0} and Observation={1}".format(
        np.median(arx_res), np.median(obj_res)))

    # Determine sigma of gaussian for smoothing
    arx_sig2 = np.power(arx_disp[arx_idx] * arx_wid[arx_w][arx_keep], 2)
    obj_sig2 = np.power(obj_disp[obj_idx] * obj_wid[obj_w][obj_keep], 2)

    arx_med_sig2 = np.median(arx_sig2)
    obj_med_sig2 = np.median(obj_sig2)

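    # Gaussian widths add in quadrature under convolution, so smoothing the
    # sharper spectrum with sigma = sqrt(|obj_sig2 - arx_sig2|) brings the two
    # spectra to a common effective resolution.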
    if obj_med_sig2 >= arx_med_sig2:
        smooth_sig = np.sqrt(obj_med_sig2 - arx_med_sig2)  # Ang
        smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix * 2 *
                                               np.sqrt(2 * np.log(2)))
    else:
        msgs.warn("Object sky spectrum has higher resolution than the archive. "
                  "Not smoothing; prefer an archival sky spectrum with higher resolution.")
        smooth_sig_pix = 0.
        #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)

    # Determine region of wavelength overlap
    min_wave = max(np.amin(arx_skyspec.wavelength.value),
                   np.amin(obj_skyspec.wavelength.value))
    max_wave = min(np.amax(arx_skyspec.wavelength.value),
                   np.amax(obj_skyspec.wavelength.value))


    # Define wavelengths of overlapping spectra
    keep_idx = np.where((obj_skyspec.wavelength.value >= min_wave)
                        & (obj_skyspec.wavelength.value <= max_wave))[0]

    # Rebin both spectra onto the overlapped wavelength range
    if len(keep_idx) <= 50:
        msgs.warn("Not enough overlap between sky spectra")
        return None

    # rebin onto object ALWAYS
    keep_wave = obj_skyspec.wavelength[keep_idx]
    arx_skyspec = arx_skyspec.rebin(keep_wave)
    obj_skyspec = obj_skyspec.rebin(keep_wave)
    # Trim edges (rebinning is junk there)
    arx_skyspec.data['flux'][0, :2] = 0.
    arx_skyspec.data['flux'][0, -2:] = 0.
    obj_skyspec.data['flux'][0, :2] = 0.
    obj_skyspec.data['flux'][0, -2:] = 0.

    # Set minimum to 0.  For bad rebinning and for pernicious extractions
    obj_skyspec.data['flux'][0, :] = np.maximum(obj_skyspec.data['flux'][0, :],
                                                0.)
    arx_skyspec.data['flux'][0, :] = np.maximum(arx_skyspec.data['flux'][0, :],
                                                0.)

    # Normalize spectra to unit average sky count
    norm = np.sum(obj_skyspec.flux.value) / obj_skyspec.npix
    norm2 = np.sum(arx_skyspec.flux.value) / arx_skyspec.npix
    if norm <= 0:
        msgs.warn("Bad normalization of object in flexure algorithm")
        msgs.warn("Will try the median")
        norm = np.median(obj_skyspec.flux.value)
        if norm <= 0:
            msgs.warn("Improper sky spectrum for flexure.  Is it too faint??")
            return None
    if norm2 <= 0:
        msgs.warn(
            'Bad normalization of archive in flexure. You are probably using wavelengths '
            'well beyond the archive.')
        return None
    obj_skyspec.flux = obj_skyspec.flux / norm
    arx_skyspec.flux = arx_skyspec.flux / norm2

    # Deal with bad pixels
    msgs.work("Need to mask bad pixels")

    # Deal with underlying continuum
    msgs.work("Consider taking median first [5 pixel]")
    everyn = obj_skyspec.npix // 20
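    # Fit a low-order bspline (breakpoints every `everyn` pixels, i.e. ~20
    # across the spectrum) to each spectrum and subtract it, so that the
    # cross-correlation below is driven by sky lines rather than the continuum.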
    pypeitFit_obj, _ = fitting.iterfit(obj_skyspec.wavelength.value,
                                       obj_skyspec.flux.value,
                                       nord=3,
                                       kwargs_bspline={'everyn': everyn},
                                       kwargs_reject={
                                           'groupbadpix': True,
                                           'maxrej': 1
                                       },
                                       maxiter=15,
                                       upper=3.0,
                                       lower=3.0)
    obj_sky_cont, _ = pypeitFit_obj.value(obj_skyspec.wavelength.value)

    obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
    pypeitFit_sky, _ = fitting.iterfit(arx_skyspec.wavelength.value,
                                       arx_skyspec.flux.value,
                                       nord=3,
                                       kwargs_bspline={'everyn': everyn},
                                       kwargs_reject={
                                           'groupbadpix': True,
                                           'maxrej': 1
                                       },
                                       maxiter=15,
                                       upper=3.0,
                                       lower=3.0)
    arx_sky_cont, _ = pypeitFit_sky.value(arx_skyspec.wavelength.value)
    arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont

    # Consider sharpness filtering (e.g. LowRedux)
    msgs.work("Consider sharpness filtering (e.g. LowRedux)")

    # Cross-correlate the continuum-subtracted spectra
    corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")

    # Create an array around the max of the correlation function for fitting the subpixel max
    # Restrict to pixels within mxshft of zero lag
    lag0 = corr.size // 2
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix_grid = np.linspace(max_corr - 3., max_corr + 3., 7)

    # Fit a degree-2 polynomial to the peak of the correlation function. JFH added this if/else to not crash on bad slits
    if np.any(np.isfinite(corr[subpix_grid.astype(int)])):
        fit = fitting.PypeItFit(xval=subpix_grid,
                                yval=corr[subpix_grid.astype(int)],
                                func='polynomial',
                                order=np.atleast_1d(2))
        fit.fit()
        success = True
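        # Peak of the fitted parabola c0 + c1*x + c2*x**2 lies at x = -c1/(2*c2)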
        max_fit = -0.5 * fit.fitc[1] / fit.fitc[2]
    else:
        fit = fitting.PypeItFit(xval=subpix_grid,
                                yval=0.0 * subpix_grid,
                                func='polynomial',
                                order=np.atleast_1d(2))
        fit.fit()
        success = False
        max_fit = 0.0
        msgs.warn('Flexure compensation failed for one of your objects')

    # Calculate the shift in pixels
    shift = float(max_fit) - lag0
    msgs.info("Flexure correction of {:g} pixels".format(shift))

    return dict(polyfit=fit,
                shift=shift,
                subpix=subpix_grid,
                corr=corr[subpix_grid.astype(int)],
                sky_spec=obj_skyspec,
                arx_spec=arx_skyspec,
                corr_cen=corr.size / 2,
                smooth=smooth_sig_pix,
                success=success)
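
A hedged usage sketch for spec_flex_shift (assumed to be in scope from the definition above). The spectra below are synthetic stand-ins, a flat continuum plus a few Gaussian "sky lines" duplicated with a ~2 pixel offset; in practice obj_skyspec comes from a boxcar sky extraction and arx_skyspec from the sky archive:

import numpy as np
from linetools.spectra import xspectrum1d
from pypeit.core import arc

# Synthetic archive sky and a copy whose wavelength solution is offset
wave = np.linspace(7000., 9000., 2000)
dpix = wave[1] - wave[0]
flux = 1. + sum(50. * np.exp(-0.5 * ((wave - c) / (3 * dpix)) ** 2)
                for c in (7340., 7820., 8430., 8827.))
arx_skyspec = xspectrum1d.XSpectrum1D.from_tuple((wave, flux))
obj_skyspec = xspectrum1d.XSpectrum1D.from_tuple((wave + 2. * dpix, flux))

arx_lines = arc.detect_lines(arx_skyspec.flux.value)
fdict = spec_flex_shift(obj_skyspec, arx_skyspec, arx_lines, mxshft=20)
if fdict is not None and fdict['success']:
    print("flexure shift: {:.2f} pixels".format(fdict['shift']))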