Example No. 1
def test_func_fit():
    """ Run the parameter setup script
    """
    x = np.pi * np.linspace(0, 1., 100)
    y = np.sin(x)
    # Polynomial
    pcoeff = utils.func_fit(x, y, 'polynomial', 3)
    np.testing.assert_allclose(pcoeff,
                               np.array([
                                   -4.74660344e-02, 1.30745471e+00,
                                   -4.16175760e-01, 3.08557167e-18
                               ]),
                               atol=1e-9)
    # Legendre
    lcoeff = utils.func_fit(x, y, 'legendre', 3)
    np.testing.assert_allclose(lcoeff,
                               np.array([
                                   6.37115652e-01, 6.83317251e-17,
                                   -6.84581686e-01, -7.59352737e-17
                               ]),
                               atol=1e-9)
    # bspline
    bcoeff = utils.func_fit(x, y, 'bspline', 2)
    np.testing.assert_allclose(bcoeff[0], [
        0., 0., 0., 0.31733259, 0.95199777, 1.58666296, 2.22132814, 3.14159265,
        3.14159265, 3.14159265
    ],
                               atol=1e-5)
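
The expected coefficients are ordered lowest degree first, and the near-zero odd Legendre terms indicate that func_fit maps x onto [-1, 1] before fitting (sin is symmetric about the interval midpoint). For readers without the PypeIt utils module, a rough standalone analog with numpy.polynomial is sketched below; this assumes func_fit behaves like NumPy's least-squares fitters, which the source does not guarantee.

import numpy as np
from numpy.polynomial import polynomial as P
from numpy.polynomial import legendre as L

x = np.pi * np.linspace(0., 1., 100)
y = np.sin(x)

# Plain least-squares cubic; coefficients ordered c0 + c1*x + c2*x**2 + c3*x**3
pcoeff = P.polyfit(x, y, 3)

# Legendre-series fit after mapping x onto [-1, 1], as the test values imply
xn = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
lcoeff = L.legfit(xn, y, 3)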
Example No. 2
def flex_shift(obj_skyspec, arx_skyspec, mxshft=20):
    """ Calculate shift between object sky spectrum and archive sky spectrum

    Parameters
    ----------
    obj_skyspec
    arx_skyspec

    Returns
    -------
    flex_dict: dict
      Contains flexure info
    """

    # TODO None of these routines should have dependencies on XSpectrum1d!

    # Determine the brightest emission lines
    msgs.warn("If we use Paranal, cut down on wavelength early on")
    arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(
        arx_skyspec.flux.value)
    obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj = arc.detect_lines(
        obj_skyspec.flux.value)

    # Keep only 5 brightest amplitude lines (xxx_keep is array of
    # indices within arx_w of the 5 brightest)
    arx_keep = np.argsort(arx_amp[arx_w])[-5:]
    obj_keep = np.argsort(obj_amp[obj_w])[-5:]

    # Calculate wavelength (Angstrom per pixel)
    arx_disp = np.append(
        arx_skyspec.wavelength.value[1] - arx_skyspec.wavelength.value[0],
        arx_skyspec.wavelength.value[1:] - arx_skyspec.wavelength.value[:-1])
    #arx_disp = (np.amax(arx_sky.wavelength.value)-np.amin(arx_sky.wavelength.value))/arx_sky.wavelength.size
    obj_disp = np.append(
        obj_skyspec.wavelength.value[1] - obj_skyspec.wavelength.value[0],
        obj_skyspec.wavelength.value[1:] - obj_skyspec.wavelength.value[:-1])
    #obj_disp = (np.amax(obj_sky.wavelength.value)-np.amin(obj_sky.wavelength.value))/obj_sky.wavelength.size

    # Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need
    # this? can just use sigmas
    # The +0.5 rounds to the nearest pixel; 2*sqrt(2*ln(2)) converts a
    # Gaussian sigma to a FWHM
    arx_idx = (arx_cent + 0.5).astype(int)[arx_w][arx_keep]
    arx_res = arx_skyspec.wavelength.value[arx_idx] \
              / (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
    obj_idx = (obj_cent + 0.5).astype(int)[obj_w][obj_keep]
    obj_res = obj_skyspec.wavelength.value[obj_idx] \
              / (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])
    #obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/(
    #    obj_disp*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])

    if not np.all(np.isfinite(obj_res)):
        msgs.warn(
            'Failed to measure the resolution of the object spectrum, likely due to error '
            'in the wavelength image.')
        return None
    msgs.info("Resolution of Archive={0} and Observation={1}".format(
        np.median(arx_res), np.median(obj_res)))

    # Determine sigma of gaussian for smoothing
    arx_sig2 = np.power(arx_disp[arx_idx] * arx_wid[arx_w][arx_keep], 2)
    obj_sig2 = np.power(obj_disp[obj_idx] * obj_wid[obj_w][obj_keep], 2)

    arx_med_sig2 = np.median(arx_sig2)
    obj_med_sig2 = np.median(obj_sig2)

    if obj_med_sig2 >= arx_med_sig2:
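        # Gaussian kernels add in quadrature: smoothing the archive with
        # sigma = sqrt(obj_med_sig2 - arx_med_sig2) degrades it to match the
        # object resolution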
        smooth_sig = np.sqrt(obj_med_sig2 - arx_med_sig2)  # Ang
        smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix * 2 *
                                               np.sqrt(2 * np.log(2)))
    else:
        msgs.warn("New sky spectrum has higher resolution than the archive; not smoothing. "
                  "Prefer an archival sky spectrum with the higher resolution.")
        smooth_sig_pix = 0.
        #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)

    #Determine region of wavelength overlap
    min_wave = max(np.amin(arx_skyspec.wavelength.value),
                   np.amin(obj_skyspec.wavelength.value))
    max_wave = min(np.amax(arx_skyspec.wavelength.value),
                   np.amax(obj_skyspec.wavelength.value))

    #Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
    #    if np.median(obj_res) >= np.median(arx_res):
    #        msgs.warn("New Sky has higher resolution than Archive.  Not smoothing")
    #obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)
    #    else:
    #tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
    #        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
    #arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)

    # Define wavelengths of overlapping spectra
    keep_idx = np.where((obj_skyspec.wavelength.value >= min_wave)
                        & (obj_skyspec.wavelength.value <= max_wave))[0]
    #keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]

    #Rebin both spectra onto overlapped wavelength range
    if len(keep_idx) <= 50:
        msgs.warn("Not enough overlap between sky spectra")
        return None
    else:  #rebin onto object ALWAYS
        keep_wave = obj_skyspec.wavelength[keep_idx]
        arx_skyspec = arx_skyspec.rebin(keep_wave)
        obj_skyspec = obj_skyspec.rebin(keep_wave)
        # Trim edges (rebinning is junk there)
        arx_skyspec.data['flux'][0, :2] = 0.
        arx_skyspec.data['flux'][0, -2:] = 0.
        obj_skyspec.data['flux'][0, :2] = 0.
        obj_skyspec.data['flux'][0, -2:] = 0.

    # Normalize spectra to unit average sky count
    norm = np.sum(obj_skyspec.flux.value) / obj_skyspec.npix
    obj_skyspec.flux = obj_skyspec.flux / norm
    norm2 = np.sum(arx_skyspec.flux.value) / arx_skyspec.npix
    arx_skyspec.flux = arx_skyspec.flux / norm2
    if (norm < 0.):
        msgs.warn("Bad normalization of object in flexure algorithm")
        msgs.warn("Will try the median")
        norm = np.median(obj_skyspec.flux.value)
        if (norm < 0.):
            msgs.warn("Improper sky spectrum for flexure.  Is it too faint??")
            return None
    if (norm2 < 0.):
        msgs.warn(
            'Bad normalization of archive in flexure. You are probably using wavelengths '
            'well beyond the archive.')
        return None

    # Deal with bad pixels
    msgs.work("Need to mask bad pixels")

    # Deal with underlying continuum
    msgs.work("Consider taking median first [5 pixel]")
    everyn = obj_skyspec.npix // 20
    bspline_par = dict(everyn=everyn)
    mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value,
                                    obj_skyspec.flux.value,
                                    3,
                                    function='bspline',
                                    sigma=3.,
                                    bspline_par=bspline_par)
    obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
    obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
    mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value,
                                        arx_skyspec.flux.value,
                                        3,
                                        function='bspline',
                                        sigma=3.,
                                        bspline_par=bspline_par)
    arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value,
                                  'bspline')
    arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont

    # Consider sharpness filtering (e.g. LowRedux)
    msgs.work("Consider taking median first [5 pixel]")

    #Cross correlation of spectra
    #corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
    corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")

    #Create an array around the correlation maximum for fitting the subpixel peak
    # Restrict to pixels within maxshift of zero lag
    lag0 = corr.size // 2
    #mxshft = settings.argflag['reduce']['flexure']['maxshift']
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix_grid = np.linspace(max_corr - 3., max_corr + 3., 7)

    #Fit a degree-2 polynomial to the peak of the correlation function.
    # JFH added this if/else so bad slits do not crash the reduction.
    if np.any(np.isfinite(corr[subpix_grid.astype(int)])):
        fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(int)],
                             'polynomial', 2)
        success = True
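        # Peak of the quadratic c2*x**2 + c1*x + c0 lies at x = -c1 / (2*c2)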
        max_fit = -0.5 * fit[1] / fit[2]
    else:
        fit = utils.func_fit(subpix_grid, 0.0 * subpix_grid, 'polynomial', 2)
        success = False
        max_fit = 0.0
        msgs.warn('Flexure compensation failed for one of your objects')

    #Calculate and apply shift in wavelength
    shift = float(max_fit) - lag0
    msgs.info("Flexure correction of {:g} pixels".format(shift))
    #model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]

    flex_dict = dict(polyfit=fit,
                     shift=shift,
                     subpix=subpix_grid,
                     corr=corr[subpix_grid.astype(int)],
                     sky_spec=obj_skyspec,
                     arx_spec=arx_skyspec,
                     corr_cen=corr.size // 2,
                     smooth=smooth_sig_pix,
                     success=success)
    # Return
    return flex_dict
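
The core numerical step in flex_shift is the quadratic refinement of the cross-correlation peak. Below is a minimal, self-contained sketch of that sub-pixel estimator; the function name subpixel_lag and the Gaussian test signals a and b are hypothetical stand-ins for the continuum-subtracted archive and object sky spectra.

import numpy as np

def subpixel_lag(a, b, mxshft=20):
    """Sub-pixel lag between two equal-length 1-D signals (hypothetical helper)."""
    corr = np.correlate(a, b, "same")
    lag0 = corr.size // 2
    # Integer peak, restricted to within mxshft pixels of zero lag
    peak = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    # Quadratic through the peak and its neighbours; the vertex of
    # c2*x**2 + c1*x + c0 sits at x = -c1 / (2*c2)
    grid = np.arange(peak - 3, peak + 4)
    c = np.polynomial.polynomial.polyfit(grid, corr[grid], 2)
    return -0.5 * c[1] / c[2] - lag0

# Two Gaussians offset by 2.3 pixels: prints roughly -2.3, since with
# np.correlate a signal displaced redward produces a negative lag
x = np.arange(200.)
a = np.exp(-0.5 * ((x - 100.0) / 5.0)**2)
b = np.exp(-0.5 * ((x - 102.3) / 5.0)**2)
print(subpixel_lag(a, b))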
Example No. 3
def basis(xfit, yfit, coeff, npc, pnpc, weights=None, skipx0=True, x0in=None, mask=None,
          function='polynomial'):
    nrow = xfit.shape[0]
    ntrace = xfit.shape[1]
    if x0in is None:
        x0in = np.arange(float(ntrace))

    # Mask out some orders if they are bad
    if mask is None or mask.size == 0:
        usetrace = np.arange(ntrace)
        outmask = np.ones((nrow, ntrace))
    else:
        usetrace = np.where(~np.in1d(np.arange(ntrace), mask))[0]
        outmask = np.ones((nrow, ntrace))
        outmask[:,mask] = 0.0

    # Do the PCA analysis
    eigc, hidden = get_pc(coeff[1:npc+1, usetrace], npc)
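    # eigc: principal directions in coefficient space; hidden: per-trace PC
    # amplitudes, fit below as smooth functions of trace number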

    modl = func_vander(xfit[:,0], function, npc)
    eigv = np.dot(modl[:,1:], eigc)

    med_hidden = np.median(hidden, axis=1)
    med_highorder = med_hidden.copy()
    med_highorder[0] = 0

    high_order_matrix = med_highorder.T[np.newaxis,:].repeat(ntrace, axis=0)

    # y = hidden[0,:]
    # coeff0 = utils.robust_regression(x0in[usetrace], y, pnpc[1], 0.1, function=function)

    # y = hidden[1,:]
    # coeff1 = utils.robust_regression(x0in[usetrace], y, pnpc[2], 0.1, function=function)

    coeffstr = []
    for i in range(1, npc+1):
        # if pnpc[i] == 0:
        #     coeffstr.append([-9.99E9])
        #     continue
        # coeff0 = utils.robust_regression(x0in[usetrace], hidden[i-1,:], pnpc[i], 0.1, function=function, min=x0in[0], max=x0in[-1])
        if weights is not None:
            tmask, coeff0 = utils.robust_polyfit(x0in[usetrace], hidden[i-1, :], pnpc[i],
                                                   weights=weights[usetrace], sigma=2.0, function=function,
                                                   minx=x0in[0], maxx=x0in[-1])
        else:
            tmask, coeff0 = utils.robust_polyfit(x0in[usetrace], hidden[i-1, :], pnpc[i],
                                                   sigma=2.0, function=function,
                                                   minx=x0in[0], maxx=x0in[-1])
        coeffstr.append(coeff0)
        high_order_matrix[:, i-1] = utils.func_val(coeff0, x0in, function, minx=x0in[0], maxx=x0in[-1])
    # high_order_matrix[:,1] = utils.func_val(coeff1, x0in, function)
    high_fit = high_order_matrix.copy()

    high_order_fit = np.dot(eigv, high_order_matrix.T)
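    # sub: residual after removing the PC reconstruction; its per-trace means
    # (numer/denom) give the zero-point offsets x0 fit below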
    sub = (yfit - high_order_fit) * outmask

    numer = np.sum(sub, axis=0)
    denom = np.sum(outmask, axis=0)
    x0 = np.zeros(ntrace, dtype=float)
    fitmask = np.zeros(ntrace, dtype=float)
    #fitmask[mask] = 1
    x0fit = np.zeros(ntrace, dtype=float)
    chisqnu = 0.0
    chisqold = 0.0
    robust = True
    #svx0 = numer/(denom+(denom == 0).astype(np.int))
    if not skipx0:
        fitmask = (np.abs(denom) > 10).astype(int)
        if robust:
            good = np.where(fitmask != 0)[0]
            bad = np.where(fitmask == 0)[0]
            x0[good] = numer[good]/denom[good]
            imask = np.zeros(ntrace, dtype=float)
            imask[bad] = 1.0
            ttmask, x0res = utils.robust_polyfit(x0in, x0, pnpc[0], weights=weights, sigma=2.0,
                                                   function=function, minx=x0in[0], maxx=x0in[-1], initialmask=imask)
            x0fit = utils.func_val(x0res, x0in, function, minx=x0in[0], maxx=x0in[-1])
            good = np.where(ttmask == 0)[0]
            xstd = 1.0  # This should represent the dispersion in the fit
            chisq = ((x0[good]-x0fit[good])/xstd)**2.0
            chisqnu = np.sum(chisq)/np.sum(ttmask)
            fitmask = 1.0-ttmask
            msgs.prindent("  Reduced chi-squared = {0:E}".format(chisqnu))
        else:
            for i in range(1, 5):
                good = np.where(fitmask != 0)[0]
                x0[good] = numer[good]/denom[good]
                # x0res = utils.robust_regression(x0in[good], x0[good], pnpc[0], 0.2, function=function)
                x0res = utils.func_fit(x0in[good], x0[good], function, pnpc[0],
                                         weights=weights, minx=x0in[0], maxx=x0in[-1])
                x0fit = utils.func_val(x0res, x0in, function, minx=x0in[0], maxx=x0in[-1])
                chisq = (x0[good]-x0fit[good])**2.0
                fitmask[good] *= (chisq < np.sum(chisq)/2.0).astype(int)
                chisqnu = np.sum(chisq)/np.sum(fitmask)
                msgs.prindent("  Reduced chi-squared = {0:E}".format(chisqnu))
                if chisqnu == chisqold:
                    break
                else:
                    chisqold = chisqnu
        if chisqnu > 2.0:
            msgs.warn("PCA has very large residuals")
        elif chisqnu > 0.5:
            msgs.warn("PCA has fairly large residuals")
        #bad = np.where(fitmask==0)[0]
        #x0[bad] = x0fit[bad]
    else:
        x0res = 0.0
    x3fit = np.dot(eigv, high_order_matrix.T) + np.outer(x0fit, np.ones(nrow)).T
    outpar = dict({'high_fit': high_fit, 'x0': x0, 'x0in': x0in, 'x0fit': x0fit, 'x0res': x0res, 'x0mask': fitmask,
                   'hidden': hidden, 'usetrc': usetrace, 'eigv': eigv, 'npc': npc, 'coeffstr': coeffstr})
    return x3fit, outpar
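
get_pc is the one routine basis depends on that is not shown here. The sketch below gives a plausible SVD-based stand-in, assuming get_pc performs a standard principal component decomposition of the (ncoeff, ntrace) coefficient matrix; the real routine's centering and scaling conventions may differ.

import numpy as np

def get_pc_sketch(coeff, npc):
    """First npc principal directions of coeff plus per-trace amplitudes.

    coeff : (ncoeff, ntrace) array, one column of fit coefficients per trace.
    """
    mean = coeff.mean(axis=1, keepdims=True)
    u, s, vt = np.linalg.svd(coeff - mean, full_matrices=False)
    eigc = u[:, :npc]                    # directions in coefficient space
    hidden = s[:npc, None] * vt[:npc]    # (npc, ntrace) amplitudes per trace
    return eigc, hidden

# Truncated reconstruction: eigc @ hidden recovers coeff up to the variance
# discarded with the trailing components
rng = np.random.default_rng(0)
coeff = rng.normal(size=(4, 30))
eigc, hidden = get_pc_sketch(coeff, npc=2)
recon = eigc @ hidden + coeff.mean(axis=1, keepdims=True)
print(np.abs(recon - coeff).max())

Because basis calls get_pc with a matrix of exactly npc rows, eigc comes back (npc, npc), which is the shape the projection np.dot(modl[:, 1:], eigc) above requires.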