def fit_bp(bp, fl):
    # Fit the spectrum with a degree-10 Legendre series over the good pixels
    n_bp = len(bp)
    x = np.linspace(-1, 1, num=n_bp, endpoint=True)
    L = Legendre([1, 1, 1, 1])
    bpfit = L.fit(x[fl != 0], bp[fl != 0], 10, domain=[-1, 1])
    x_fit, y_fit = bpfit.linspace(n=n_bp)
    #y_fit /= np.median(y_fit)
    return y_fit
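A minimal usage sketch for fit_bp, assuming numpy is imported as np and Legendre is numpy.polynomial.legendre.Legendre; the bandpass and flag arrays below are synthetic, not from the original pipeline.

import numpy as np
from numpy.polynomial.legendre import Legendre

bp = 1.0 + 0.1 * np.sin(np.linspace(0.0, 3.0, 200))  # synthetic bandpass
fl = np.ones_like(bp)                                # flags: nonzero = good pixel
fl[50:60] = 0                                        # mask a bad region
y_fit = fit_bp(bp, fl)                               # smooth Legendre model of bp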
Example #2
def coef_to_trajectory(c, evaluation_points_nb, basis, basis_features,
                       basis_dimension):
    """
    Given coefficients, build the associated trajectory with respect to
    a functional basis.

    Inputs:
        - c: list of floats or list of pd.Series
            Each element of the list contains coefficients of a state
        - evaluation_points_nb: int
            Number of points on which the trajectory is evaluated
        - basis: string
            Name of the functional basis
        - basis_features: dict
            Contains information on the basis for each state
        - basis_dimension: dict
            Gives the number of basis functions for each state
    Output:
        - y: DataFrame
            Contains computed states of a flight
    """
    # FIXME: is the conversion below necessary?
    # If c is a list of floats, convert it into a list of pd.Series
    n_var = len(basis_dimension)
    if len(c) != n_var:
        c_formatted = []
        k = 0
        for state in basis_dimension:
            c_ = pd.Series(c[k:k+basis_dimension[state]], name=state)
            k += basis_dimension[state]
            c_formatted.append(c_)
        c = c_formatted.copy()

    y = pd.DataFrame()
    # Build each state
    for i in range(n_var):
        if basis == 'legendre':
            # Fix the domain [0,1] of the basis
            cl_c_state = Legendre(c[i].values, domain=[0, 1])
            # Evaluate
            _, y[c[i].name] = Legendre.linspace(cl_c_state,
                                                n=evaluation_points_nb)
        elif basis == 'bspline':
            # Get knots
            t = basis_features['knots']
            # Get degree of spline for the i-th state
            k_i = list(basis_features.values())[i+1]
            # Add knots at endpoints 0 and 1
            t_i = np.r_[(0,)*(k_i+1), t, (1,)*(k_i+1)]
            # Create spline
            spl_i = BSpline(t_i, c[i].values, k_i)
            # Define evaluation points
            x = np.linspace(0, 1, evaluation_points_nb)
            # Evaluate
            y[c[i].name] = spl_i(x)

    return y
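A usage sketch for coef_to_trajectory with the 'legendre' basis; the state names and dimensions below are illustrative, not from the original project.

import numpy as np
import pandas as pd

basis_dimension = {'altitude': 4, 'speed': 3}       # hypothetical states
c = list(np.random.rand(7))                         # flat list of 4 + 3 coefficients
y = coef_to_trajectory(c, evaluation_points_nb=50, basis='legendre',
                       basis_features={}, basis_dimension=basis_dimension)
print(y.shape)                                      # (50, 2): one column per state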
Example #4
def make_poly_regressors(n_samples, order=2):
    # constant (mean) column
    X = np.ones((n_samples, 1))
    for d in range(order):
        poly = Legendre.basis(d + 1)
        poly_trend = poly(np.linspace(-1, 1, n_samples))
        X = np.hstack((X, poly_trend[:, None]))
    return X
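Usage sketch: the design matrix pairs a constant column with Legendre trend terms evaluated on [-1, 1].

import numpy as np

X = make_poly_regressors(100, order=2)
print(X.shape)                 # (100, 3): constant, linear, quadratic trend
# The Legendre columns are close to mutually orthogonal on a dense grid:
print(np.round(X.T @ X, 1))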
Example #5
    def _reval_legendre(y, p):
        """Re-evaluate Legendre polynomials."""
        P = np.zeros((p + 1, len(y)))
        dP = np.zeros((p + 1, 1, len(y)))

        P[0] = 1. - y
        P[1] = y
        dP[0][0] = -1. + 0. * y
        dP[1][0] = 1. + 0. * y

        x = 2. * y - 1.
        for n in range(2, p + 1):
            c = np.zeros(n)
            c[n - 1] = 1.
            s = Legendre(c).integ(lbnd=-1)
            scale = np.sqrt((2. * n - 1.) / 2.)
            P[n] = s(x) * scale
            dP[n][0] = 2 * s.deriv()(x) * scale

        return P, dP
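These look like integrated-Legendre (Lobatto-type) shape functions on y in [0, 1], with the first two being the linear hat functions. A standalone sketch of calling it, assuming the method needs no instance state:

import numpy as np

y = np.linspace(0.0, 1.0, 5)
P, dP = _reval_legendre(y, 3)
print(P.shape, dP.shape)              # (4, 5) and (4, 1, 5)
assert np.allclose(P[0] + P[1], 1.0)  # the linear modes form a partition of unity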
Example #6
    def __init__(self, *args, **kwargs):
        PolynomialBasisFunctions.__init__(self, *args, **kwargs)

        # Standard linear basis functions if the polynomial order is 1
        if self.polynomialOrder == 1:
            self._functions = [Legendre((0.5, -0.5)), Legendre((0.5, 0.5))]

        # Integrated Legendre polynomials (first two functions replaced by classic linear ones)
        elif self.polynomialOrder > 1:
            # Base
            self._functions = [
                Legendre(coefficients)
                for coefficients in oneHotArray(self.polynomialOrder + 1)
            ]

            # Integrate base functions
            for index in range(self.polynomialOrder - 1, 0, -1):
                self._functions[index] = Legendre(legint(
                    np.sqrt(float(index) - 0.5) * self._functions[index].coef,
                    m=1,
                    lbnd=-1.0),
                                                  domain=self.domain)

            # Replace first and last functions
            self._functions[0] = Legendre((0.5, -0.5))
            self._functions[-1] = Legendre((0.5, 0.5))

        else:
            raise ValueError("Invalid polynomial order!")
Example #7
    def __init__(self, *args, **kwargs):
        PolynomialBasisFunctions.__init__(self, *args, **kwargs)

        # Standard linear basis functions if the polynomial order is 1
        if self.polynomialOrder == 1:
            self._functions = [Legendre((0.5, -0.5)), Legendre((0.5, 0.5))]

        # Legendre polynomials, but replace the constant function with a linear one
        elif self.polynomialOrder > 1:
            self._functions = [None for i in range(self.polynomialOrder + 1)]
            for index, coefficients in enumerate(
                    oneHotArray(self.polynomialOrder + 1)):

                if index == 0:
                    coefficients[0] = 0
                    coefficients[1] = -1

                self._functions[index] = Legendre(coefficients,
                                                  domain=self.domain)

        else:
            raise ValueError("Invalid polynomial order!")
Example #8
def p4():
    deg = 4
    c = np.zeros(deg + 1)
    dom = [-np.pi, np.pi]
    for i in range(0, 5):
        coef = np.zeros(deg + 1)
        coef[i] = 1
        prod = lambda x: Legendre(coef, dom)(x)**2
        den = integrate.quad(prod, -np.pi, np.pi)
        prod = lambda x: Legendre(coef, dom)(x) * np.cos(x)
        num = integrate.quad(prod, -np.pi, np.pi)
        c[i] = num[0] / den[0]
    p = Legendre(c, dom)
    # Plot cos(x), its Legendre approximation, and the residual
    x = np.linspace(-np.pi, np.pi, 500)
    yp = p(x)
    ycos = np.cos(x)
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(x, ycos, '-k')
    ax1.plot(x, yp, '-c')
    ax2.plot(x, ycos - yp, '-k')
    plt.show()
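p4 computes the degree-4 L2 projection of cos(x) onto Legendre polynomials on [-pi, pi], with coefficients c_i = integral(P_i * cos) / integral(P_i^2); odd-order coefficients vanish because cos is even. A dense least-squares fit approximates the same projection, giving a quick cross-check (a sketch, using the same numpy Legendre class):

import numpy as np
from numpy.polynomial.legendre import Legendre

x = np.linspace(-np.pi, np.pi, 2000)
p_ls = Legendre.fit(x, np.cos(x), 4, domain=[-np.pi, np.pi])
print(p_ls.coef)   # odd-order entries are ~0 by symmetry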
Example #9
def trajectory_to_coef(y, basis, basis_features, basis_dimension):
    """
    Given a trajectory, compute its associated coefficients for each
    state with respect to a functional basis.

    Inputs:
        - y: DataFrame
            Trajectory - Index has to start at 0
        - basis: string
            Name of the functional basis
        - basis_features: dict
            Contains information on the basis for each state
        - basis_dimension: dict
            Gives the dimension of the basis for each state
    Output:
        - coef: ndarray
            Flattened array of the coefficients of all states,
            concatenated in state order
    """
    # Define data on [0, 1] because each trajectory is considered as being
    # defined on [0,1]
    evaluation_points_nb = y.shape[0] - 1
    x = y.index / evaluation_points_nb
    coef = []
    if basis == 'legendre':
        # Compute coefficients for each state
        for state in basis_dimension:
            # NB: Use Legendre class to fix the domain of the basis
            least_square_fit = Legendre.fit(x,
                                            y[state],
                                            deg=basis_dimension[state]-1,
                                            domain=[0, 1])
            s = pd.Series(least_square_fit.coef, name=state)
            coef.append(s)
    elif basis == 'bspline':
        # Get internal knots
        t = basis_features['knots']
        # Compute coefficients for each state
        for state in basis_dimension:
            # Get degree
            k_state = basis_features[state]
            # Add external knots depending on the degree
            t_state = np.r_[(0,)*(k_state+1), t, (1,)*(k_state+1)]
            # Least-squares spline fit
            spl = make_lsq_spline(x, y[state], t_state, k_state)
            s = pd.Series(spl.c, name=state)
            coef.append(s)
    coef = np.array([c for series in coef for c in series.values])

    return coef
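Round-trip sketch: trajectory_to_coef composed with coef_to_trajectory (defined earlier in this collection) should reproduce a trajectory when the basis is rich enough; the state name below is illustrative.

import numpy as np
import pandas as pd

n_points = 100
y = pd.DataFrame({'altitude': np.linspace(0.0, 1.0, n_points) ** 2})
basis_dimension = {'altitude': 3}          # a quadratic is exactly representable
coef = trajectory_to_coef(y, 'legendre', {}, basis_dimension)
y_back = coef_to_trajectory(list(coef), n_points, 'legendre', {},
                            basis_dimension)
print(np.allclose(y['altitude'], y_back['altitude'], atol=1e-8))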
Example #10
    def signal_convolution_sh(self,
                              order,
                              qnorm,
                              tau=1 / (4 * np.pi**2),
                              nb_samples=100):
        r"""
        Returns the convolution operator in spherical harmonics basis, using
        the Funk-Hecke theorem as described in [1]_.

        Parameters
        ----------
        order : int
            The (even) spherical harmonics truncation order.
        qnorm : double
            The norm of q vector in mm\ :superscript:`-1`.
        tau : double
            The diffusion time in s.
        nb_samples : int
            The number of samples controlling the accuracy of the numerical
            integral.

        Note
        ----
        The function implemented here is the general, numerical implementation
        of the Funk-Hecke theorem. It is eventually replaced by analytical
        formula (when available) in subclasses.

        References
        ----------
        .. [1] Descoteaux, Maxime. "High angular resolution diffusion MRI: from
               local estimation to segmentation and tractography." PhD diss.,
               Universite de Nice Sophia-Antipolis, France, 2010.

        """
        cos_thetas = np.linspace(0, 1, nb_samples)
        thetas = np.arccos(cos_thetas)
        qnorms = qnorm * np.ones(nb_samples)
        fir = self.signal(qnorms, thetas, tau)
        H = np.zeros((order + 1, nb_samples))
        dim_sh = shm.dimension(order)
        for l in range(0, order + 1, 2):
            coeffs = np.zeros(l + 1)
            coeffs[l] = 1.0
            H[l, :] = Legendre(coeffs)(cos_thetas)
        ls = list(map(shm.sh_degree, range(dim_sh)))
        rs = np.dot(H, fir) / nb_samples
        return rs[ls]
Example #11
    def _fit_spot_sigma(self, ispec, axis=0, npoly=5):
        """
        Fit the cross-sectional Gaussian sigma of PSF spots vs. wavelength.
        Return callable Legendre object.

        Arguments:
            ispec : spectrum number
            axis  : 0 or 'x' for cross dispersion sigma;
                    1 or 'y' or 'w' for wavelength dispersion
            npoly : order of Legendre poly to fit to sigma vs. wavelength

        Returns:
            legfit such that legfit(w) returns fit at wavelengths w
        """

        if type(axis) is not int:
            if axis in ('x', 'X'):
                axis = 0
            elif axis in ('y', 'Y', 'w', 'W'):
                axis = 1
            else:
                raise ValueError("Unknown axis type {}".format(axis))

        if axis not in (0, 1):
            raise ValueError("axis must be 0, 'x', 1, 'y', or 'w'")

        yy = np.linspace(10, self.npix_y - 10, 20)
        ww = self.wavelength(ispec, y=yy)
        xsig = list()  #- sigma vs. wavelength array to fill
        for w in ww:
            xspot = self.pix(ispec, w).sum(axis=axis)
            xspot /= np.sum(xspot)  #- normalize for edge cases
            xx = np.arange(len(xspot))
            mean, sigma = scipy.optimize.curve_fit(gausspix, xx, xspot)[0]

            xsig.append(sigma)

        #- Fit Legendre polynomial and return coefficients
        legfit = Legendre.fit(ww, xsig, npoly, domain=(self._wmin, self._wmax))

        return legfit
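gausspix is not defined in these snippets; a minimal stand-in consistent with how scipy.optimize.curve_fit uses it above (free parameters: mean and sigma) could look like the following. This is an assumption, not the original DESI helper.

import numpy as np

def gausspix(x, mean, sigma):
    """Normalized Gaussian profile sampled at pixel coordinates x."""
    return np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (np.sqrt(2 * np.pi) * sigma)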
Example #13
    def radius_legendre2(self, theta, a, b, c):
        A = [a, b, c]
        R = Legendre(A)
        x = 2 * np.cos(theta) - 1
        return R(x)
Example #14
def process_arc(qframe, xytraceset, linelist=None, npoly=2, nbins=2):
    """
    qframe: desispec.qframe.QFrame object
    xytraceset : desispec.xytraceset.XYTraceSet object
    linelist: line list to fit
    npoly: polynomial order for sigma expansion
    nbins: number of bins in half of the fitting window
    return: xytraceset (with ysig vs wave)
    """

    log = get_logger()

    if linelist is None:

        if qframe.meta is None or "CAMERA" not in qframe.meta:
            log.error(
                "no information about camera in qframe so I don't know which lines to use"
            )
            raise RuntimeError(
                "no information about camera in qframe so I don't know which lines to use"
            )

        camera = qframe.meta["CAMERA"]
        #- load arc lines
        from desispec.bootcalib import load_arcline_list, load_gdarc_lines, find_arc_lines
        llist = load_arcline_list(camera)
        dlamb, gd_lines = load_gdarc_lines(camera, llist)
        linelist = gd_lines
        log.info(
            "No line list configured. Fitting for lines {}".format(linelist))

    tset = xytraceset

    assert (qframe.nspec == tset.nspec)

    tset.ysig_vs_wave_traceset = TraceSet(np.zeros((tset.nspec, npoly + 1)),
                                          [tset.wavemin, tset.wavemax])

    for spec in range(tset.nspec):
        spec_wave = qframe.wave[spec]
        spec_linelist = linelist[(linelist > spec_wave[0])
                                 & (linelist < spec_wave[-1])]
        meanwaves, emeanwaves, sigmas, esigmas = sigmas_from_arc(
            spec_wave,
            qframe.flux[spec],
            qframe.ivar[spec],
            spec_linelist,
            n=nbins)

        # convert from wavelength A unit to CCD pixel for consistency with specex PSF
        y = tset.y_vs_wave(spec, spec_wave)
        dydw = np.interp(meanwaves, spec_wave,
                         np.gradient(y) / np.gradient(spec_wave))
        sigmas *= dydw  # A -> pixels
        esigmas *= dydw  # A -> pixels

        ok = (sigmas > 0) & (esigmas > 0)

        try:
            thislegfit = Legendre.fit(meanwaves[ok],
                                      sigmas[ok],
                                      npoly,
                                      domain=[tset.wavemin, tset.wavemax],
                                      w=1. / esigmas[ok]**2)
            tset.ysig_vs_wave_traceset._coeff[spec] = thislegfit.coef
        except Exception:
            log.error("legfit of psf width failed for spec {}".format(spec))

        wave = np.linspace(tset.wavemin, tset.wavemax, 20)
        #plt.plot(wave,tset.ysig_vs_wave(spec,wave))

    #plt.show()
    return xytraceset
Example #15
    def radius_legendre4(self, theta, a, b, c, d, e):
        A = [a, b, c, d, e]
        R = Legendre(A)
        x = 2 * np.cos(theta) - 1
        return R(x)
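These radius_legendre* methods are shaped for scipy.optimize.curve_fit, with theta as the independent variable and the Legendre coefficients as the free parameters; the substitution x = 2*cos(theta) - 1 maps theta in [0, pi/2] onto the natural Legendre interval [-1, 1]. A hedged sketch with synthetic data (instance argument dropped):

import numpy as np
from numpy.polynomial.legendre import Legendre
from scipy.optimize import curve_fit

def radius_model(theta, a, b, c):
    # Same form as radius_legendre2, without the instance argument
    return Legendre([a, b, c])(2.0 * np.cos(theta) - 1.0)

theta = np.linspace(0.0, np.pi / 2, 50)
r_obs = radius_model(theta, 1.0, 0.2, 0.05)   # synthetic, noiseless data
popt, _ = curve_fit(radius_model, theta, r_obs)
print(np.round(popt, 3))                      # ~ [1.0, 0.2, 0.05]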
def get_light_detrended_data(data):
    """
    clip +/- 12 hours on every orbit
    fit out a quadratic to each orbit

    return a dict with keys:
        orbit1, orbit2, orbit3, orbit4, sector7, sector9, allsectors

        and each key leads to another dictionary with time and all available
        apertures+detrended stages.
    """

    data_dict = {}
    dtrtypes = ['IRM', 'PCA', 'TFA']
    apnums = [1, 2, 3]
    sectornums = [7, 9]

    orbitgap = 1
    orbitpadding = 0.5

    for sectornum, sector_data in zip(sectornums, data):

        time = sector_data['TMID_BJD']

        flux_sector_dict = {}
        for dtrtype in dtrtypes:
            for apnum in apnums:
                k = dtrtype + str(apnum)
                this_flux = (vp._given_mag_get_flux(sector_data[k]))

                # now mask orbit start and end
                time_copy = deepcopy(time)

                trim_time, trim_this_flux = moe.mask_orbit_start_and_end(
                    time_copy,
                    this_flux,
                    orbitgap=orbitgap,
                    expected_norbits=2,
                    orbitpadding=orbitpadding)

                # now fit out quadratics from each orbit and rejoin them
                norbits, trim_time_groups = lcmath.find_lc_timegroups(
                    trim_time, mingap=orbitgap)
                if norbits != 2:
                    raise AssertionError('expected 2 orbits')

                save_flux = []
                for time_group in trim_time_groups:
                    trim_time_orbit = trim_time[time_group]
                    trim_this_flux_orbit = trim_this_flux[time_group]

                    order = 2  # fit out a quadratic! #FIXME CHECK
                    p = Legendre.fit(trim_time_orbit, trim_this_flux_orbit,
                                     order)
                    trim_this_flux_orbit_fit = p(trim_time_orbit)

                    save_flux_orbit = (trim_this_flux_orbit /
                                       trim_this_flux_orbit_fit)

                    save_flux.append(save_flux_orbit)

                flux_sector_dict[k] = np.concatenate(save_flux)

        # update time to be same length as all the trimmed fluxes
        time = trim_time

        sectorkey = 'sector{}'.format(sectornum)
        data_dict[sectorkey] = {}
        data_dict[sectorkey]['time'] = time
        data_dict[sectorkey]['fluxes'] = flux_sector_dict

    sectorkeys = ['sector{}'.format(s) for s in sectornums]

    # create the "allsectors" dataset
    data_dict['allsectors'] = {}
    data_dict['allsectors']['time'] = np.concatenate(
        [data_dict[sectorkey]['time'] for sectorkey in sectorkeys])

    data_dict['allsectors']['fluxes'] = {}
    for dtrtype in dtrtypes:
        for apnum in apnums:
            k = dtrtype + str(apnum)
            data_dict['allsectors']['fluxes'][k] = np.concatenate([
                data_dict[sectorkey]['fluxes'][k] for sectorkey in sectorkeys
            ])

    return data_dict
Example #17
def quicksim_input_data(psffile, ww, ifiber=100):
    assert 0 <= ifiber < 500

    #- Read input data
    #- spots[i,j] is a 2D PSF spot sampled at
    #- slit position spotpos[i] and wavelength spotwave[j].
    #- Fiber k is located on the slit at fiberpos[k].
    spots = fitsio.read(psffile, 'SPOTS')
    spotwave = fitsio.read(psffile, 'SPOTWAVE')
    spotpos = fitsio.read(psffile, 'SPOTPOS')
    fiberpos = fitsio.read(psffile, 'FIBERPOS')
    hdr = fitsio.read_header(psffile)

    nwave = len(spotwave)
    npos = len(spotpos)
    nfiber = len(fiberpos)

    pixsize = int(round(float(hdr['CCDPIXSZ']) / hdr['CDELT1']))

    #- Measure the FWHM of the spots in x and y
    spot_fwhm_x = N.zeros((npos, nwave))
    spot_fwhm_y = N.zeros((npos, nwave))
    spot_neff = N.zeros((npos, nwave))
    for i in range(npos):
        for j in range(nwave):
            fx, fy = img_fwhm(spots[i, j])
            spot_fwhm_x[i, j] = fx
            spot_fwhm_y[i, j] = fy
            spot_neff[i, j] = calc_neff(spots[i, j], pixsize)

    #- For each spot wavelength, interpolate to the location of ifiber
    fiber_fwhm_x = N.zeros(nwave)
    fiber_fwhm_y = N.zeros(nwave)
    fiber_neff = N.zeros(nwave)

    for j in range(nwave):
        spx = InterpolatedUnivariateSpline(spotpos, spot_fwhm_x[:, j])
        fiber_fwhm_x[j] = spx(fiberpos[ifiber])
        spy = InterpolatedUnivariateSpline(spotpos, spot_fwhm_y[:, j])
        fiber_fwhm_y[j] = spy(fiberpos[ifiber])

        spn = InterpolatedUnivariateSpline(spotpos, spot_neff[:, j])
        fiber_neff[j] = spn(fiberpos[ifiber])

    #- Interpolate onto ww wavelength grid
    spx = InterpolatedUnivariateSpline(spotwave, fiber_fwhm_x)
    fwhm_x = spx(ww)
    spy = InterpolatedUnivariateSpline(spotwave, fiber_fwhm_y)
    fwhm_y = spy(ww)

    #- Convert fwhm units from spot pixels to CCD pixels
    #- Use units propagated from original spots calculations, not desi.yaml
    fwhm_x /= pixsize
    fwhm_y /= pixsize

    #- Final Neff sampled on same wavelength grid
    spn = InterpolatedUnivariateSpline(spotwave, fiber_neff)
    neff = spn(ww)

    #- Angstroms per row
    from numpy.polynomial.legendre import Legendre
    ycoeff, yhdr = fitsio.read(psffile, 'YCOEFF', header=True)
    domain = (yhdr['WAVEMIN'], yhdr['WAVEMAX'])
    y = Legendre(ycoeff[ifiber], domain=domain)(ww)
    ang_per_row = N.gradient(ww) / N.gradient(y)

    #- Convert fwhm_y from pixels to Angstroms
    fwhm_y *= ang_per_row

    data = N.rec.fromarrays(
        [ww, fwhm_y, fwhm_x, neff, ang_per_row],
        names="wavelength,fwhm_wave,fwhm_spatial,neff_spatial,angstroms_per_row"
    )

    return data
Example #18
    def derivatives(self):
        functions = [
            Legendre(np.polynomial.legendre.legder(polynomial.coef))
            for polynomial in self._functions
        ]
        return BasisFunctions(functions=functions)
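Sanity check for the construction above: applying legder to a series' coefficients matches the Legendre.deriv() convenience method for the default domain (note the snippet above drops any non-default domain). A sketch:

import numpy as np
from numpy.polynomial.legendre import Legendre, legder

p = Legendre([1.0, 2.0, 3.0])
assert np.allclose(Legendre(legder(p.coef)).coef, p.deriv().coef)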
Example #19
def fit_wsigmas(means, wsigmas, ewsigmas, npoly=2, domain=None):
    #- return callable legendre object
    wt = 1 / ewsigmas**2
    legfit = Legendre.fit(means, wsigmas, npoly, domain=domain, w=wt)

    return legfit
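Usage sketch for fit_wsigmas with synthetic line-width measurements; the wavelength range below is illustrative.

import numpy as np

means = np.linspace(3600.0, 5800.0, 15)         # line centroids [Angstrom]
wsigmas = 1.0 + 1e-4 * (means - means.mean())   # widths [pixels]
ewsigmas = 0.05 * np.ones_like(wsigmas)         # width uncertainties
legfit = fit_wsigmas(means, wsigmas, ewsigmas, npoly=2, domain=(3600.0, 5800.0))
print(legfit(4500.0))                           # evaluate the fit at 4500 A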
Example #20
    def generate_gauss_lobatto_nodes(self, num_points):
        nodes = Legendre.basis(num_points - 1).deriv().roots()
        return np.concatenate(([-1], nodes, [1]))
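Gauss-Lobatto nodes are the interval endpoints plus the roots of P'_{n-1}; a standalone check (the method needs no instance state, so self is dropped):

import numpy as np
from numpy.polynomial.legendre import Legendre

def gauss_lobatto_nodes(num_points):
    interior = Legendre.basis(num_points - 1).deriv().roots()
    return np.concatenate(([-1.0], interior, [1.0]))

print(gauss_lobatto_nodes(4))   # approx [-1, -0.4472, 0.4472, 1]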
def get_lc_given_fficutout(workingdir, cutouts, c_obj, return_pkl=False):
    """
    Do simple aperture photometry on FFI cutouts. Uses world's simplest
    background subtraction -- the cutout median. Imposes an aperture radius of
    3 pixels. Invents the error bars as 1/sqrt(n_counts).

    To clean up the light curve, the following steps did a decent job:

    If multi-sector, each sector is normalized by its median. Sectors are
    then stitched together, and only quality==0 cadences are taken.
    If any "timegroup" (usually sectors, but not strictly -- here I define it
    by 0.5 day gaps) has much worse interquartile range than the other (>5x the
    median IQR), drop that timegroup. This usually means that the star was on
    bad pixels, or the edge of the detector, or something similar for one
    sector.

    Then, sigma clip out any ridiculous outliers via [7sigma, 7sigma] symmetric
    clip.

    Then, required all fluxes and errors were finite, and for each timegroup
    masked out 0.5 days at the beginning, and 0.5 days at the end. This makes
    the gaps bigger, but mostly throws out ramp systematic-infested data that
    otherwise throws off the period measurement.

    Finally, an apparently relatively common long-term systematic in these LCs
    looks just like a linear slope over the course of an orbit. (Maybe b/c
    stars are moving, but aperture center is not? Unclear.) Therefore, to
    detrend, I fit out a LINE in time from each time group, if the group has at
    least two days worth of data. (Not if shorter, to avoid overfitting).

    Then, save the lightcurve and related data to a pickle file, in workingdir.
    If the pickle is found to already exist, and return_pkl is True, it is
    loaded and returned.

    ----------
    args:

        workingdir (str): directory to which the light curve is written

        cutouts (list of length at least 1): paths to fficutouts. assumed to be
        from different sectors.

        c_obj (astropy.skycoord): coordinate of object, used to project wcs to
        image.

    ----------
    returns:

        if fails to get a good light curve, returns None. else, returns
        dictionary with the following keys.

        'time': time array
        'quality': quality flag array
        'flux': sigma-clipped flux (counts)
        'rel_flux': sigma-clipped relative flux, fully detrended
        'rel_flux_err': sigma-clipped relative flux errors
        'predetrending_time':
        'predetrending_rel_flux': before fitting out the line, rel flux values
        'predetrending_rel_flux_err':
        'x': location of aperture used to extract light curve
        'y': ditto
        'median_imgs': list of median images of the stack used to extract apertures
        'cutout_wcss': WCSs to the above images
    """

    outpath = os.path.join(workingdir, 'multisector_lc.pkl')
    if os.path.exists(outpath) and not return_pkl:
        print('WRN! found {}, returning without load'.format(outpath))
        return
    elif os.path.exists(outpath) and return_pkl:
        print('found {}, returning with load'.format(outpath))
        with open(outpath, 'rb') as f:
            out_dict = pickle.load(f)
        return out_dict

    if len(cutouts) == 0:
        raise AssertionError('something wrong in tesscut! fix this!')

    # img_flux and img_flux_err are image cubes of (time x spatial x spatial).
    # make lists of them for each sector.
    img_fluxs = [iu.get_data_keyword(f, 'FLUX') for f in cutouts]

    # for the background, just take the median of the image. very simple.
    bkgd_fluxs = [np.array([np.nanmedian(img_flux[ix, :, :]) for ix in
                            range(len(img_flux))]) for img_flux in img_fluxs]

    img_flux_errs = [iu.get_data_keyword(f, 'FLUX_ERR') for f in cutouts]

    times = [iu.get_data_keyword(f, 'TIME')+2457000 for f in cutouts]
    qualitys = [iu.get_data_keyword(f, 'QUALITY') for f in cutouts]

    cut_hduls = [fits.open(f) for f in cutouts]
    cutout_wcss = [wcs.WCS(cuthdul[2].header) for cuthdul in cut_hduls]

    # get the location to put down the apertures
    try:
        xs, ys = [], []
        for cutout_wcs in cutout_wcss:
            _x, _y = cutout_wcs.all_world2pix(
                c_obj.icrs.ra, c_obj.icrs.dec, 0
            )
            xs.append(_x)
            ys.append(_y)
    except Exception as e:
        print('ERR! wcs all_world2pix got {}'.format(repr(e)))
        import IPython; IPython.embed()

    #
    # plop down a 3 pixel circular aperture at the locations. then make the
    # light curves by doing the sum!
    #
    positions = [(x, y) for x,y in zip(xs, ys)]

    try:
        circ_apertures = [
            CircularAperture(position, r=3) for position in positions
        ]

    except ValueError as e:

        print('ERR1 {}'.format(e))

        out_dict = {
            'time':[],
            'quality':[],
            'flux':[],
            'rel_flux':[],
            'rel_flux_err':[],
            'predetrending_time':[],
            'predetrending_rel_flux':[],
            'predetrending_rel_flux_err':[]
        }

        with open(outpath, 'wb') as f:
            pickle.dump(out_dict, f)

        return None

    fluxs = []
    median_imgs = []
    # iterate over sectors
    for img, bkgd, aper in zip(img_fluxs, bkgd_fluxs, circ_apertures):

        img_stack = img - bkgd[:,None,None]

        # iterate over cadences in sector
        s_flux = []
        for _img in img_stack:

            phot_table = aperture_photometry(_img, aper)

            s_flux.append(phot_table['aperture_sum'])

        fluxs.append(np.array(s_flux))

        median_img = np.nanmedian(img_stack, axis=0)
        median_imgs.append(median_img)

    # normalize each sector by its median
    rel_fluxs = [f/np.nanmedian(f) for f in fluxs]
    rel_flux_errs = [np.sqrt(f)/np.nanmedian(f) for f in fluxs]

    #
    # concatenate sectors together and take only quality==0 cadences.
    #
    time = np.concatenate(times).flatten()
    quality = np.concatenate(qualitys).flatten()
    flux = np.concatenate(fluxs).flatten()
    rel_flux = np.concatenate(rel_fluxs).flatten()
    rel_flux_err = np.concatenate(rel_flux_errs).flatten()

    sel = (quality == 0)

    time = time[sel]
    flux = flux[sel]
    rel_flux = rel_flux[sel]
    rel_flux_err = rel_flux_err[sel]
    quality = quality[sel]

    #
    # sort everything into time order
    #
    sind = np.argsort(time)

    time = time[sind]
    quality = quality[sind]
    flux = flux[sind]
    rel_flux = rel_flux[sind]
    rel_flux_err = rel_flux_err[sind]

    #
    # if any "timegroup" (usually sectors, but not strictly -- here I define it
    # by 0.5 day gaps) has much worse interquartile range, drop it. This
    # usually means that the star was on bad pixels, or the edge of the
    # detector, or something similar for one sector.
    #
    ngroups, groups = find_lc_timegroups(time, mingap=0.5)

    rel_flux_iqrs = nparr([
        iqr(rel_flux[group], rng=(25,75)) for group in groups]
    )

    if ngroups >= 3:

        median_iqr = np.nanmedian(rel_flux_iqrs)

        bad_groups = (rel_flux_iqrs > 5*median_iqr)

        if len(bad_groups[bad_groups]) > 0:
            print('WRN! got {} bad time-groups. dropping them.'.
                  format(len(bad_groups[bad_groups])))

            gd_inds = nparr(groups)[~bad_groups]

            time = np.concatenate([time[gd] for gd in gd_inds]).flatten()
            quality = np.concatenate([quality[gd] for gd in gd_inds]).flatten()
            flux = np.concatenate([flux[gd] for gd in gd_inds]).flatten()
            rel_flux = np.concatenate([rel_flux[gd] for gd in gd_inds]).flatten()
            rel_flux_err = np.concatenate([rel_flux_err[gd] for gd in gd_inds]).flatten()

        else:
            # did not find any bad groups
            pass

    else:
        # trickier to detect outlying sectors with fewer groups
        pass

    #
    # sigma clip out any ridiculous outliers via [7sigma, 7sigma] symmetric
    # clip.
    #
    stime, srel_flux, srel_flux_err, [sflux, squality] = (
        sigclip_magseries_with_extparams(
        time, rel_flux, rel_flux_err, [flux, quality],
        sigclip=[7,7], iterative=False, magsarefluxes=True)
    )

    #
    # require finite fluxes. then mask gap edges. if you get no finite values,
    # save the dud pickle and return None.
    #
    sel = np.isfinite(srel_flux)

    stime = stime[sel]
    sflux = sflux[sel]
    srel_flux = srel_flux[sel]
    srel_flux_err = srel_flux_err[sel]
    squality = squality[sel]

    if len(stime) == len(sflux) == 0:

        out_dict = {
            'time':stime,
            'quality':squality,
            'flux':sflux,
            'rel_flux':srel_flux,
            'rel_flux_err':srel_flux_err,
            'predetrending_time':stime,
            'predetrending_rel_flux':srel_flux,
            'predetrending_rel_flux_err':srel_flux_err,
            'x':np.array(xs).flatten(),
            'y':np.array(ys).flatten(),
            'median_imgs': median_imgs,
            'cutout_wcss': cutout_wcss
        }

        with open(outpath, 'wb') as f:
            pickle.dump(out_dict, f)

        return None

    if not np.any(median_imgs[0]) and np.any(sflux):
        print('somehow getting nan image but finite flux')
        import IPython; IPython.embed()

    stime, srel_flux, [srel_flux_err, sflux, squality] = (
        lcu.mask_timegap_edges(stime, srel_flux,
                               othervectors=[srel_flux_err, sflux, squality],
                               gap=0.5, padding=12/(24))
    )

    #
    # Fit out a LINE in time from each time group, if the group has at least
    # two days worth of data. I added this because an apparently relatively
    # common long-term systematic in these LCs looks just like a linear slope
    # over the course of an orbit. (Maybe b/c stars are moving, but aperture
    # center is not? Unclear.)
    #
    predetrending_time = stime
    predetrending_rel_flux = srel_flux
    predetrending_rel_flux_err = srel_flux_err

    ngroups, groups = find_lc_timegroups(stime, mingap=0.5)

    _time, _rflux, _rflux_err = [], [], []

    for group in groups:

        tg_time = stime[group]
        tg_rel_flux = srel_flux[group]
        tg_rel_flux_err = srel_flux_err[group]

        if len(tg_time) <= 1:
            # singletons or zero-groups would cause problems
            continue

        if tg_time.max() - tg_time.min() < 2:

            # don't try fitting out trends in small time groups (risks
            # overfitting).

            _time.append(tg_time)
            _rflux.append(tg_rel_flux)
            _rflux_err.append(tg_rel_flux_err)

            continue

        try:

            p = Legendre.fit(tg_time, tg_rel_flux, 1)
            coeffs = p.coef

            tg_fit_rel_flux = p(tg_time)

            # divide out the linear fit
            tg_dtr_rel_flux = tg_rel_flux/tg_fit_rel_flux

            _time.append(tg_time)
            _rflux.append(tg_dtr_rel_flux)
            _rflux_err.append(tg_rel_flux_err)

        except np.linalg.LinAlgError:
            print('WRN! Legendre.fit failed, b/c bad data for this group. '
                  'Continue.')
            continue

    if len(_time) == 0:

        out_dict = {
            'time':stime,
            'quality':squality,
            'flux':sflux,
            'rel_flux':srel_flux,
            'rel_flux_err':srel_flux_err,
            'predetrending_time':stime,
            'predetrending_rel_flux':srel_flux,
            'predetrending_rel_flux_err':srel_flux_err,
            'x':np.array(xs).flatten(),
            'y':np.array(ys).flatten(),
            'median_imgs': median_imgs,
            'cutout_wcss': cutout_wcss
        }

        with open(outpath, 'wb') as f:
            pickle.dump(out_dict, f)

        return None

    stime = np.concatenate(_time).flatten()
    srel_flux = np.concatenate(_rflux).flatten()
    srel_flux_err = np.concatenate(_rflux_err).flatten()

    #
    # 1-dimensional arrays:
    # time, quality, flux, rel_flux, and rel_flux_err are all of same length.
    #
    # xs and ys are length n_sectors; they are the positions used in the
    # aperture.
    #
    # median_imgs is list of length n_sectors, for which each entry is the
    # median image in that sector.
    #
    # cutout_wcss is list of length n_sectors, each entry is the WCS
    # corresponding to median_image
    #
    out_dict = {
        'time':stime,
        'quality':squality,
        'flux':sflux,
        'rel_flux':srel_flux,
        'rel_flux_err':srel_flux_err,
        'predetrending_time':predetrending_time,
        'predetrending_rel_flux':predetrending_rel_flux,
        'predetrending_rel_flux_err':predetrending_rel_flux_err,
        'x':np.array(xs).flatten(),
        'y':np.array(ys).flatten(),
        'median_imgs': median_imgs,
        'cutout_wcss': cutout_wcss
    }

    with open(outpath, 'wb') as f:
        pickle.dump(out_dict, f)

    if len(stime) == len(sflux) == 0:
        return None

    else:
        return out_dict
Example #23
def legendre_fit_magseries(times, mags, errs, period,
                           legendredeg=10,
                           sigclip=30.0,
                           plotfit=False,
                           magsarefluxes=False,
                           verbose=True):

    '''Fit an arbitrary-order Legendre series, via least squares, to the
    magnitude/flux time series.

    This is a series of the form::

        p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)

    where L_i's are Legendre polynomials (also called "Legendre functions of the
    first kind") and c_i's are the coefficients being fit.

    This function is mainly just a wrapper to
    `numpy.polynomial.legendre.Legendre.fit`.

    Parameters
    ----------

    times,mags,errs : np.array
        The input mag/flux time-series to fit a Legendre series polynomial to.

    period : float
        The period to use for the Legendre fit.

    legendredeg : int
        This is `n` in the equation above, e.g. if you give `n=5`, you will
        get 6 coefficients. This number should be much less than the number of
        data points you are fitting.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    magsarefluxes : bool
        If True, will treat the input values of `mags` as fluxes for purposes of
        plotting the fit and sig-clipping.

    plotfit : str or False
        If this is a string, this function will make a plot for the fit to the
        mag/flux time-series and writes the plot to the path specified here.

    verbose : bool
        If True, will indicate progress and warn of any problems.

    Returns
    -------

    dict
        This function returns a dict containing the model fit parameters, the
        minimized chi-sq value and the reduced chi-sq value. The form of this
        dict is mostly standardized across all functions in this module::

            {
                'fittype':'legendre',
                'fitinfo':{
                    'legendredeg': the Legendre polynomial degree used,
                    'fitmags': the model fit mags,
                    'fitepoch': the epoch of minimum light for the fit,
                },
                'fitchisq': the minimized value of the fit's chi-sq,
                'fitredchisq':the reduced chi-sq value,
                'fitplotfile': the output fit plot if plotfit is not None,
                'magseries':{
                    'times':input times in phase order of the model,
                    'phase':the phases of the model mags,
                    'mags':input mags/fluxes in the phase order of the model,
                    'errs':errs in the phase order of the model,
                    'magsarefluxes':input value of magsarefluxes kwarg
                }
            }


    '''
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    phase, pmags, perrs, ptimes, mintime = (
        get_phased_quantities(stimes, smags, serrs, period)
    )

    if verbose:
        LOGINFO('fitting Legendre series with '
                'maximum Legendre polynomial order %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' % (legendredeg,
                                                       len(pmags),
                                                       period,
                                                       mintime))

    # Least squares fit of Legendre polynomial series to the data. The window
    # and domain (see "Using the Convenience Classes" in the numpy
    # documentation) are handled automatically, scaling the times to a minimal
    # domain in [-1,1], in which Legendre polynomials are a complete basis.

    p = Legendre.fit(phase, pmags, legendredeg)
    coeffs = p.coef
    fitmags = p(phase)

    # Now compute the chisq and red-chisq.

    fitchisq = npsum(
        ((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
    )

    nparams = legendredeg + 1
    fitredchisq = fitchisq/(len(pmags) - nparams - 1)

    if verbose:
        LOGINFO(
            'Legendre fit done. chisq = %.5f, reduced chisq = %.5f' %
            (fitchisq, fitredchisq)
        )

    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    if len(fitmagminind[0]) > 1:
        fitmagminind = (fitmagminind[0][0],)
    magseriesepoch = ptimes[fitmagminind]

    # assemble the returndict
    returndict = {
        'fittype':'legendre',
        'fitinfo':{
            'legendredeg':legendredeg,
            'fitmags':fitmags,
            'fitepoch':magseriesepoch,
            'finalparams':coeffs,
        },
        'fitchisq':fitchisq,
        'fitredchisq':fitredchisq,
        'fitplotfile':None,
        'magseries':{
            'times':ptimes,
            'phase':phase,
            'mags':pmags,
            'errs':perrs,
            'magsarefluxes':magsarefluxes
        }
    }

    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):

        make_fit_plot(phase, pmags, perrs, fitmags,
                      period, mintime, magseriesepoch,
                      plotfit,
                      magsarefluxes=magsarefluxes)

        returndict['fitplotfile'] = plotfit

    return returndict
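A usage sketch with a synthetic flux time series; this relies on the astrobase helpers (sigclip_magseries, get_phased_quantities, ...) imported by the surrounding module.

import numpy as np

period = 2.5
times = np.sort(np.random.uniform(0.0, 30.0, 500))
fluxes = 1.0 + 0.05 * np.sin(2.0 * np.pi * times / period)
errs = 0.01 * np.ones_like(fluxes)
fit = legendre_fit_magseries(times, fluxes, errs, period,
                             legendredeg=10, magsarefluxes=True)
print(fit['fitredchisq'])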
# Elisabeth and Hannah work things out

# from GJ 1132/working/ 
# wavecal[0] holds the coefficients of an N-degree Legendre polynomial; wavecal[1] is the domain over which it applies
wavecal = np.load('aperture_832_1173_wavelengthcalibration.npy')[()]

# from GJ 1132/working/
stamp = np.load('calibStamps_aperture_832_1173.npy')[()]

# recreate the conversion from pixel position to wavelength
from numpy.polynomial.legendre import Legendre 
L = Legendre(wavecal[0], domain=wavecal[1])

# make an interpolation to go from wavelength to pixel position
# stamp['w'] is the pixel range centered on the star, e.g., array([-1173, -1172, -1171, ...,   872,   873,   874])

def getPixelPosition(wavelength):
    return np.interp(wavelength, L(stamp['w'][:,0]), stamp['w'][:,0])




# some wavelengths from LDSS3C manual calibration; WE NEED TO MAKE THESE FOR IMACS GRISMS!
from astropy.io import ascii
waves = ascii.read('/h/mulan0/code/mosasaurus/data/IMACS/gri-150-10.8/gri-150-10.8_wavelength_identifications.txt')
Example #25
def ODFLegendreOptimiser(data_q, reference_run):
	n_threads     = np.shape(data_q)[0]
	n_l           = np.shape(data_q)[-1]
	
	d_theta       = np.pi / (n_steps_theta-1.)
	
	eta_grid      = np.linspace(eta_min, eta_max, n_steps_eta)
	theta_grid    = np.linspace(0., np.pi, n_steps_theta)
	
	coeffs        = np.sqrt(np.arange(n_l) + 0.5)
	
	S_res         = np.zeros(n_steps_eta)
	F_tot         = np.zeros([n_threads, n_steps_eta])
	
	for idx_thread,E_q_loc in enumerate(data_q):
		print("Processing thread %d out of %d" % tuple([idx_thread+1, n_threads]))
		sys.stdout.flush()
		
		for idx_eta, eta in enumerate(eta_grid):
			converged = False
			n_dens = eta / V0
			g_pl   = (1-0.75*eta) / (1-eta)**2
			
			psi    = np.exp(-theta_grid**2)
			psi_l  = np.zeros(n_l)

			# ODF self-consistent solver
			for idx_iter in range(n_steps_max):
				for l in range(n_l):
					if np.any(E_q_loc[l,:] != 0.):
						order    = np.zeros(l+1)
						order[l] = 1
						psi_l[l] = (Legendre(order)(np.cos(theta_grid)) * psi * np.sin(theta_grid)).sum()

				psi_l      *= coeffs * d_theta
				psi_dummy   = psi.copy()

				coeffs_excl = np.dot(E_q_loc+E_q_loc.T, psi_l) / 2.

				psi         = np.exp(-g_pl * n_dens/(2*np.pi)**2 * Legendre(coeffs * coeffs_excl)(np.cos(theta_grid)))
				psi        /= (2*np.pi)**2 * d_theta * (psi * np.sin(theta_grid)).sum()
									 
				if np.max(np.abs(psi - psi_dummy)) < tol_odf:
					converged = True
					break

			if not converged:
				print("\033[1;31mConvergence failed - eta=%f\033[0m" % eta)
				sys.exit()

			# Free energy
			b2    = (np.outer(psi_l, psi_l) * E_q_loc).sum() / 2.

			f_id  = n_dens * (2*np.pi)**2 * d_theta * (np.sin(theta_grid) * psi * np.log(psi)).sum()
			f_exc = b2/V0 * n_dens * eta*g_pl

			F_tot[idx_thread, idx_eta] = f_id + f_exc

			# Order parameter
			if reference_run:
				s_tmp          = (3*np.cos(theta_grid)**2 - 1) / 2.
				s_tmp         *= (2*np.pi)**2 * d_theta * np.sin(theta_grid) * psi

				S_res[idx_eta] = s_tmp.sum()

		if reference_run:
			data_res       = np.zeros([n_steps_eta, 2])
			data_res[:, 0] = eta_grid
			data_res[:, 1] = S_res

			path_order     = path_data + '/order_param.res'
			np.savetxt(path_order, data_res)

			print("\033[1;32mOrder parameter printed to '%s'\033[0m" % path_order)
			
	return F_tot
Example #26
def compute_legendre_dot_product_derivatives(basis_dimension):
    """
    Compute dot products of Legendre polynomials and their derivatives.

    Input:
        - basis_dimension: dict
            Give the number of basis functions for each state
    Outputs:
        - dot_product_12: ndarray
            Array containing the dot products of Legendre polynomials
            with their derivatives
        - dot_product_22: ndarray
            Array containing the dot products of Legendre polynomials
            derivatives
    """
    # Compute the dimension of the problem
    dimension = np.sum([basis_dimension[elt] for elt in basis_dimension])
    dot_product_12 = np.zeros([dimension, dimension])
    dot_product_22 = np.zeros([dimension, dimension])
    i, j = 0, 0
    # Loop over states
    for state1 in basis_dimension:
        for state2 in basis_dimension:
            for k in range(basis_dimension[state1]):
                c_k = np.zeros(basis_dimension[state1])
                c_k[k] += 1
                # Create Legendre class for k-th polynomial
                c_k = Legendre(c_k, domain=[0, 1])
                # Compute derivative
                c_k_deriv = c_k.deriv()
                for l in range(basis_dimension[state2]):
                    c_l = np.zeros(basis_dimension[state2])
                    c_l[l] += 1
                    # Create Legendre class for the l-th polynomial
                    c_l = Legendre(c_l, domain=[0, 1])
                    # Compute derivative
                    c_l_deriv = c_l.deriv()
                    # Multiply polynomials
                    product_12 = legmul(list(c_k), list(c_l_deriv))
                    product_22 = legmul(list(c_k_deriv), list(c_l_deriv))
                    # Create classes
                    product_12 = Legendre(product_12, domain=[0, 1])
                    product_22 = Legendre(product_22, domain=[0, 1])
                    # Integrate
                    int_product_12 = product_12.integ()
                    int_product_22 = product_22.integ()
                    # Evaluate at the endpoints
                    _, traj_deriv_12 = int_product_12.linspace(n=2)
                    _, traj_deriv_22 = int_product_22.linspace(n=2)
                    # Deduce dot products
                    dot_product_12[i + k, j + l] += traj_deriv_12[1]
                    dot_product_12[i + k, j + l] -= traj_deriv_12[0]
                    dot_product_22[i + k, j + l] += traj_deriv_22[1]
                    dot_product_22[i + k, j + l] -= traj_deriv_22[0]
            j += basis_dimension[state2]
        j = 0
        i += basis_dimension[state1]

    return dot_product_12, dot_product_22
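A hedged numerical cross-check of one entry against scipy.integrate.quad, using the same domain-[0, 1] Legendre construction as the function above:

import numpy as np
from numpy.polynomial.legendre import Legendre
from scipy.integrate import quad

dot_12, dot_22 = compute_legendre_dot_product_derivatives({'x': 3})
P1 = Legendre([0, 1, 0], domain=[0, 1])
P2 = Legendre([0, 0, 1], domain=[0, 1])
val, _ = quad(lambda t: P1(t) * P2.deriv()(t), 0.0, 1.0)
print(np.isclose(dot_12[1, 2], val))   # expected: True (both ~2.0)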
Example #27
def Lrd(y):
    """Return the Legendre polynomial P_y(x)."""
    coef = (0,) * y + (1,)
    return Legendre(coef, [-1, 1])
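Quick check: Lrd(2) reproduces P_2(x) = (3x^2 - 1)/2.

print(Lrd(2)(0.5))   # -0.125, i.e. (3 * 0.25 - 1) / 2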
    def generate_gauss_nodes(self, num_points):
        return Legendre.basis(num_points).roots()
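The roots of P_n are exactly the Gauss-Legendre quadrature nodes; a cross-check against numpy's leggauss (self dropped, as above):

import numpy as np
from numpy.polynomial.legendre import Legendre, leggauss

nodes = Legendre.basis(4).roots()
ref_nodes, _ = leggauss(4)
assert np.allclose(np.sort(nodes), np.sort(ref_nodes))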
Example #29
def legendre_fit_magseries(times,
                           mags,
                           errs,
                           period,
                           legendredeg=10,
                           sigclip=30.0,
                           plotfit=False,
                           magsarefluxes=False,
                           verbose=True):
    '''
    Fit an arbitrary-order Legendre series, via least squares, to the
    magnitude/flux time series. This is a series of the form:

        p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)

    where L_i's are Legendre polynomials (also called "Legendre functions of
    the first kind") and c_i's are the coefficients being fit.

    Args:

    legendredeg (int): n in the above equation. (I.e., if you give n=5, you
    will get 6 coefficients). This number should be much less than the number
    of data points you are fitting.

    sigclip (float): number of standard deviations away from the mean of the
    magnitude time-series from which to "clip" data points.

    magsarefluxes (bool): sets the ylabel and ylimits of plots for either
    magnitudes (False) or flux units (i.e. normalized to 1, in which case
    magsarefluxes should be set to True).

    Returns:

    returndict:
    {
        'fittype':'legendre',
        'fitinfo':{
            'legendredeg':legendredeg,
            'fitmags':fitmags,
            'fitepoch':magseriesepoch
        },
        'fitchisq':fitchisq,
        'fitredchisq':fitredchisq,
        'fitplotfile':None,
        'magseries':{
            'times':ptimes,
            'phase':phase,
            'mags':pmags,
            'errs':perrs,
            'magsarefluxes':magsarefluxes},
    }

    where `fitmags` is the values of the fit function interpolated onto
    magseries' `phase`.

    This function is mainly just a wrapper to
    numpy.polynomial.legendre.Legendre.fit.

    '''
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    phase, pmags, perrs, ptimes, mintime = (_get_phased_quantities(
        stimes, smags, serrs, period))

    if verbose:
        LOGINFO('fitting Legendre series with '
                'maximum Legendre polynomial order %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' %
                (legendredeg, len(pmags), period, mintime))

    # Least squares fit of Legendre polynomial series to the data. The window
    # and domain (see "Using the Convenience Classes" in the numpy
    # documentation) are handled automatically, scaling the times to a minimal
    # domain in [-1,1], in which Legendre polynomials are a complete basis.

    p = Legendre.fit(phase, pmags, legendredeg)
    coeffs = p.coef
    fitmags = p(phase)

    # Now compute the chisq and red-chisq.

    fitchisq = npsum(((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))

    nparams = legendredeg + 1
    fitredchisq = fitchisq / (len(pmags) - nparams - 1)

    if verbose:
        LOGINFO('Legendre fit done. chisq = %.5f, reduced chisq = %.5f' %
                (fitchisq, fitredchisq))

    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    magseriesepoch = ptimes[fitmagminind]

    # assemble the returndict
    returndict = {
        'fittype': 'legendre',
        'fitinfo': {
            'legendredeg': legendredeg,
            'fitmags': fitmags,
            'fitepoch': magseriesepoch,
            'finalparams': coeffs,
        },
        'fitchisq': fitchisq,
        'fitredchisq': fitredchisq,
        'fitplotfile': None,
        'magseries': {
            'times': ptimes,
            'phase': phase,
            'mags': pmags,
            'errs': perrs,
            'magsarefluxes': magsarefluxes
        }
    }

    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):

        _make_fit_plot(phase,
                       pmags,
                       perrs,
                       fitmags,
                       period,
                       mintime,
                       magseriesepoch,
                       plotfit,
                       magsarefluxes=magsarefluxes)

        returndict['fitplotfile'] = plotfit

    return returndict
Example #30
def main():
    """
    Convert simulated DESI spectrograph PSF spots into Specter PSF format.

    Spots and their CCD (x,y) location are provided on a grid of slit positions
    and wavelengths.  Fiber number and CCD x position increase with slit position;
    CCD y position increases with wavelength.  These spots and locations must
    be interpolated to the actual fiber positions on the slit and
    to arbitrary wavelengths.

    This code writes a Specter SpotGridPSF format to encode this information.

    Stephen Bailey, LBL
    September 2013
    """

    import sys
    import os
    import numpy as N
    from scipy import ndimage  #- for center_of_mass and shift
    from numpy.polynomial.legendre import Legendre
    import fitsio
    import yaml

    #- Load options
    import argparse
    parser = argparse.ArgumentParser(prog=sys.argv[0])
    # parser.add_argument("-p", "--prefix", action='store',  help="input psf files prefix, including path")
    parser.add_argument("-o",
                        "--outpsf",
                        action='store',
                        help="output PSF file",
                        default='psf-blat.fits')
    # parser.add_argument("-t", "--throughput", action='store',  help="input throughput file to embed with PSF")
    parser.add_argument("-d",
                        "--debug",
                        action="store_true",
                        help="start ipython prompt when done")
    parser.add_argument("-c",
                        "--camera",
                        action='store',
                        help="camera: b, r, or z")
    parser.add_argument('spotfiles',
                        action='store',
                        help='Input spot files',
                        nargs='+')

    opts = parser.parse_args()

    if len(opts.spotfiles) == 0:
        print("ERROR: no input spot files given", file=sys.stderr)
        return 1

    #- Read DESI parameters
    params = yaml.load(open(os.getenv('DESIMODEL') + '/data/desi.yaml'))

    #- Get dimensions from first spot file
    hdr = fitsio.read_header(opts.spotfiles[0])
    SpotPixelSize = hdr['PIXSIZE']  #- PSF spot pixel size in mm

    #- Hardcode spectrograph and CCD dimensions
    # CcdPixelSize = 0.015   #- CCD pixel size in mm
    # FiberSpacing = 0.230   #- center-to-center spacing in mm
    # GroupSpacing = 0.556   #- center-to-center group gap in mm
    # FibersPerGroup = 25
    # GroupsPerCcd = 20
    # NumFibers = 500
    # NumPixX = 4096
    # NumPixY = 4096
    # nspec = FibersPerGroup * GroupsPerCcd

    #- CCD pixel size in mm
    CcdPixelSize = params['ccd'][opts.camera]['pixsize'] / 1000.0  #- um -> mm

    #- center-to-center fiber spacing in mm on slit
    FiberSpacing = params['spectro']['fiber_spacing']

    #- center-to-center fiber group gap in mm on slit
    GroupSpacing = params['spectro']['fiber_group_spacing']

    FibersPerGroup = params['spectro']['fibers_per_group']
    GroupsPerCcd = params['spectro']['groups_per_ccd']
    NumFibers = params['spectro']['nfibers']
    NumPixX = params['ccd'][opts.camera]['npix_x']
    NumPixY = params['ccd'][opts.camera]['npix_y']
    nspec = FibersPerGroup * GroupsPerCcd

    #- Determine grid of wavelengths and fiber positions for the spots
    #- Use set() to get unique values, then convert to sorted array
    #- spotgrid maps (fiberpos, wavelength) -> filename
    print("Determining wavelength and slit position grid")
    wavelength = set()
    spotpos = set()
    spotgrid = dict()
    for filename in opts.spotfiles:
        hdr = fitsio.read_header(filename)
        w = hdr['WAVE'] * 10  #- Wavelength [nm -> AA]
        p = hdr['FIBER']  #- Fiber slit position [mm]
        p = -p  #- Swap slit axis orientation to match CCD x
        wavelength.add(w)
        spotpos.add(p)
        spotgrid[(p, w)] = filename

    #- Wavelengths and slit positions of spots in grid
    wavelength = N.array(sorted(wavelength))
    spotpos = N.array(sorted(spotpos))

    #- Load grid of spots, and the x,y CCD pixel location of those spots
    print("Reading spots")
    nx = hdr['NAXIS1']
    ny = hdr['NAXIS2']
    np = len(spotpos)
    nw = len(wavelength)
    spots = N.zeros((np, nw, ny, nx), dtype=N.float32)
    spotx = N.zeros((np, nw), dtype=N.float32)
    spoty = N.zeros((np, nw), dtype=N.float32)
    for i, p in enumerate(spotpos):
        for j, w in enumerate(wavelength):
            pix = fitsio.read(spotgrid[(p, w)])
            hdr = fitsio.read_header(spotgrid[(p, w)])

            #- Shift spot to center of image
            #- NOTE: uses spline interpolation, not sinc interpolation
            npy, npx = pix.shape
            yc, xc = ndimage.center_of_mass(pix)
            xmid = (pix.shape[1] - 1) / 2.0
            ymid = (pix.shape[0] - 1) / 2.0
            dx = xmid - xc
            dy = ymid - yc
            spots[i, j] = ndimage.shift(pix, (dy, dx))

            #- Reference pixel in FITS file
            xref = hdr['CRPIX1'] - 1
            yref = hdr['CRPIX2'] - 1

            #- Location of centroid on CCD in mm from center
            spotx[i, j] = hdr['CRVAL1'] + (xmid - xref + dx) * hdr['CDELT1']
            spoty[i, j] = hdr['CRVAL2'] + (ymid - yref + dy) * hdr['CDELT2']

    #- Convert spotx, spoty to pixel units instead of mm
    spotx = spotx / CcdPixelSize + NumPixX / 2
    spoty = spoty / CcdPixelSize + NumPixY / 2

    #- Map location of each fiber along the slit
    ifiber = N.arange(NumFibers).astype(int)
    ngaps = ifiber // FibersPerGroup  #- Integer number of group gaps prior to fiber ifiber
    fiberpos = ifiber * FiberSpacing + ngaps * (GroupSpacing - FiberSpacing)
    fiberpos -= N.mean(fiberpos)
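
    #- Worked example (a hedged illustration, assuming FibersPerGroup = 25):
    #- fibers 0-24 have ngaps = 0; fiber 25 starts the second group, so
    #- ngaps = 25 // 25 = 1 and one extra (GroupSpacing - FiberSpacing)
    #- offset is added on top of the uniform FiberSpacing pitch.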

    #-----
    #- Determine range of wavelengths to fit
    #- Fit Legendre polynomials and extrapolate to CCD edges
    wmin = wavelength[0]
    wmax = wavelength[-1]
    for i in range(np):
        poly = Legendre.fit(spoty[i], wavelength, deg=5, domain=(0, NumPixY))
        wmin = min(wmin, poly(0))
        wmax = max(wmax, poly(NumPixY - 1))
        print(i, wmin, wmax, poly(0), poly(NumPixY - 1))

    #- Round down/up to nearest Angstrom
    wmin = int(wmin)
    wmax = int(wmax + 1)

    #- Min and max of spot/fiber positions on the slit head
    pmin = min(spotpos[0], fiberpos[0])
    pmax = max(spotpos[-1], fiberpos[-1])

    #-------------------------------------------------------------------------
    #- For slices in wavelength, fit y vs. slit position and sample at
    #- fiberpos spoty[np, nw]

    ydeg = 7
    y_vs_w = N.zeros((nspec, nw))
    for i in range(nw):
        poly = Legendre.fit(spotpos,
                            spoty[:, i],
                            deg=ydeg,
                            domain=(pmin, pmax))
        y_vs_w[:, i] = poly(fiberpos)

    #- For each fiber, fit y vs. wavelength and save coefficients
    #- Also calculate min/max wavelengths seen by every fiber

    wmin_all = 0
    wmax_all = 1e8
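    #- These bounds start extreme and shrink to the wavelength range covered by
    #- every fiber: the max of the per-fiber minima, the min of the per-fiber maxima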
    ww = N.arange(wmin, wmax)

    ycoeff = N.zeros((nspec, ydeg + 1))
    for i in range(nspec):
        poly = Legendre.fit(wavelength,
                            y_vs_w[i],
                            deg=ydeg,
                            domain=(wmin, wmax))
        ycoeff[i] = poly.coef

        wmin_all = max(wmin_all, N.interp(0, poly(ww), ww))
        wmax_all = min(wmax_all, N.interp(NumPixY - 1, poly(ww), ww))

    #- Round up/down to integer wavelengths
    wmin_all = int(wmin_all)
    wmax_all = int(wmax_all + 1)
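
    #- A minimal sketch (an illustration, not part of this script) of how a
    #- downstream reader could re-evaluate a trace from the saved coefficients,
    #- assuming each YCOEFF row pairs with the [wmin, wmax] Legendre domain:
    #-     from numpy.polynomial.legendre import Legendre
    #-     y_of_w = Legendre(ycoeff[i], domain=[wmin, wmax])
    #-     y_pix = y_of_w(ww)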

    #-------------------------------------------------------------------------
    #- for a slice in wavelength, fit x vs. slit position
    x_vs_p = N.zeros((nw, len(fiberpos)))
    for i in range(nw):
        poly = Legendre.fit(spotpos, spotx[:, i], deg=7, domain=(pmin, pmax))
        x_vs_p[i] = poly(fiberpos)
        assert N.max(N.abs(spotx[:, i] - poly(spotpos))) < 0.01

    xdeg = 7
    xcoeff = N.zeros((nspec, xdeg + 1))
    for i in range(nspec):
        poly = Legendre.fit(wavelength,
                            x_vs_p[:, i],
                            deg=xdeg,
                            domain=(wmin, wmax))
        xcoeff[i, :] = poly.coef
        assert N.max(N.abs(x_vs_p[:, i] - poly(wavelength))) < 0.01

    #-------------------------------------------------------------------------
    #- Write to fits file
    print("Writing", opts.outpsf)

    #- Use first spot file for representative header to pass keywords through
    hdr = fitsio.read_header(opts.spotfiles[0])
    hdr.delete('WAVE')
    hdr.delete('FIBER')
    hdr.add_record({
        "name": "PSFTYPE",
        "value": "SPOTGRID",
        "comment": "Grid of simulated PSF spots"
    })
    hdr.add_record({
        "name": "NPIX_X",
        "value": NumPixX,
        "comment": "Number of CCD pixels in X direction"
    })
    hdr.add_record({
        "name": "NPIX_Y",
        "value": NumPixY,
        "comment": "Number of CCD pixels in Y direction"
    })
    hdr.add_record({
        "name": "NSPEC",
        "value": nspec,
        "comment": "Number of spectra"
    })
    hdr.add_record({
        "name": "NWAVE",
        "value": nw,
        "comment": "Number of wavelength samples"
    })
    hdr.add_record({
        "name": "CCDPIXSZ",
        "value": CcdPixelSize,
        "comment": "CCD pixel size [mm]"
    })
    hdr.add_record({
        "name": "DFIBER",
        "value": FiberSpacing,
        "comment": "Center-to-center pitch of fibers on slit [mm]"
    })
    hdr.add_record({
        "name": "DGROUP",
        "value": GroupSpacing,
        "comment": "Spacing between fiber groups on slit [mm]"
    })
    hdr.add_record({
        "name": "NGROUPS",
        "value": GroupsPerCcd,
        "comment": "Number of fiber groups per slit"
    })
    hdr.add_record({
        "name": "NFIBGRP",
        "value": FibersPerGroup,
        "comment": "Number of fibers per group"
    })
    hdr.add_record({
        "name": "WAVEMIN",
        "value": wmin,
        "comment": "Min wavelength for Legendre domain [-1,1]"
    })
    hdr.add_record({
        "name": "WAVEMAX",
        "value": wmax,
        "comment": "Max wavelength for Legendre domain [-1,1]"
    })
    hdr.add_record({
        "name": "WMIN_ALL",
        "value": wmin_all,
        "comment": "Min wavelength seen by all spectra [Ang]"
    })
    hdr.add_record({
        "name": "WMAX_ALL",
        "value": wmax_all,
        "comment": "Max wavelength seen by all spectra [Ang]"
    })

    fitsio.write(opts.outpsf,
                 xcoeff,
                 extname='XCOEFF',
                 header=hdr,
                 clobber=True)

    wavehdr = list()
    wavehdr.append(
        dict(name='WAVEMIN',
             value=wmin,
             comment='Min wavelength on the CCD [Ang]'))
    wavehdr.append(
        dict(name='WAVEMAX',
             value=wmax,
             comment='Max wavelength on the CCD [Ang]'))
    wavehdr.append(
        dict(name='WMIN_ALL',
             value=wmin_all,
             comment='Min wavelength seen by all spectra [Ang]'))
    wavehdr.append(
        dict(name='WMAX_ALL',
             value=wmax_all,
             comment='Max wavelength seen by all spectra [Ang]'))
    fitsio.write(opts.outpsf, ycoeff, extname='YCOEFF', header=wavehdr)

    # fitsio.write(opts.outpsf, Y, extname='Y')
    # fitsio.write(opts.outpsf, W, extname='WAVELENGTH')

    fitsio.write(opts.outpsf, spots, extname='SPOTS')
    fitsio.write(opts.outpsf, spotx, extname='SPOTX')
    fitsio.write(opts.outpsf, spoty, extname='SPOTY')
    fitsio.write(opts.outpsf, fiberpos, extname='FIBERPOS')
    fitsio.write(opts.outpsf, spotpos, extname='SPOTPOS')
    fitsio.write(opts.outpsf, wavelength, extname='SPOTWAVE')

    #- Add pre-computed throughput to PSF if requested
    #- Removing; this could just lead to inconsistencies
    # if opts.throughput:
    #     header = fitsio.read_header(opts.throughput, 'THROUGHPUT')
    #     data = fitsio.read(opts.throughput, 'THROUGHPUT')
    #     fitsio.write(opts.outpsf, data, header=header, extname='THROUGHPUT')

    #--- DEBUG ---
    if opts.debug:
        import pylab as P
        P.ion()
        import IPython
        IPython.embed()
    #--- DEBUG ---
    return 0
def wavelength_solution(directory,
                        objecto,
                        cenwave,
                        pix_scale,
                        max_wave=9999999,
                        min_wave=0,
                        width_guess=4,
                        upper_sigma=2.0,
                        lower_sigma=2.0,
                        order=3,
                        max_delta=10,
                        fit_range=9,
                        max_width=5,
                        arc_name='arc',
                        lamp_file='HeNeAr.dat'):
    '''
    Calculate the wavelength solution of a spectrum. The function needs a good
    initial guess of the central wavelength and pixel scale, i.e. the intercept
    and slope of a linear wavelength solution. It will also check whether the
    output files already exist.

    Parameters
    -------------
    directory: Directory where all the science files and the lamp file are located
    objecto  : Name of the object to correct for
    cenwave  : Best guess of the central wavelength in Angstroms
    pix_scale: Best guess of the pixel scale in Angstroms / pixel
    max_wave : Maximum wavelength to fit in Angstroms
    min_wave : Minimum wavelength to fit in Angstroms
    width_guess: Guess of the width of the lines in Angstroms
    upper_sigma: Used for sigma clipping of solution fit
    lower_sigma: Used for sigma clipping of solution fit
    order : Order of the Legendre fit, can be 2, 3, 4
    max_delta: Maximum separation in Angstroms between guess and result of center wavelength
    fit_range: Fit plus/minus these many Angstroms around the central wavelength
    max_width: Maximum width in Angstroms of the resulting fit of a line
    arc_name : Prefix of the arc lamp file names to search for
    lamp_file: File listing the known lamp line wavelengths in Angstroms

    Output
    -------------
    Function will append the name of the lamp to its respective science file
    and output a database file with the parameters of the Legendre fit.
    '''
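
    # Hypothetical example call (directory and object names are placeholders):
    #     wavelength_solution('night1', 'SN2019xyz', cenwave=5700, pix_scale=1.2,
    #                         order=3, arc_name='arc', lamp_file='HeNeAr.dat')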

    # Find the files with the lamp and open the first one
    input_lamp = glob.glob(directory + '/%s*BiasFlatOut.fits' % arc_name)[0]
    Lamp = fits.open(input_lamp)

    # Extract the X data from the lamp
    counts_y = Lamp[0].data[0][0]
    pixel_x = np.arange(len(counts_y)) + 1

    # Apply the approximate wavelength correction
    x_shift = cenwave - len(pixel_x) * pix_scale / 2.0
    approx_wave = pixel_x * pix_scale + x_shift

    # Import the known wavelengths
    CuArNeLines_all = np.genfromtxt(lamp_file)

    # Select only the lines that are within the range of the spectrum
    CuArNeLines_most = CuArNeLines_all[
        np.where((min(approx_wave) < CuArNeLines_all)
                 & (max(approx_wave) > CuArNeLines_all))]

    # Crop the lines to only the range specified by max_wave and min_wave
    CuArNeLines = CuArNeLines_most[np.where((CuArNeLines_most < max_wave)
                                            & (CuArNeLines_most > min_wave))]

    # Empty variables for the parameters of the gaussian fits
    good_centers = []
    good_truths = []
    good_widths = []
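
    # gaussian() and errfunc_gauss() are defined elsewhere in this module; a
    # minimal sketch consistent with their 3-parameter usage below
    # (height, center, width) would be:
    #     def gaussian(x, height, center, width):
    #         return height * np.exp(-(x - center)**2 / (2.0 * width**2))
    #     def errfunc_gauss(p, x, y):
    #         return gaussian(x, *p) - y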

    # For every line in the CuArNe file fit a gaussian around the center wavelength.
    for Line in CuArNeLines:

        # Slice the data to fit only a region of fit_range Angstroms to each side of the line
        Xdata = approx_wave[np.where((approx_wave > Line - fit_range)
                                     & (approx_wave < Line + fit_range))]
        Ydata = counts_y[np.where((approx_wave > Line - fit_range)
                                  & (approx_wave < Line + fit_range))]

        # Make an initial guess to the gaussian to fit
        h_guess = np.max(Ydata) - np.min(Ydata)  # Line Height
        c_guess = Line  # Line Center
        w_guess = width_guess  # Line Width
        FirstGuess = [h_guess, c_guess, w_guess]

        # Do a quick least squares fit to the data
        Optimized, success = optimize.leastsq(errfunc_gauss,
                                              FirstGuess[:],
                                              args=(Xdata, Ydata))

        # Rename the output parameters to something readable
        output_height = Optimized[0]
        output_center = Optimized[1]
        output_width = Optimized[2]
        delta_wavelength = np.abs(Line - output_center)

        # Accept the line only if:
        # - its center is less than max_delta Angstroms from the initial guess
        # - it is less than max_width Angstroms wide
        # - its fitted height is positive
        if (delta_wavelength < max_delta) and (output_width < max_width) and (
                output_height > 0):
            good_centers = np.append(good_centers, output_center)
            good_truths = np.append(good_truths, Line)
            good_widths = np.append(good_widths, output_width)

        # Plot the individual line gaussian function
        Xdata_out = np.linspace(min(Xdata), max(Xdata), 100)
        plt.plot(Xdata_out,
                 gaussian(Xdata_out, *Optimized),
                 linewidth=2,
                 color='g')
    print(str(len(good_centers)) + " Lines Found")

    # Plot the gaussian fits on top of the approximate solution for diagnosis
    plt.plot(approx_wave, counts_y, linewidth=1, color='b')
    for i in range(len(good_truths)):
        plt.axvline(x=good_truths[i], color='k', linestyle='--', label='True')
    for i in range(len(good_centers)):
        plt.axvline(x=good_centers[i],
                    color='g',
                    linestyle='-',
                    label='Center')
    plt.show()

    # Fit a 2nd-order polynomial mapping the fitted line centers (good_centers)
    # to the accepted true wavelengths from the input lamp file (good_truths)
    Parameters = np.polyfit(good_centers, good_truths, 2)
    Function = np.poly1d(Parameters)  # Polynomial with the best-fit parameters
    Model = Function(good_centers)  # Model wavelengths at the fitted centers
    Output = good_truths - Model  # Residuals: data - model

    # Sigma clipping: flag points farther than upper_sigma / lower_sigma
    # standard deviations from the mean residual
    hi_sigma = np.average(Output) + upper_sigma * np.std(Output)
    lo_sigma = np.average(Output) - lower_sigma * np.std(Output)
    bad_lines = np.where((Output >= hi_sigma) | (Output <= lo_sigma))
    good_lines = np.where((Output < hi_sigma) & (Output > lo_sigma))

    # Remove the clipped points from the list of good lines
    best_centers = good_centers[good_lines]

    # Find minimum and maximum of the centers in the original pixel array
    back_to_pixels = (best_centers -
                      (cenwave - len(pixel_x) * pix_scale / 2.0)) / pix_scale
    #back_to_pixels = (best_centers - x_shift) / pix_scale
    Xmin = min(back_to_pixels)
    Xmax = max(back_to_pixels)

    # Normalize the good centers so that they go from -1 to 1
    Center_min = min(best_centers)
    Center_max = max(best_centers)
    model_x = normalize(best_centers, Center_min, Center_max)
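
    # normalize() is defined elsewhere in this module; a sketch consistent with
    # mapping [Center_min, Center_max] onto the Legendre domain [-1, 1] is:
    #     def normalize(x, lo, hi):
    #         return 2.0 * (x - lo) / (hi - lo) - 1.0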

    # Do the fit again with an Nth order Legendre polynomial
    Parameters_Good = np.polynomial.legendre.legfit(
        model_x, good_truths[good_lines],
        order)  # Fit the line centers to the true centers
    Model_Good = Legendre(Parameters_Good)(model_x)  # Model wavelengths at the centers
    Output_Good = Model_Good - good_truths[good_lines]  # Residuals: model - data

    # Plot results to make sure everything looks ok
    plt.subplots_adjust(hspace=0.0)
    plt.subplot(211)
    plt.xlim(min(good_centers), max(good_centers))
    plt.scatter(good_centers, good_truths - good_centers, color='b')
    plt.scatter(good_centers[bad_lines],
                good_truths[bad_lines] - good_centers[bad_lines],
                color='r')
    plt.plot(best_centers, Model_Good - best_centers, color='g')

    plt.subplot(212)
    plt.axhline(color='g')
    plt.xlabel("Wavelength")
    plt.ylabel("Residuals")
    plt.xlim(min(good_centers), max(good_centers))
    plt.scatter(best_centers, good_truths[good_lines] - Model_Good, color='b')
    plt.show()

    # Calculate the error and resolution on the wavelength correction
    Error = np.std(Output_Good)
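    # Convert the fitted Gaussian sigmas to FWHM: FWHM = 2*sqrt(2*ln(2))*sigma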
    true_widths = good_widths[good_lines] * 2 * np.sqrt(2 * np.log(2))
    Resolution = np.average(true_widths, weights=1 / Output_Good**2)
    Resolution_std = np.std(true_widths)
    print("Error = " + str(Error))
    print("Resolution = " + str(Resolution))

    # Plot Resolution of Lines
    #plt.errorbar(np.arange(len(true_widths)), true_widths, Output_Good, fmt = '.')
    #plt.axhline(y = Resolution, color = 'k')
    #plt.axhline(y = Resolution + Resolution_std, color = 'k', linestyle = '--')
    #plt.axhline(y = Resolution - Resolution_std, color = 'k', linestyle = '--')
    #plt.show()

    # Do the final correction on the entire spectrum
    Lamp_Wave_Out = Legendre(Parameters_Good)(normalize(
        approx_wave, Center_min, Center_max))

    # Plot the output before and after the correction
    plt.plot(approx_wave, counts_y, color='k', alpha=0.2, linestyle='--')
    plt.plot(Lamp_Wave_Out, counts_y)
    for i in CuArNeLines_all:
        plt.axvline(x=i, color='r')
    plt.show()

    # Save the output to the file that will be used by IRAF
    if order in (2, 3, 4):
        # The IRAF identify record stores: the function type (2 = Legendre),
        # the number of terms (order + 1), the pixel range of the fit, and
        # the order + 1 Legendre coefficients
        lines = ["# std = " + str(Error),
                 "begin identify Lamp.ms - Ap 1",
                 "coefficients " + str(order + 5),
                 "2.0",  # 2 = Legendre Polynomial
                 str(float(order + 1)),  # Number of terms of the fit
                 str(Xmin),
                 str(Xmax)]
        lines += [str(c) for c in Parameters_Good[:order + 1]]
        Output = "\n".join(lines) + "\n"
    else:
        raise ValueError("Order must be 2, 3, or 4")

    # Find where the actual image name begins for saving
    filename = input_lamp[input_lamp.find('/%s' % arc_name) + 1:-5]

    # Save the output to the IRAF database
    outputname = 'database/id' + filename
    f = open(outputname, 'w')
    f.write(Output)
    f.close()
    print("Saved " + outputname)

    # Append the lamp wavelength link to the science target
    # Do that for each image
    Inputs = glob.glob('%s/%s*SkyOut.fits' % (directory, objecto))

    # Check that the files don't exist
    if check_existence('%s/%s*OutWave.fits' % (directory, objecto),
                       'wavelength_solution'):
        return

    for name in Inputs:
        print(name)
        iraf.hedit(images=str(name),
                   fields='REFSPEC1',
                   value=str(filename),
                   add='yes',
                   update='yes')

        # Do wavelength correction for each image
        iraf.noao.dispcor(input=name,
                          output=name[:-5] + "Wave.fits",
                          linearize='no',
                          database='database',
                          w1='INDEF',
                          w2='INDEF',
                          dw='INDEF',
                          nw='INDEF')
Exemple #32
0
def plot_phase(fpath,
               ax,
               ind,
               s=3,
               alpha=0.3,
               lctype='IRM2',
               periodogramtype=None,
               peakindex=0,
               plot_bin_phase=False,
               overwritecsv=1):

    outsavpath = os.path.join(
        OUTDIR,
        'quilt_s6_s7_' + os.path.basename(fpath).replace('.fits', '.csv'))

    if os.path.exists(outsavpath) and not overwritecsv:
        df = pd.read_csv(outsavpath)
        phase = nparr(df['phase'])
        phz_flux = nparr(df['phz_flux'])
        period = nparr(df['period'])[0]

    else:
        #
        # get data. fpath here is a fits LC file. apply the periodogram requested.
        #
        time = iu.get_data_keyword(fpath, 'TMID_BJD', ext=1)
        mag = iu.get_data_keyword(fpath, lctype, ext=1)
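
        # Convert magnitudes to flux via the standard Pogson relation:
        # f = f_x0 * 10**(-0.4 * (mag - m_x0))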

        f_x0 = 1e4
        m_x0 = 10
        flux = f_x0 * 10**(-0.4 * (mag - m_x0))
        flux /= np.nanmedian(flux)

        time, flux = moe.mask_orbit_start_and_end(time, flux)

        # fit out the long-term trend (light detrending) with a low-order Legendre fit per time group.
        if 'IRM' in lctype:
            ngroups, groups = lcmath.find_lc_timegroups(time, mingap=0.5)
            assert ngroups == 2

            tg_smooth_flux = []
            for group in groups:

                #
                # fit out arbitrary order legendre series
                # p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)
                #
                legendredeg = 2
                p = Legendre.fit(time[group], flux[group], legendredeg)
                coeffs = p.coef
                fit_flux = p(time[group])

                tg_smooth_flux.append(flux[group] / fit_flux)

            flux = np.concatenate(tg_smooth_flux)

        if periodogramtype == 'tls':
            period_min, period_max = 0.5, 5
            tlsp = periodbase.tls_parallel_pfind(
                time,
                flux,
                1e-3 * flux,
                magsarefluxes=True,
                tls_rstar_min=0.1,
                tls_rstar_max=10,
                tls_mstar_min=0.1,
                tls_mstar_max=5.0,
                tls_oversample=8,
                tls_mintransits=1,
                tls_transit_template='default',
                nbestpeaks=5,
                sigclip=None,
                nworkers=52)

            period = tlsp['nbestperiods'][peakindex]
            t0 = tlsp['tlsresult']['T0']
            if peakindex == 1:
                t0 += period / 2

        elif periodogramtype == 'gls':
            period_min, period_max = 0.1, 5
            ls = LombScargle(time, flux, flux * 1e-3)
            freq, power = ls.autopower(minimum_frequency=1 / period_max,
                                       maximum_frequency=1 / period_min,
                                       samples_per_peak=20)
            period = 1 / freq[np.argmax(power)]
            t0 = time[np.argmin(flux)]

        else:
            raise NotImplementedError(
                'got {}, not implemented'.format(periodogramtype))

        #
        # phase data
        #
        phzd = phase_magseries(time, flux, period, t0, wrap=True, sort=True)

        phase = phzd['phase']
        phz_flux = phzd['mags']

    #
    # plot data
    #
    ax.scatter(phase,
               phz_flux,
               c='k',
               alpha=alpha,
               zorder=3,
               s=s,
               rasterized=True,
               linewidths=0)

    ax.text(0.88,
            0.03,
            '{:.2f}d'.format(period),
            transform=ax.transAxes,
            ha='right',
            va='bottom')

    ax.text(0.04,
            0.06,
            '{}'.format(ind),
            transform=ax.transAxes,
            ha='left',
            va='bottom')

    if overwritecsv:
        outdf = pd.DataFrame({
            'phase': phase,
            'phz_flux': phz_flux,
            'period': np.ones_like(phase) * period
        })
        outdf.to_csv(outsavpath, index=False)

    if plot_bin_phase:

        binphasedlc = phase_bin_magseries(phase,
                                          phz_flux,
                                          binsize=2e-2,
                                          minbinelems=3)
        binplotphase = binphasedlc['binnedphases']
        binplotmags = binphasedlc['binnedmags']

        ax.scatter(binplotphase,
                   binplotmags,
                   c='orange',
                   alpha=alpha,
                   zorder=4,
                   s=s,
                   rasterized=True,
                   linewidths=0)