Code example #1
File: keppca.py  Project: KeplerGO/PyKE
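Note: this listing is a single function extracted from keppca.py, so the module-level imports are not shown. A rough sketch of the context the body appears to rely on (inferred from the calls it makes, not copied from the project; kepio, kepmsg, kepkey and kepplot are PyKE's own helper modules, and mad() and reject_outliers() are small statistics helpers defined elsewhere in the same file) is:

import re
import random
import numpy
from numpy import array, arange, append, sort, seterr, copy, log10, flipud, sum, mean
import scipy.optimize
import pyfits
from pyfits import Column, ColDefs, HDUList, ImageHDU, new_table
import pylab
from pylab import plot, imshow, subplot2grid, xticks, yticks, xlim, ylim, grid, setp
import kepio, kepmsg, kepkey, kepplot  # PyKE helper modules (assumed)
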
def keppca(infile,
           maskfile,
           outfile,
           components,
           plotpca,
           nreps,
           clobber,
           verbose,
           logfile,
           status,
           cmdLine=False):

    status = 0

    try:
        import mdp
    except ImportError:
        msg = 'ERROR -- KEPPCA: this task has an external python dependency on MDP, a Modular toolkit for Data Processing (http://mdp-toolkit.sourceforge.net). In order to take advantage of this PCA task, the user must first install MDP with their current python distribution. Note carefully that you may have more than one python installation on your machine, and ensure that MDP is installed with the same version of python that the PyKE tools employ. Installation instructions for MDP can be found at the URL provided above.'
        status = kepmsg.err(None, msg, True)

# startup parameters

    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 10
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")

    # log the call

    if status == 0:
        hashline = '----------------------------------------------------------------------------'
        kepmsg.log(logfile, hashline, verbose)
        call = 'KEPPCA -- '
        call += 'infile=' + infile + ' '
        call += 'maskfile=' + maskfile + ' '
        call += 'outfile=' + outfile + ' '
        call += 'components=' + components + ' '
        ppca = 'n'
        if (plotpca): ppca = 'y'
        call += 'plotpca=' + ppca + ' '
        call += 'nmaps=' + str(nreps) + ' '
        overwrite = 'n'
        if (clobber): overwrite = 'y'
        call += 'clobber=' + overwrite + ' '
        chatter = 'n'
        if (verbose): chatter = 'y'
        call += 'verbose=' + chatter + ' '
        call += 'logfile=' + logfile
        kepmsg.log(logfile, call + '\n', verbose)

# start time

    if status == 0:
        kepmsg.clock('KEPPCA started at', logfile, verbose)

# test log file

    if status == 0:
        logfile = kepmsg.test(logfile)

# clobber output file

    if status == 0:
        if clobber: status = kepio.clobber(outfile, logfile, verbose)
        if kepio.fileexists(outfile):
            message = 'ERROR -- KEPPCA: ' + outfile + ' exists. Use clobber=yes'
            status = kepmsg.err(logfile, message, verbose)

# Set output file names - text file with data and plot

    if status == 0:
        dataout = copy(outfile)
        repname = re.sub('.fits', '.png', outfile)

# open input file

    if status == 0:
        instr = pyfits.open(infile, mode='readonly', memmap=True)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)

# open TPF FITS file

    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \
            kepio.readTPF(infile,'FLUX_BKG',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \
            kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, pcorr1, status = \
            kepio.readTPF(infile,'POS_CORR1',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, pcorr2, status = \
            kepio.readTPF(infile,'POS_CORR2',logfile,verbose)

# Save original data dimensions, in case of using maskfile

    if status == 0:
        xdimorig = xdim
        ydimorig = ydim

# read mask definition file if it has been supplied

    if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        maskx = array([], 'int')
        masky = array([], 'int')
        lines, status = kepio.openascii(maskfile, 'r', logfile, verbose)
        for line in lines:
            line = line.strip().split('|')
            if len(line) == 6:
                y0 = int(line[3])
                x0 = int(line[4])
                line = line[5].split(';')
                for items in line:
                    try:
                        masky = numpy.append(masky,
                                             y0 + int(items.split(',')[0]))
                        maskx = numpy.append(maskx,
                                             x0 + int(items.split(',')[1]))
                    except:
                        continue
        status = kepio.closeascii(lines, logfile, verbose)
        if len(maskx) == 0 or len(masky) == 0:
            message = 'ERROR -- KEPPCA: ' + maskfile + ' contains no pixels.'
            status = kepmsg.err(logfile, message, verbose)
        xdim = max(maskx) - min(maskx) + 1  # Find largest x dimension of mask
        ydim = max(masky) - min(masky) + 1  # Find largest y dimension of mask

        # pad mask to ensure it is rectangular

        workx = array([], 'int')
        worky = array([], 'int')
        for ip in arange(min(maskx), max(maskx) + 1):
            for jp in arange(min(masky), max(masky) + 1):
                workx = append(workx, ip)
                worky = append(worky, jp)
        maskx = workx
        masky = worky

# define new subimage bitmap...

    if status == 0 and maskfile.lower() != 'all':
        aperx = numpy.array([], 'int')
        apery = numpy.array([], 'int')
        aperb = maskx - x0 + xdimorig * (
            masky - y0
        )  # aperb is an array that contains the pixel numbers in the mask
        npix = len(aperb)
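        # aperb converts each (x, y) mask position into a flat, row-major pixel
        # index within the original xdimorig by ydimorig subimage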

# ...or use all pixels

    if status == 0 and maskfile.lower() == 'all':
        npix = xdimorig * ydimorig
        aperb = array([], 'int')
        aperb = numpy.r_[0:npix]

# legal mask defined?

    if status == 0:
        if len(aperb) == 0:
            message = 'ERROR -- KEPPCA: no legal pixels within the subimage are defined.'
            status = kepmsg.err(logfile, message, verbose)

# Identify principal components desired

    if status == 0:
        pcaout = []
        txt = components.strip().split(',')
        for work1 in txt:
            try:
                pcaout.append(int(work1.strip()))
            except:
                work2 = work1.strip().split('-')
                try:
                    for work3 in range(int(work2[0]), int(work2[1]) + 1):
                        pcaout.append(work3)
                except:
                    message = 'ERROR -- KEPPCA: cannot understand principal component list requested'
                    status = kepmsg.err(logfile, message, verbose)
    if status == 0:
        pcaout = set(sort(pcaout))
        pcarem = array(list(pcaout)) - 1  # zero-based list of PCA component numbers to be removed
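    # e.g. components='1,3-5' selects components 1, 3, 4 and 5, giving
    # pcarem = [0, 2, 3, 4] in the zero-based indexing used below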

    # Initialize arrays and variables, and apply pixel mask to the data

    if status == 0:
        ntim = 0
        time = numpy.array([], dtype='float64')
        timecorr = numpy.array([], dtype='float32')
        cadenceno = numpy.array([], dtype='int')
        pixseries = numpy.array([], dtype='float32')
        errseries = numpy.array([], dtype='float32')
        bkgseries = numpy.array([], dtype='float32')
        berseries = numpy.array([], dtype='float32')
        quality = numpy.array([], dtype='float32')
        pos_corr1 = numpy.array([], dtype='float32')
        pos_corr2 = numpy.array([], dtype='float32')
        nrows = numpy.size(fluxpixels, 0)

# Apply the pixel mask so we are left with only the desired pixels

    if status == 0:
        pixseriesb = fluxpixels[:, aperb]
        errseriesb = errpixels[:, aperb]
        bkgseriesb = flux_bkg[:, aperb]
        berseriesb = flux_bkg_err[:, aperb]

# Read in the data to various arrays

    if status == 0:
        for i in range(nrows):
            if qual[i] < 10000 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,int(ydim*xdim/2+0.5)]) and \
                    numpy.isfinite(fluxpixels[i,1+int(ydim*xdim/2+0.5)]):
                ntim += 1
                time = numpy.append(time, barytime[i])
                timecorr = numpy.append(timecorr, tcorr[i])
                cadenceno = numpy.append(cadenceno, cadno[i])
                pixseries = numpy.append(pixseries, pixseriesb[i])
                errseries = numpy.append(errseries, errseriesb[i])
                bkgseries = numpy.append(bkgseries, bkgseriesb[i])
                berseries = numpy.append(berseries, berseriesb[i])
                quality = numpy.append(quality, qual[i])
                pos_corr1 = numpy.append(pos_corr1, pcorr1[i])
                pos_corr2 = numpy.append(pos_corr2, pcorr2[i])
        pixseries = numpy.reshape(pixseries, (ntim, npix))
        errseries = numpy.reshape(errseries, (ntim, npix))
        bkgseries = numpy.reshape(bkgseries, (ntim, npix))
        berseries = numpy.reshape(berseries, (ntim, npix))
        tmp = numpy.median(pixseries, axis=1)
        for i in range(len(tmp)):
            pixseries[i] = pixseries[i] - tmp[i]

# Figure out which pixels are undefined/nan and remove them. Keep track for adding back in later

    if status == 0:
        nanpixels = numpy.array([], dtype='int')
        i = 0
        while (i < npix):
            if numpy.isnan(pixseries[0, i]):
                nanpixels = numpy.append(nanpixels, i)
                npix = npix - 1
            i = i + 1
        pixseries = numpy.delete(pixseries, nanpixels, 1)
        errseries = numpy.delete(errseries, nanpixels, 1)
        pixseries[numpy.isnan(pixseries)] = random.gauss(100, 10)
        errseries[numpy.isnan(errseries)] = 10
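        # pixels that are NaN in the first cadence are dropped from the series here
        # and re-inserted as zeros into the eigenvectors and pixel means after the
        # PCA; any remaining stray NaNs are overwritten with dummy values so the
        # decomposition does not fail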

# Compute statistical weights, means, standard deviations

    if status == 0:
        weightseries = (pixseries / errseries)**2
        pixMean = numpy.average(pixseries, axis=0, weights=weightseries)
        pixStd = numpy.std(pixseries, axis=0)

# Normalize the input by subtracting the mean and dividing by the standard deviation.
# This makes it a correlation-based PCA, which is what we want.

    if status == 0:
        pixseriesnorm = (pixseries - pixMean) / pixStd

# Number of principal components to compute. Setting it equal to the number of pixels

    if status == 0:
        nvecin = npix

# Run PCA using the MDP Whitening PCA, which produces normalized PCA components (zero mean and unit variance)

    if status == 0:
        pcan = mdp.nodes.WhiteningNode(svd=True)
        pcar = pcan.execute(pixseriesnorm)
        eigvec = pcan.get_recmatrix()
        model = pcar
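        # pcar holds one whitened (zero-mean, unit-variance) component time series
        # per column; eigvec, the recovered projection matrix, holds one eigenvector
        # (a pixel weighting) per row and is reshaped into images below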

# Re-insert nan columns as zeros

    if status == 0:
        for i in range(0, len(nanpixels)):
            nanpixels[i] = nanpixels[i] - i
        eigvec = numpy.insert(eigvec, nanpixels, 0, 1)
        pixMean = numpy.insert(pixMean, nanpixels, 0, 0)

#  Make output eigenvectors (correlation images) into xpix by ypix images

    if status == 0:
        eigvec = eigvec.reshape(nvecin, ydim, xdim)

# Calculate sum of all pixels to display as raw lightcurve and other quantities

    if status == 0:
        pixseriessum = sum(pixseries, axis=1)
        nrem = len(pcarem)  # Number of components to remove
        nplot = npix  # Number of pcas to plot - currently set to plot all components, but could set
        # nplot = nrem to just plot as many components as is being removed

# Subtract components by fitting them to the summed light curve

    if status == 0:
        x0 = numpy.tile(-1.0, 1)
        for k in range(0, nrem):

            def f(x):
                fluxcor = pixseriessum
                for k in range(0, len(x)):
                    fluxcor = fluxcor - x[k] * model[:, pcarem[k]]
                return mad(fluxcor)

            if k == 0:
                x0 = array([-1.0])
            else:
                x0 = numpy.append(x0, 1.0)
            myfit = scipy.optimize.fmin(f,
                                        x0,
                                        maxiter=50000,
                                        maxfun=50000,
                                        disp=False)
            x0 = myfit
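        # each pass refits all coefficients so far by minimizing mad() (a robust
        # scatter estimate) of the corrected light curve, warm-starting from the
        # previous solution before the next component is added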

# Now that coefficients for all components have been found, subtract them to produce a calibrated time-series,
# and then divide by the robust mean to produce a normalized time series as well

    if status == 0:
        c = myfit
        fluxcor = pixseriessum
        for k in range(0, nrem):
            fluxcor = fluxcor - c[k] * model[:, pcarem[k]]
            normfluxcor = fluxcor / mean(reject_outliers(fluxcor, 2))

# input file data

    if status == 0:
        cards0 = instr[0].header.cards
        cards1 = instr[1].header.cards
        cards2 = instr[2].header.cards
        table = instr[1].data[:]
        maskmap = copy(instr[2].data)

# subimage physical WCS data

    if status == 0:
        crpix1p = cards2['CRPIX1P'].value
        crpix2p = cards2['CRPIX2P'].value
        crval1p = cards2['CRVAL1P'].value
        crval2p = cards2['CRVAL2P'].value
        cdelt1p = cards2['CDELT1P'].value
        cdelt2p = cards2['CDELT2P'].value

# dummy columns for output file

    if status == 0:
        sap_flux_err = numpy.empty(len(time))
        sap_flux_err[:] = numpy.nan
        sap_bkg = numpy.empty(len(time))
        sap_bkg[:] = numpy.nan
        sap_bkg_err = numpy.empty(len(time))
        sap_bkg_err[:] = numpy.nan
        pdc_flux = numpy.empty(len(time))
        pdc_flux[:] = numpy.nan
        pdc_flux_err = numpy.empty(len(time))
        pdc_flux_err[:] = numpy.nan
        psf_centr1 = numpy.empty(len(time))
        psf_centr1[:] = numpy.nan
        psf_centr1_err = numpy.empty(len(time))
        psf_centr1_err[:] = numpy.nan
        psf_centr2 = numpy.empty(len(time))
        psf_centr2[:] = numpy.nan
        psf_centr2_err = numpy.empty(len(time))
        psf_centr2_err[:] = numpy.nan
        mom_centr1 = numpy.empty(len(time))
        mom_centr1[:] = numpy.nan
        mom_centr1_err = numpy.empty(len(time))
        mom_centr1_err[:] = numpy.nan
        mom_centr2 = numpy.empty(len(time))
        mom_centr2[:] = numpy.nan
        mom_centr2_err = numpy.empty(len(time))
        mom_centr2_err[:] = numpy.nan

# mask bitmap

    if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        for i in range(maskmap.shape[0]):
            for j in range(maskmap.shape[1]):
                aperx = append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
                apery = append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
                if maskmap[i, j] == 0:
                    pass
                else:
                    maskmap[i, j] = 1
                    for k in range(len(maskx)):
                        if aperx[-1] == maskx[k] and apery[-1] == masky[k]:
                            maskmap[i, j] = 3
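        # maskmap now encodes 0 = pixel outside the downloaded aperture (unchanged),
        # 1 = collected pixel outside the user-defined mask, 3 = collected pixel
        # inside the user-defined mask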

# construct output primary extension

    if status == 0:
        hdu0 = pyfits.PrimaryHDU()
        for i in range(len(cards0)):
            if cards0[i].keyword not in list(hdu0.header.keys()):
                hdu0.header[cards0[i].keyword] = (cards0[i].value,
                                                  cards0[i].comment)
            else:
                hdu0.header.cards[
                    cards0[i].keyword].comment = cards0[i].comment
        status = kepkey.history(call, hdu0, outfile, logfile, verbose)
        outstr = HDUList(hdu0)

# construct output light curve extension

    if status == 0:
        col1 = Column(name='TIME',
                      format='D',
                      unit='BJD - 2454833',
                      array=time)
        col2 = Column(name='TIMECORR', format='E', unit='d', array=timecorr)
        col3 = Column(name='CADENCENO', format='J', array=cadenceno)
        col4 = Column(name='SAP_FLUX',
                      format='E',
                      unit='e-/s',
                      array=pixseriessum)
        col5 = Column(name='SAP_FLUX_ERR',
                      format='E',
                      unit='e-/s',
                      array=sap_flux_err)
        col6 = Column(name='SAP_BKG', format='E', unit='e-/s', array=sap_bkg)
        col7 = Column(name='SAP_BKG_ERR',
                      format='E',
                      unit='e-/s',
                      array=sap_bkg_err)
        col8 = Column(name='PDCSAP_FLUX',
                      format='E',
                      unit='e-/s',
                      array=pdc_flux)
        col9 = Column(name='PDCSAP_FLUX_ERR',
                      format='E',
                      unit='e-/s',
                      array=pdc_flux_err)
        col10 = Column(name='SAP_QUALITY', format='J', array=quality)
        col11 = Column(name='PSF_CENTR1',
                       format='E',
                       unit='pixel',
                       array=psf_centr1)
        col12 = Column(name='PSF_CENTR1_ERR',
                       format='E',
                       unit='pixel',
                       array=psf_centr1_err)
        col13 = Column(name='PSF_CENTR2',
                       format='E',
                       unit='pixel',
                       array=psf_centr2)
        col14 = Column(name='PSF_CENTR2_ERR',
                       format='E',
                       unit='pixel',
                       array=psf_centr2_err)
        col15 = Column(name='MOM_CENTR1',
                       format='E',
                       unit='pixel',
                       array=mom_centr1)
        col16 = Column(name='MOM_CENTR1_ERR',
                       format='E',
                       unit='pixel',
                       array=mom_centr1_err)
        col17 = Column(name='MOM_CENTR2',
                       format='E',
                       unit='pixel',
                       array=mom_centr2)
        col18 = Column(name='MOM_CENTR2_ERR',
                       format='E',
                       unit='pixel',
                       array=mom_centr2_err)
        col19 = Column(name='POS_CORR1',
                       format='E',
                       unit='pixel',
                       array=pos_corr1)
        col20 = Column(name='POS_CORR2',
                       format='E',
                       unit='pixel',
                       array=pos_corr2)
        col21 = Column(name='PCA_FLUX', format='E', unit='e-/s', array=fluxcor)
        col22 = Column(name='PCA_FLUX_NRM', format='E', array=normfluxcor)
        cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \
                            col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22])
        hdu1 = new_table(cols)
        hdu1.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
        hdu1.header['TFORM1'] = ('D', 'data type: float64')
        hdu1.header['TUNIT1'] = ('BJD - 2454833',
                                 'column units: barycenter corrected JD')
        hdu1.header['TDISP1'] = ('D12.7', 'column display format')
        hdu1.header['TTYPE2'] = (
            'TIMECORR', 'column title: barycentric-timeslice correction')
        hdu1.header['TFORM2'] = ('E', 'data type: float32')
        hdu1.header['TUNIT2'] = ('d', 'column units: days')
        hdu1.header['TTYPE3'] = ('CADENCENO',
                                 'column title: unique cadence number')
        hdu1.header['TFORM3'] = ('J', 'column format: signed integer32')
        hdu1.header['TTYPE4'] = ('SAP_FLUX',
                                 'column title: aperture photometry flux')
        hdu1.header['TFORM4'] = ('E', 'column format: float32')
        hdu1.header['TUNIT4'] = ('e-/s', 'column units: electrons per second')
        hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR',
                                 'column title: aperture phot. flux error')
        hdu1.header['TFORM5'] = ('E', 'column format: float32')
        hdu1.header['TUNIT5'] = (
            'e-/s', 'column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE6'] = (
            'SAP_BKG', 'column title: aperture phot. background flux')
        hdu1.header['TFORM6'] = ('E', 'column format: float32')
        hdu1.header['TUNIT6'] = ('e-/s', 'column units: electrons per second')
        hdu1.header['TTYPE7'] = (
            'SAP_BKG_ERR', 'column title: ap. phot. background flux error')
        hdu1.header['TFORM7'] = ('E', 'column format: float32')
        hdu1.header['TUNIT7'] = (
            'e-/s', 'column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE8'] = ('PDCSAP_FLUX',
                                 'column title: PDC photometry flux')
        hdu1.header['TFORM8'] = ('E', 'column format: float32')
        hdu1.header['TUNIT8'] = ('e-/s', 'column units: electrons per second')
        hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR',
                                 'column title: PDC flux error')
        hdu1.header['TFORM9'] = ('E', 'column format: float32')
        hdu1.header['TUNIT9'] = (
            'e-/s', 'column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE10'] = (
            'SAP_QUALITY', 'column title: aperture photometry quality flag')
        hdu1.header['TFORM10'] = ('J', 'column format: signed integer32')
        hdu1.header['TTYPE11'] = ('PSF_CENTR1',
                                  'column title: PSF fitted column centroid')
        hdu1.header['TFORM11'] = ('E', 'column format: float32')
        hdu1.header['TUNIT11'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR',
                                  'column title: PSF fitted column error')
        hdu1.header['TFORM12'] = ('E', 'column format: float32')
        hdu1.header['TUNIT12'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE13'] = ('PSF_CENTR2',
                                  'column title: PSF fitted row centroid')
        hdu1.header['TFORM13'] = ('E', 'column format: float32')
        hdu1.header['TUNIT13'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR',
                                  'column title: PSF fitted row error')
        hdu1.header['TFORM14'] = ('E', 'column format: float32')
        hdu1.header['TUNIT14'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE15'] = (
            'MOM_CENTR1', 'column title: moment-derived column centroid')
        hdu1.header['TFORM15'] = ('E', 'column format: float32')
        hdu1.header['TUNIT15'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR',
                                  'column title: moment-derived column error')
        hdu1.header['TFORM16'] = ('E', 'column format: float32')
        hdu1.header['TUNIT16'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE17'] = ('MOM_CENTR2',
                                  'column title: moment-derived row centroid')
        hdu1.header['TFORM17'] = ('E', 'column format: float32')
        hdu1.header['TUNIT17'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR',
                                  'column title: moment-derived row error')
        hdu1.header['TFORM18'] = ('E', 'column format: float32')
        hdu1.header['TUNIT18'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE19'] = (
            'POS_CORR1', 'column title: column correction for velocity aberration')
        hdu1.header['TFORM19'] = ('E', 'column format: float32')
        hdu1.header['TUNIT19'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE20'] = (
            'POS_CORR2', 'column title: row correction for velocity aberration')
        hdu1.header['TFORM20'] = ('E', 'column format: float32')
        hdu1.header['TUNIT20'] = ('pixel', 'column units: pixel')
        hdu1.header['TTYPE21'] = ('PCA_FLUX',
                                  'column title: PCA-corrected flux')
        hdu1.header['TFORM21'] = ('E', 'column format: float32')
        hdu1.header['TUNIT21'] = ('e-/s', 'column units: electrons per second')
        hdu1.header['TTYPE22'] = (
            'PCA_FLUX_NRM', 'column title: normalized PCA-corrected flux')
        hdu1.header['TFORM22'] = ('E', 'column format: float32')
        hdu1.header['EXTNAME'] = ('LIGHTCURVE', 'name of extension')
        for i in range(len(cards1)):
            if (cards1[i].keyword not in list(hdu1.header.keys())
                    and cards1[i].keyword[:4] not in [
                        'TTYP', 'TFOR', 'TUNI', 'TDIS', 'TDIM', 'WCAX', '1CTY',
                        '2CTY', '1CRP', '2CRP', '1CRV', '2CRV', '1CUN', '2CUN',
                        '1CDE', '2CDE', '1CTY', '2CTY', '1CDL', '2CDL', '11PC',
                        '12PC', '21PC', '22PC'
                    ]):
                hdu1.header[cards1[i].keyword] = (cards1[i].value,
                                                  cards1[i].comment)
        outstr.append(hdu1)

# construct output mask bitmap extension

    if status == 0:
        hdu2 = ImageHDU(maskmap)
        for i in range(len(cards2)):
            if cards2[i].keyword not in list(hdu2.header.keys()):
                hdu2.header[cards2[i].keyword] = (cards2[i].value,
                                                  cards2[i].comment)
            else:
                hdu2.header.cards[
                    cards2[i].keyword].comment = cards2[i].comment
        outstr.append(hdu2)

# construct principal component table

    if status == 0:
        cols = [
            Column(name='TIME', format='D', unit='BJD - 2454833', array=time)
        ]
        for i in range(len(pcar[0, :])):
            colname = 'PC' + str(i + 1)
            col = Column(name=colname, format='E', array=pcar[:, i])
            cols.append(col)
        hdu3 = new_table(ColDefs(cols))
        hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS', 'name of extension')
        hdu3.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
        hdu3.header['TFORM1'] = ('D', 'data type: float64')
        hdu3.header['TUNIT1'] = ('BJD - 2454833',
                                 'column units: barycenter corrected JD')
        hdu3.header['TDISP1'] = ('D12.7', 'column display format')
        for i in range(len(pcar[0, :])):
            hdu3.header['TTYPE' + str(i + 2)] = \
                ('PC' + str(i + 1), 'column title: principal component number ' + str(i + 1))
            hdu3.header['TFORM' + str(i + 2)] = ('E', 'column format: float32')
        outstr.append(hdu3)

# write output file

    if status == 0:
        outstr.writeto(outfile)

# close input structure

    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# Create PCA report

    if status == 0 and plotpca:
        npp = 7  # Number of plots per page
        l = 1
        repcnt = 1
        for k in range(nreps):

            # First plot of every page with flux image, flux and calibrated time series

            status = kepplot.define(16, 12, logfile, verbose)
            if (k % (npp - 1) == 0):
                pylab.figure(figsize=[10, 16])
                subplot2grid((npp, 6), (0, 0), colspan=2)
                #                imshow(log10(pixMean.reshape(xdim,ydim).T-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu')
                imshow(log10(
                    flipud(pixMean.reshape(ydim, xdim)) - min(pixMean) + 1),
                       interpolation="nearest",
                       cmap='RdYlBu')
                xticks([])
                yticks([])
                ax1 = subplot2grid((npp, 6), (0, 2), colspan=4)
                px = copy(time) + bjdref
                py = copy(pixseriessum)
                px, xlab, status = kepplot.cleanx(px, logfile, verbose)
                py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
                kepplot.RangeOfPlot(px, py, 0.01, False)
                kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha,
                               True)
                py = copy(fluxcor)
                py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
                plot(px,
                     py,
                     marker='.',
                     color='r',
                     linestyle='',
                     markersize=1.0)
                kepplot.labels('', re.sub('\)', '',
                                          re.sub('Flux \(', '', ylab)), 'k',
                               18)
                grid()
                setp(ax1.get_xticklabels(), visible=False)

# plot principal components

            subplot2grid((npp, 6), (l, 0), colspan=2)
            imshow(eigvec[k], interpolation="nearest", cmap='RdYlBu')
            xlim(-0.5, xdim - 0.5)
            ylim(-0.5, ydim - 0.5)
            xticks([])
            yticks([])

            # The last plot on the page that should have the xlabel

            if (k % (npp - 1) == npp - 2 or k == nvecin - 1):
                subplot2grid((npp, 6), (l, 2), colspan=4)
                py = copy(model[:, k])
                kepplot.RangeOfPlot(px, py, 0.01, False)
                kepplot.plot1d(px, py, cadence, 'r', lwidth, 'g', falpha, True)
                kepplot.labels(xlab, 'PC ' + str(k + 1), 'k', 18)
                pylab.grid()
                pylab.tight_layout()
                l = 1
                pylab.savefig(re.sub('.png', '_%d.png' % repcnt, repname))
                if not cmdLine: kepplot.render(cmdLine)
                repcnt += 1

# The other plots on the page that should have no xlabel

            else:
                ax2 = subplot2grid((npp, 6), (l, 2), colspan=4)
                py = copy(model[:, k])
                kepplot.RangeOfPlot(px, py, 0.01, False)
                kepplot.plot1d(px, py, cadence, 'r', lwidth, 'g', falpha, True)
                kepplot.labels('', 'PC ' + str(k + 1), 'k', 18)
                grid()
                setp(ax2.get_xticklabels(), visible=False)
                pylab.tight_layout()
                l = l + 1
        pylab.savefig(re.sub('.png', '_%d.png' % repcnt, repname))
        if not cmdLine: kepplot.render(cmdLine)

# plot style and size

    if status == 0 and plotpca:
        status = kepplot.define(labelsize, ticksize, logfile, verbose)
        pylab.figure(figsize=[xsize, ysize])
        pylab.clf()

# plot aperture photometry and PCA corrected data

    if status == 0 and plotpca:
        ax = kepplot.location([0.06, 0.54, 0.93, 0.43])
        px = copy(time) + bjdref
        py = copy(pixseriessum)
        px, xlab, status = kepplot.cleanx(px, logfile, verbose)
        py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
        kepplot.RangeOfPlot(px, py, 0.01, False)
        kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha, True)
        py = copy(fluxcor)
        py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
        kepplot.plot1d(px, py, cadence, 'r', 2, fcolor, 0.0, True)
        pylab.setp(pylab.gca(), xticklabels=[])
        kepplot.labels('', ylab, 'k', 24)
        pylab.grid()

# plot aperture photometry and PCA corrected data

    if status == 0 and plotpca:
        ax = kepplot.location([0.06, 0.09, 0.93, 0.43])
        yr = array([], 'float32')
        npc = min([6, nrem])
        for i in range(npc - 1, -1, -1):
            py = pcar[:, i] * c[i]
            py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
            cl = float(i) / (float(npc))
            kepplot.plot1d(px, py, cadence, [1.0 - cl, 0.0, cl], 2, fcolor,
                           0.0, True)
            yr = append(yr, py)
        y1 = max(yr)
        y2 = -min(yr)
        kepplot.RangeOfPlot(px, array([-y1, y1, -y2, y2]), 0.01, False)
        kepplot.labels(xlab, 'Principal Components', 'k', 24)
        pylab.grid()

# save plot to file

    if status == 0 and plotpca:
        pylab.savefig(repname)

# render plot

    if status == 0 and plotpca:
        kepplot.render(cmdLine)

# stop time

    if status == 0:
        kepmsg.clock('KEPPCA ended at', logfile, verbose)

    return
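
For reference, a minimal call sketch (the file names below are hypothetical; any Kepler target pixel file readable by kepio.readTPF should work):

keppca(infile='kplr000000000-2011024051157_lpd-targ.fits',  # hypothetical target pixel file
       maskfile='all',             # use every pixel in the subimage
       outfile='keppca-out.fits',
       components='1-3',           # remove principal components 1, 2 and 3
       plotpca=True,
       nreps=3,                    # plot the first 3 principal components in the report
       clobber=True,
       verbose=True,
       logfile='keppca.log',
       status=0)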
コード例 #2
0
ファイル: keppca.py プロジェクト: KeplerGO/PyKE
def keppca(infile,maskfile,outfile,components,plotpca,nreps,clobber,verbose,logfile,status,cmdLine=False): 

    try:
        import mdp
    except:
        msg = 'ERROR -- KEPPCA: this task has an external python dependency to MDP, a Modular toolkit for Data Processing (http://mdp-toolkit.sourceforge.net). In order to take advantage of this PCA task, the user must first install MDP with their current python distribution. Note carefully that you may have more than python installation on your machine, and ensure that MDP is installed with the same version of python that the PyKE tools employ. Installation instructions for MDP can be found at the URL provided above.'
        status = kepmsg.err(None,msg,True)
    
# startup parameters

    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 10
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore") 

# log the call 

    if status == 0:
        hashline = '----------------------------------------------------------------------------'
        kepmsg.log(logfile,hashline,verbose)
        call = 'KEPPCA -- '
        call += 'infile='+infile+' '
        call += 'maskfile='+maskfile+' '
        call += 'outfile='+outfile+' '
        call += 'components='+components+' '
        ppca = 'n'
        if (plotpca): ppca = 'y'
        call += 'plotpca='+ppca+ ' '
        call += 'nmaps='+str(nreps)+' '
        overwrite = 'n'
        if (clobber): overwrite = 'y'
        call += 'clobber='+overwrite+ ' '
        chatter = 'n'
        if (verbose): chatter = 'y'
        call += 'verbose='+chatter+' '
        call += 'logfile='+logfile
        kepmsg.log(logfile,call+'\n',verbose)
        
# start time

    if status == 0:
        kepmsg.clock('KEPPCA started at',logfile,verbose)

# test log file

    if status == 0:
        logfile = kepmsg.test(logfile)
    
# clobber output file

    if status == 0:
        if clobber: status = kepio.clobber(outfile,logfile,verbose)
        if kepio.fileexists(outfile): 
            message = 'ERROR -- KEPPCA: ' + outfile + ' exists. Use clobber=yes'
            status = kepmsg.err(logfile,message,verbose)

# Set output file names - text file with data and plot

    if status == 0:
        dataout = copy(outfile)
        repname = re.sub('.fits','.png',outfile)

# open input file

    if status == 0:    
        instr = pyfits.open(infile,mode='readonly',memmap=True)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)

# open TPF FITS file

    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \
            kepio.readTPF(infile,'FLUX_BKG',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \
            kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, pcorr1, status = \
            kepio.readTPF(infile,'POS_CORR1',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, pcorr2, status = \
            kepio.readTPF(infile,'POS_CORR2',logfile,verbose)

# Save original data dimensions, in case of using maskfile

    if status == 0:
        xdimorig = xdim
        ydimorig = ydim
    
# read mask definition file if it has been supplied

    if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        maskx = array([],'int')
        masky = array([],'int')
        lines, status = kepio.openascii(maskfile,'r',logfile,verbose)
        for line in lines:
            line = line.strip().split('|')
            if len(line) == 6:
                y0 = int(line[3])
                x0 = int(line[4])
                line = line[5].split(';')
                for items in line:
                    try:
                        masky = numpy.append(masky,y0 + int(items.split(',')[0]))
                        maskx = numpy.append(maskx,x0 + int(items.split(',')[1]))
                    except:
                        continue
        status = kepio.closeascii(lines,logfile,verbose)
        if len(maskx) == 0 or len(masky) == 0:
            message = 'ERROR -- KEPPCA: ' + maskfile + ' contains no pixels.'
            status = kepmsg.err(logfile,message,verbose)
        xdim = max(maskx) - min(maskx) + 1   # Find largest x dimension of mask
        ydim = max(masky) - min(masky) + 1   # Find largest y dimension of mask

# pad mask to ensure it is rectangular

        workx = array([],'int')
        worky = array([],'int')
        for ip in arange(min(maskx),max(maskx) + 1):
            for jp in arange(min(masky),max(masky) + 1):
                workx = append(workx,ip)
                worky = append(worky,jp)
        maskx = workx
        masky = worky

# define new subimage bitmap...

    if status == 0 and maskfile.lower() != 'all':
        aperx = numpy.array([],'int')
        apery = numpy.array([],'int')
        aperb = maskx - x0 + xdimorig * (masky - y0)   # aperb is an array that contains the pixel numbers in the mask
        npix = len(aperb)

# ...or use all pixels

    if status == 0 and maskfile.lower() == 'all':
        npix = xdimorig*ydimorig
        aperb = array([],'int')
        aperb = numpy.r_[0:npix]

# legal mask defined?

    if status == 0:
        if len(aperb) == 0:
            message = 'ERROR -- KEPPCA: no legal pixels within the subimage are defined.'
            status = kepmsg.err(logfile,message,verbose)

# Identify principal components desired

    if status == 0:
        pcaout = []
        txt = components.strip().split(',')
        for work1 in txt:
            try:
                pcaout.append(int(work1.strip()))
            except:
                work2 = work1.strip().split('-')
                try:
                    for work3 in range(int(work2[0]),int(work2[1]) + 1):
                        pcaout.append(work3)
                except:
                    message = 'ERROR -- KEPPCA: cannot understand principal component list requested'
                    status = kepmsg.err(logfile,message,verbose)
    if status == 0:
        pcaout = set(sort(pcaout))
    pcarem = array(list(pcaout))-1    # The list of pca component numbers to be removed

# Initialize arrays and variables, and apply pixel mask to the data

    if status == 0:
        ntim = 0
        time = numpy.array([],dtype='float64')
        timecorr = numpy.array([],dtype='float32')
        cadenceno = numpy.array([],dtype='int')
        pixseries = numpy.array([],dtype='float32')
        errseries = numpy.array([],dtype='float32')
        bkgseries = numpy.array([],dtype='float32')
        berseries = numpy.array([],dtype='float32')
        quality = numpy.array([],dtype='float32')
        pos_corr1 = numpy.array([],dtype='float32')
        pos_corr2 = numpy.array([],dtype='float32')
        nrows = numpy.size(fluxpixels,0)
        
# Apply the pixel mask so we are left with only the desired pixels       

    if status == 0:
        pixseriesb = fluxpixels[:,aperb]
        errseriesb = errpixels[:,aperb]
        bkgseriesb = flux_bkg[:,aperb]
        berseriesb = flux_bkg_err[:,aperb]

# Read in the data to various arrays 
   
    if status == 0:
        for i in range(nrows):
            if qual[i] < 10000 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,int(ydim*xdim/2+0.5)]) and \
                    numpy.isfinite(fluxpixels[i,1+int(ydim*xdim/2+0.5)]):
                ntim += 1
                time = numpy.append(time,barytime[i])
                timecorr = numpy.append(timecorr,tcorr[i])
                cadenceno = numpy.append(cadenceno,cadno[i])
                pixseries = numpy.append(pixseries,pixseriesb[i])
                errseries = numpy.append(errseries,errseriesb[i])
                bkgseries = numpy.append(bkgseries,bkgseriesb[i])
                berseries = numpy.append(berseries,berseriesb[i])
                quality = numpy.append(quality,qual[i])
                pos_corr1 = numpy.append(pos_corr1,pcorr1[i])
                pos_corr2 = numpy.append(pos_corr2,pcorr2[i])
        pixseries = numpy.reshape(pixseries,(ntim,npix))
        errseries = numpy.reshape(errseries,(ntim,npix))
        bkgseries = numpy.reshape(bkgseries,(ntim,npix))
        berseries = numpy.reshape(berseries,(ntim,npix))        
        tmp =  numpy.median(pixseries,axis=1)     
        for i in range(len(tmp)):
             pixseries[i] = pixseries[i] - tmp[i]

# Figure out which pixels are undefined/nan and remove them. Keep track for adding back in later

    if status == 0:
        nanpixels = numpy.array([],dtype='int')
        i = 0
        while (i < npix):
            if numpy.isnan(pixseries[0,i]):
                nanpixels = numpy.append(nanpixels,i)
                npix = npix - 1
            i = i + 1
        pixseries = numpy.delete(pixseries,nanpixels,1)
        errseries = numpy.delete(errseries,nanpixels,1)
        pixseries[numpy.isnan(pixseries)] = random.gauss(100,10)
        errseries[numpy.isnan(errseries)] = 10
 
# Compute statistical weights, means, standard deviations

    if status == 0:
        weightseries = (pixseries/errseries)**2
        pixMean = numpy.average(pixseries,axis=0,weights=weightseries)
        pixStd  = numpy.std(pixseries,axis=0)

# Normalize the input by subtracting the mean and divising by the standard deviation. 
# This makes it a correlation-based PCA, which is what we want.

    if status == 0:
        pixseriesnorm = (pixseries - pixMean)/pixStd

# Number of principal components to compute. Setting it equal to the number of pixels

    if status == 0:
        nvecin = npix  

# Run PCA using the MDP Whitening PCA, which produces normalized PCA components (zero mean and unit variance)
    
    if status == 0:
        pcan = mdp.nodes.WhiteningNode(svd=True)
        pcar = pcan.execute(pixseriesnorm)
        eigvec = pcan.get_recmatrix()
        model = pcar
 
# Re-insert nan columns as zeros

    if status == 0:
        for i in range(0,len(nanpixels)):
            nanpixels[i] = nanpixels[i]-i
        eigvec = numpy.insert(eigvec,nanpixels,0,1)
        pixMean = numpy.insert(pixMean,nanpixels,0,0)

#  Make output eigenvectors (correlation images) into xpix by ypix images

    if status == 0:
        eigvec = eigvec.reshape(nvecin,ydim,xdim)

# Calculate sum of all pixels to display as raw lightcurve and other quantities

    if status == 0:
        pixseriessum = sum(pixseries,axis=1)
        nrem=len(pcarem)  # Number of components to remove
        nplot = npix      # Number of pcas to plot - currently set to plot all components, but could set 
                          # nplot = nrem to just plot as many components as is being removed

# Subtract components by fitting them to the summed light curve

    if status == 0:
        x0 = numpy.tile(-1.0,1)
        for k in range(0,nrem):
            def f(x):
                fluxcor = pixseriessum
                for k in range(0,len(x)):
                    fluxcor = fluxcor - x[k]*model[:,pcarem[k]]
                return mad(fluxcor)
            if k==0:
                x0 = array([-1.0])
            else:
                x0 = numpy.append(x0,1.0)
            myfit = scipy.optimize.fmin(f,x0,maxiter=50000,maxfun=50000,disp=False)
            x0 = myfit
    
# Now that coefficients for all components have been found, subtract them to produce a calibrated time-series, 
# and then divide by the robust mean to produce a normalized time series as well

    if status == 0:
        c = myfit
        fluxcor = pixseriessum
        for k in range(0,nrem):
            fluxcor = fluxcor - c[k]*model[:,pcarem[k]]
            normfluxcor = fluxcor/mean(reject_outliers(fluxcor,2))

# input file data

    if status == 0:
        cards0 = instr[0].header.cards
        cards1 = instr[1].header.cards
        cards2 = instr[2].header.cards
        table = instr[1].data[:]
        maskmap = copy(instr[2].data)

# subimage physical WCS data

    if status == 0:
        crpix1p = cards2['CRPIX1P'].value
        crpix2p = cards2['CRPIX2P'].value
        crval1p = cards2['CRVAL1P'].value
        crval2p = cards2['CRVAL2P'].value
        cdelt1p = cards2['CDELT1P'].value
        cdelt2p = cards2['CDELT2P'].value

# dummy columns for output file

    if status == 0:
        sap_flux_err = numpy.empty(len(time)); sap_flux_err[:] = numpy.nan
        sap_bkg = numpy.empty(len(time)); sap_bkg[:] = numpy.nan
        sap_bkg_err = numpy.empty(len(time)); sap_bkg_err[:] = numpy.nan
        pdc_flux = numpy.empty(len(time)); pdc_flux[:] = numpy.nan
        pdc_flux_err = numpy.empty(len(time)); pdc_flux_err[:] = numpy.nan
        psf_centr1 = numpy.empty(len(time)); psf_centr1[:] = numpy.nan
        psf_centr1_err = numpy.empty(len(time)); psf_centr1_err[:] = numpy.nan
        psf_centr2 = numpy.empty(len(time)); psf_centr2[:] = numpy.nan
        psf_centr2_err = numpy.empty(len(time)); psf_centr2_err[:] = numpy.nan
        mom_centr1 = numpy.empty(len(time)); mom_centr1[:] = numpy.nan
        mom_centr1_err = numpy.empty(len(time)); mom_centr1_err[:] = numpy.nan
        mom_centr2 = numpy.empty(len(time)); mom_centr2[:] = numpy.nan
        mom_centr2_err = numpy.empty(len(time)); mom_centr2_err[:] = numpy.nan

# mask bitmap

    if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        for i in range(maskmap.shape[0]):
            for j in range(maskmap.shape[1]):
                aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p)
                apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p)
                if maskmap[i,j] == 0:
                    pass
                else:
                    maskmap[i,j] = 1
                    for k in range(len(maskx)):
                        if aperx[-1] == maskx[k] and apery[-1] == masky[k]:
                            maskmap[i,j] = 3

# construct output primary extension

    if status == 0:
        hdu0 = pyfits.PrimaryHDU()
        for i in range(len(cards0)):
            if cards0[i].keyword not in hdu0.header.keys():
                hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment)
            else:
                hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
        status = kepkey.history(call,hdu0,outfile,logfile,verbose)
        outstr = HDUList(hdu0)

# construct output light curve extension

    if status == 0:
        col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=time)
        col2 = Column(name='TIMECORR',format='E',unit='d',array=timecorr)
        col3 = Column(name='CADENCENO',format='J',array=cadenceno)
        col4 = Column(name='SAP_FLUX',format='E',unit='e-/s',array=pixseriessum)
        col5 = Column(name='SAP_FLUX_ERR',format='E',unit='e-/s',array=sap_flux_err)
        col6 = Column(name='SAP_BKG',format='E',unit='e-/s',array=sap_bkg)
        col7 = Column(name='SAP_BKG_ERR',format='E',unit='e-/s',array=sap_bkg_err)
        col8 = Column(name='PDCSAP_FLUX',format='E',unit='e-/s',array=pdc_flux)
        col9 = Column(name='PDCSAP_FLUX_ERR',format='E',unit='e-/s',array=pdc_flux_err)
        col10 = Column(name='SAP_QUALITY',format='J',array=quality)
        col11 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=psf_centr1)
        col12 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=psf_centr1_err)
        col13 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=psf_centr2)
        col14 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=psf_centr2_err)
        col15 = Column(name='MOM_CENTR1',format='E',unit='pixel',array=mom_centr1)
        col16 = Column(name='MOM_CENTR1_ERR',format='E',unit='pixel',array=mom_centr1_err)
        col17 = Column(name='MOM_CENTR2',format='E',unit='pixel',array=mom_centr2)
        col18 = Column(name='MOM_CENTR2_ERR',format='E',unit='pixel',array=mom_centr2_err)
        col19 = Column(name='POS_CORR1',format='E',unit='pixel',array=pos_corr1)
        col20 = Column(name='POS_CORR2',format='E',unit='pixel',array=pos_corr2)
        col21 = Column(name='PCA_FLUX',format='E',unit='e-/s',array=fluxcor)
        col22 = Column(name='PCA_FLUX_NRM',format='E',array=normfluxcor)
        cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \
                            col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22])
        hdu1 = new_table(cols)
        hdu1.header['TTYPE1'] = ('TIME','column title: data time stamps')
        hdu1.header['TFORM1'] = ('D','data type: float64')
        hdu1.header['TUNIT1'] = ('BJD - 2454833','column units: barycenter corrected JD')
        hdu1.header['TDISP1'] = ('D12.7','column display format')
        hdu1.header['TTYPE2'] = ('TIMECORR','column title: barycentric-timeslice correction')
        hdu1.header['TFORM2'] = ('E','data type: float32')
        hdu1.header['TUNIT2'] = ('d','column units: days')
        hdu1.header['TTYPE3'] = ('CADENCENO','column title: unique cadence number')
        hdu1.header['TFORM3'] = ('J','column format: signed integer32')
        hdu1.header['TTYPE4'] = ('SAP_FLUX','column title: aperture photometry flux')
        hdu1.header['TFORM4'] = ('E','column format: float32')
        hdu1.header['TUNIT4'] = ('e-/s','column units: electrons per second')
        hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR','column title: aperture phot. flux error')
        hdu1.header['TFORM5'] = ('E','column format: float32')
        hdu1.header['TUNIT5'] = ('e-/s','column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE6'] = ('SAP_BKG','column title: aperture phot. background flux')
        hdu1.header['TFORM6'] = ('E','column format: float32')
        hdu1.header['TUNIT6'] = ('e-/s','column units: electrons per second')
        hdu1.header['TTYPE7'] = ('SAP_BKG_ERR','column title: ap. phot. background flux error')
        hdu1.header['TFORM7'] = ('E','column format: float32')
        hdu1.header['TUNIT7'] = ('e-/s','column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE8'] = ('PDCSAP_FLUX','column title: PDC photometry flux')
        hdu1.header['TFORM8'] = ('E','column format: float32')
        hdu1.header['TUNIT8'] = ('e-/s','column units: electrons per second')
        hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR','column title: PDC flux error')
        hdu1.header['TFORM9'] = ('E','column format: float32')
        hdu1.header['TUNIT9'] = ('e-/s','column units: electrons per second (1-sigma)')
        hdu1.header['TTYPE10'] = ('SAP_QUALITY','column title: aperture photometry quality flag')
        hdu1.header['TFORM10'] = ('J','column format: signed integer32')
        hdu1.header['TTYPE11'] = ('PSF_CENTR1','column title: PSF fitted column centroid')
        hdu1.header['TFORM11'] = ('E','column format: float32')
        hdu1.header['TUNIT11'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR','column title: PSF fitted column error')
        hdu1.header['TFORM12'] = ('E','column format: float32')
        hdu1.header['TUNIT12'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE13'] = ('PSF_CENTR2','column title: PSF fitted row centroid')
        hdu1.header['TFORM13'] = ('E','column format: float32')
        hdu1.header['TUNIT13'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR','column title: PSF fitted row error')
        hdu1.header['TFORM14'] = ('E','column format: float32')
        hdu1.header['TUNIT14'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE15'] = ('MOM_CENTR1','column title: moment-derived column centroid')
        hdu1.header['TFORM15'] = ('E','column format: float32')
        hdu1.header['TUNIT15'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR','column title: moment-derived column error')
        hdu1.header['TFORM16'] = ('E','column format: float32')
        hdu1.header['TUNIT16'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE17'] = ('MOM_CENTR2','column title: moment-derived row centroid')
        hdu1.header['TFORM17'] = ('E','column format: float32')
        hdu1.header['TUNIT17'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR','column title: moment-derived row error')
        hdu1.header['TFORM18'] = ('E','column format: float32')
        hdu1.header['TUNIT18'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE19'] = ('POS_CORR1','column title: col correction for vel. abbern')
        hdu1.header['TFORM19'] = ('E','column format: float32')
        hdu1.header['TUNIT19'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE20'] = ('POS_CORR2','column title: row correction for vel. abbern')
        hdu1.header['TFORM20'] = ('E','column format: float32')
        hdu1.header['TUNIT20'] = ('pixel','column units: pixel')
        hdu1.header['TTYPE21'] = ('PCA_FLUX','column title: PCA-corrected flux')
        hdu1.header['TFORM21'] = ('E','column format: float32')
        hdu1.header['TUNIT21'] = ('pixel','column units: e-/s')
        hdu1.header['TTYPE22'] = ('PCA_FLUX_NRM','column title: normalized PCA-corrected flux')
        hdu1.header['TFORM22'] = ('E','column format: float32')
        hdu1.header['EXTNAME'] = ('LIGHTCURVE','name of extension')
        for i in range(len(cards1)):
            if (cards1[i].keyword not in hdu1.header.keys() and
                cards1[i].keyword[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY',
                                          '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN',
                                          '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC',
                                          '12PC','21PC','22PC']):
                hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment)
        outstr.append(hdu1)

# construct output mask bitmap extension

    if status == 0:
        hdu2 = ImageHDU(maskmap)
        for i in range(len(cards2)):
            if cards2[i].keyword not in hdu2.header.keys():
                hdu2.header[cards2[i].keyword] = (cards2[i].value, cards2[i].comment)
            else:
                hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
        outstr.append(hdu2)

# construct principal component table

    if status == 0:
        cols = [Column(name='TIME',format='E',unit='BJD - 2454833',array=time)]
        for i in range(len(pcar[0,:])):
            colname = 'PC' + str(i + 1)
            col = Column(name=colname,format='E',array=pcar[:,i])
            cols.append(col)
        hdu3 = new_table(ColDefs(cols))
        hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS','name of extension')
        hdu3.header['TTYPE1'] = ('TIME','column title: data time stamps')
        hdu3.header['TFORM1'] = ('D','data type: float64')
        hdu3.header['TUNIT1'] = ('BJD - 2454833','column units: barycenter corrected JD')
        hdu3.header['TDISP1'] = ('D12.7','column display format')
        for i in range(len(pcar[0,:])):
            hdu3.header['TTYPE' + str(i + 2)] = \
                ('PC' + str(i + 1), 'column title: principal component number' + str(i + 1))
            hdu3.header['TFORM' + str(i + 2)] = ('E','column format: float32')
        outstr.append(hdu3)

# write output file

    if status == 0:
        outstr.writeto(outfile)
    
# close input structure

    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)
        
# Create PCA report 

    if status == 0 and plotpca:
        npp = 7 # Number of plots per page
        l = 1
        repcnt = 1
        for k in range(nreps):

# First plot of every page, with flux image, flux and calibrated time series

            status = kepplot.define(16,12,logfile,verbose)
            if (k % (npp - 1) == 0):     
                pylab.figure(figsize=[10,16])
                subplot2grid((npp,6),(0,0), colspan=2)
#                imshow(log10(pixMean.reshape(xdim,ydim).T-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu')
                imshow(log10(flipud(pixMean.reshape(ydim,xdim))-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu')
                xticks([])
                yticks([])
                ax1 = subplot2grid((npp,6),(0,2), colspan=4)
                px = copy(time) + bjdref
                py = copy(pixseriessum)
                px, xlab, status = kepplot.cleanx(px,logfile,verbose) 
                py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
                kepplot.RangeOfPlot(px,py,0.01,False)
                kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)
                py = copy(fluxcor)
                py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
                plot(px,py,marker='.',color='r',linestyle='',markersize=1.0)
                kepplot.labels('',re.sub('\)','',re.sub('Flux \(','',ylab)),'k',18)
                grid()
                setp(ax1.get_xticklabels(), visible=False)

# plot principal components

            subplot2grid((npp,6),(l,0), colspan=2)
            imshow(eigvec[k],interpolation="nearest",cmap='RdYlBu')
            xlim(-0.5,xdim-0.5)
            ylim(-0.5,ydim-0.5)
            xticks([])
            yticks([])

# The last plot on the page that should have the xlabel

            if (k % (npp - 1) == npp - 2 or k == nvecin - 1):
                subplot2grid((npp,6),(l,2), colspan=4)
                py = copy(model[:,k])
                kepplot.RangeOfPlot(px,py,0.01,False)
                kepplot.plot1d(px,py,cadence,'r',lwidth,'g',falpha,True)
                kepplot.labels(xlab,'PC ' + str(k+1),'k',18)
                pylab.grid()
                pylab.tight_layout()
                l = 1
                pylab.savefig(re.sub('.png','_%d.png' % repcnt,repname))
                if not cmdLine: kepplot.render(cmdLine)
                repcnt += 1

# The other plots on the page that should have no xlabel

            else:
                ax2 = subplot2grid((npp,6),(l,2), colspan=4)
                py = copy(model[:,k])
                kepplot.RangeOfPlot(px,py,0.01,False)
                kepplot.plot1d(px,py,cadence,'r',lwidth,'g',falpha,True)
                kepplot.labels('','PC ' + str(k+1),'k',18)
                grid()
                setp(ax2.get_xticklabels(), visible=False)
                pylab.tight_layout()
                l=l+1
        pylab.savefig(re.sub('.png','_%d.png' % repcnt,repname))
        if not cmdLine: kepplot.render(cmdLine)

# plot style and size

    if status == 0 and plotpca:
        status = kepplot.define(labelsize,ticksize,logfile,verbose)
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot aperture photometry and PCA corrected data

    if status == 0 and plotpca:
        ax = kepplot.location([0.06,0.54,0.93,0.43])
        px = copy(time) + bjdref
        py = copy(pixseriessum)
        px, xlab, status = kepplot.cleanx(px,logfile,verbose) 
        py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
        kepplot.RangeOfPlot(px,py,0.01,False)
        kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)
        py = copy(fluxcor)
        py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
        kepplot.plot1d(px,py,cadence,'r',2,fcolor,0.0,True)
        pylab.setp(pylab.gca(),xticklabels=[])
        kepplot.labels('',ylab,'k',24)
        pylab.grid()

# plot aperture photometry and PCA corrected data

    if status == 0 and plotpca:
        ax = kepplot.location([0.06,0.09,0.93,0.43])
        yr = array([],'float32')
        npc = min([6,nrem])
        for i in range(npc-1,-1,-1):
            py = pcar[:,i] * c[i]
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
            cl = float(i) / (float(npc))
            kepplot.plot1d(px,py,cadence,[1.0-cl,0.0,cl],2,fcolor,0.0,True)
            yr = append(yr,py)
        y1 = max(yr)
        y2 = -min(yr)
        kepplot.RangeOfPlot(px,array([-y1,y1,-y2,y2]),0.01,False)
        kepplot.labels(xlab,'Principal Components','k',24)
        pylab.grid()

# save plot to file

    if status == 0 and plotpca:
        pylab.savefig(repname)

# render plot

    if status == 0 and plotpca:
        kepplot.render(cmdLine)

# stop time

    if status == 0:
        kepmsg.clock('KEPPCA ended at',logfile,verbose)

    return
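The function above stores its principal components in a PRINCIPAL_COMPONENTS binary-table extension with columns TIME, PC1, PC2, and so on. As a minimal sketch (not part of PyKE; the function name and arguments below are invented for illustration, and only the header layout constructed above is assumed), that table could be read back and inspected like this:

import pyfits    # astropy.io.fits provides the same interface on newer installs
import pylab

def plot_principal_components(pcafile, npc=3):

    # open the keppca output and pull the component table by its EXTNAME
    hdulist = pyfits.open(pcafile, mode='readonly')
    tbl = hdulist['PRINCIPAL_COMPONENTS'].data
    time = tbl.field('TIME')                      # BJD - 2454833
    for i in range(1, npc + 1):
        pylab.plot(time, tbl.field('PC%d' % i), label='PC%d' % i)
    hdulist.close()

    # label and render
    pylab.xlabel('Time (BJD - 2454833)')
    pylab.ylabel('Principal component amplitude')
    pylab.legend()
    pylab.show()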
Code example #3
0
File: kepsff.py  Project: kublaj/PyKE
def kepsff(infile,outfile,datacol,cenmethod,stepsize,npoly_cxcy,sigma_cxcy,npoly_ardx,
           npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,plotres,clobber,verbose,logfile,
           status,cmdLine=False): 

# startup parameters

    status = 0
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore") 

# log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSFF -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    call += 'cenmethod='+cenmethod+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'npoly_cxcy='+str(npoly_cxcy)+' '
    call += 'sigma_cxcy='+str(sigma_cxcy)+' '
    call += 'npoly_ardx='+str(npoly_ardx)+' '
    call += 'npoly_dsdt='+str(npoly_dsdt)+' '
    call += 'sigma_dsdt='+str(sigma_dsdt)+' '
    call += 'npoly_arfl='+str(npoly_arfl)+' '
    call += 'sigma_arfl='+str(sigma_arfl)+' '
    savep = 'n'
    if (plotres): savep = 'y'
    call += 'plotres='+savep+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time

    kepmsg.clock('KEPSFF started at',logfile,verbose)

# test log file

    logfile = kepmsg.test(logfile)

# clobber output file

    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile): 
        message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file

    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr,infile,logfile,verbose)

# read table structure

    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# determine sequence of windows in time

    if status == 0:
        frametim = instr[1].header['FRAMETIM']
        num_frm = instr[1].header['NUM_FRM']
        exptime = frametim * num_frm / 86400
        tstart = table.field('TIME')[0]
        tstop = table.field('TIME')[-1]
        winedge = arange(tstart,tstop,stepsize) 
        if tstop > winedge[-1] + stepsize / 2:
            winedge = append(winedge,tstop)
        else:
            winedge[-1] = tstop
        winedge = (winedge - tstart) / exptime
        winedge = winedge.astype(int)
        if len(table.field('TIME')) > winedge[-1] + 1:
            winedge = append(winedge,len(table.field('TIME')))
        elif len(table.field('TIME')) < winedge[-1]:
            winedge[-1] = len(table.field('TIME'))
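        # FRAMETIM (frame time, in seconds) times NUM_FRM (frames per cadence),
        # divided by 86400, gives the cadence length in days, so the window
        # boundaries defined in days by stepsize are converted above into
        # integer indices into the TIME column.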

# step through the time windows
        
    if status == 0:
        for iw in range(1,len(winedge)):
            t1 = winedge[iw-1]
            t2 = winedge[iw]

# filter input data table

            work1 = numpy.array([table.field('TIME')[t1:t2], table.field('CADENCENO')[t1:t2], 
                                 table.field(datacol)[t1:t2], 
                                 table.field('MOM_CENTR1')[t1:t2], table.field('MOM_CENTR2')[t1:t2],
                                 table.field('PSF_CENTR1')[t1:t2], table.field('PSF_CENTR2')[t1:t2],
                                 table.field('SAP_QUALITY')[t1:t2]],'float64')
            work1 = numpy.rot90(work1,3)
            work2 = work1[~numpy.isnan(work1).any(1)]            
            work2 = work2[(work2[:,0] == 0.0) | (work2[:,0] > 1e5)]

# assign table columns

            intime = work2[:,7] + bjdref
            cadenceno = work2[:,6].astype(int)
            indata = work2[:,5]
            mom_centr1 = work2[:,4]
            mom_centr2 = work2[:,3]
            psf_centr1 = work2[:,2]
            psf_centr2 = work2[:,1]
            sap_quality = work2[:,0]
            if cenmethod == 'moments':
                centr1 = copy(mom_centr1)
                centr2 = copy(mom_centr2)
            else:
                centr1 = copy(psf_centr1)
                centr2 = copy(psf_centr2)                

# fit centroid data with low-order polynomial

            cfit = zeros((len(centr2)))
            csig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_cxcy)
            pinit = array([nanmean(centr2)])
            if npoly_cxcy > 0:
                for j in range(npoly_cxcy):
                    pinit = append(pinit,0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
                for j in range(len(coeffs)):
                    cfit += coeffs[j] * numpy.power(centr1,j)
                    csig[:] = sigma
            except:
                message  = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2)
                status = kepmsg.err(logfile,message,verbose)
#                sys.exit('')
                os._exit(1)

# reject outliers

            time_good = array([],'float64')
            centr1_good = array([],'float32')
            centr2_good = array([],'float32')
            flux_good = array([],'float32')
            cad_good = array([],'int')
            for i in range(len(cfit)):
                if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
                    time_good = append(time_good,intime[i])
                    centr1_good = append(centr1_good,centr1[i])
                    centr2_good = append(centr2_good,centr2[i])
                    flux_good = append(flux_good,indata[i])
                    cad_good = append(cad_good,cadenceno[i])

# covariance matrix for centroid time series

            centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)])
            covar = cov(centr)

# eigenvector eigenvalues of covariance matrix

            [eval, evec] = numpy.linalg.eigh(covar)
            ex = arange(-10.0,10.0,0.1)
            epar = evec[1,1] / evec[0,1] * ex
            enor = evec[1,0] / evec[0,0] * ex
            ex = ex + mean(centr1)
            epar = epar + mean(centr2_good)
            enor = enor + mean(centr2_good)

# rotate centroid data

            centr_rot = dot(evec.T,centr)
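            # numpy.linalg.eigh returns eigenvalues in ascending order with the
            # eigenvectors as the columns of evec, so evec[:,1] points along the
            # direction of greatest centroid motion. Projecting with evec.T puts
            # that dominant motion into centr_rot[1,:], the coordinate used for
            # the arclength fit below.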

# fit polynomial to rotated centroids

            rfit = zeros((len(centr2)))
            rsig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(centr_rot[0,:])])
            pinit = array([1.0])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit,0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
                                   logfile,verbose)
            except:
                message  = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile,message,verbose)
            rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
            ry = zeros((len(rx)))
            for i in range(len(coeffs)):
                ry = ry + coeffs[i] * numpy.power(rx,i)

# calculate arclength of centroids

            s = zeros((len(rx)))
            for i in range(1,len(s)):
                work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2 
                s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])
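            # s is the cumulative arclength along the fitted polynomial y(x):
            #     s[i] = s[i-1] + sqrt(1 + (dy/dx)**2) * (x[i] - x[i-1])
            # i.e. a discrete approximation of the integral of sqrt(1 + y'(x)**2) dx.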

# fit arclength as a function of strongest eigenvector

            sfit = zeros((len(centr2)))
            ssig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(s)])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit,0.0)
            try:
                acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
            except:
                message  = 'ERROR -- KEPSFF: could not fit arclength as a function of the rotated centroid with polynomial'
                status = kepmsg.err(logfile,message,verbose)

# correlate arclength with detrended flux

            t = copy(time_good)
            c = copy(cad_good)
            y = copy(flux_good)
            z = centr_rot[1,:]
            x = zeros((len(z)))
            for i in range(len(acoeffs)):
                x = x + acoeffs[i] * numpy.power(z,i)

# calculate time derivative of arclength s

            dx = zeros((len(x)))
            for i in range(1,len(x)):
                dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
            dx[0] = dx[1]
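            # dx now holds ds/dt, the rate of change of arclength with time.
            # Cadences where this derivative deviates from the fitted trend by
            # more than sigma_dsdt standard deviations are treated below as
            # thruster firings and excluded from the arclength-flux fit.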

# fit polynomial to derivative and flag outliers (thruster firings)

            dfit = zeros((len(dx)))
            dsig = zeros((len(dx)))
            functype = 'poly' + str(npoly_dsdt)
            pinit = array([nanmean(dx)])
            if npoly_dsdt > 0:
                for j in range(npoly_dsdt):
                    pinit = append(pinit,0.0)
            try:
                dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
                    kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
            except:
                message  = 'ERROR -- KEPSFF: could not fit the arclength time derivative with polynomial'
                status = kepmsg.err(logfile,message,verbose)
            for i in range(len(dcoeffs)):
                dfit = dfit + dcoeffs[i] * numpy.power(t,i)
            centr1_pnt = array([],'float32')
            centr2_pnt = array([],'float32')
            time_pnt = array([],'float64')
            flux_pnt = array([],'float32')
            dx_pnt = array([],'float32')
            s_pnt = array([],'float32')
            time_thr = array([],'float64')
            flux_thr = array([],'float32')
            dx_thr = array([],'float32')
            thr_cadence = []
            for i in range(len(t)):
                if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
                    time_pnt = append(time_pnt,time_good[i])
                    flux_pnt = append(flux_pnt,flux_good[i])
                    dx_pnt = append(dx_pnt,dx[i])                
                    s_pnt = append(s_pnt,x[i])                
                    centr1_pnt = append(centr1_pnt,centr1_good[i])
                    centr2_pnt = append(centr2_pnt,centr2_good[i])
                else:
                    time_thr = append(time_thr,time_good[i])
                    flux_thr = append(flux_thr,flux_good[i])                
                    dx_thr = append(dx_thr,dx[i]) 
                    thr_cadence.append(cad_good[i])

# fit arclength-flux correlation

            cfit = zeros((len(time_pnt)))
            csig = zeros((len(time_pnt)))
            functype = 'poly' + str(npoly_arfl)
            pinit = array([nanmean(flux_pnt)])
            if npoly_arfl > 0:
                for j in range(npoly_arfl):
                    pinit = append(pinit,0.0)
            try:
                ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
                    kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
            except:
                message  = 'ERROR -- KEPSFF: could not fit the arclength-flux correlation with polynomial'
                status = kepmsg.err(logfile,message,verbose)        

# correction factors for unfiltered data

            centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T,centr)
            yy = copy(indata)
            zz = centr_rot[1,:]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz,i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx,i)

# apply correction to flux time-series

            out_detsap = indata / cfac
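            # The correction composes the two fits: acoeffs maps the rotated
            # centroid coordinate to arclength, ccoeffs maps arclength to the
            # flux trend, and dividing the raw flux by that trend (cfac) removes
            # the pointing-dependent systematic.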

# split time-series data for plotting

            tim_gd = array([],'float32')
            flx_gd = array([],'float32')
            tim_bd = array([],'float32')
            flx_bd = array([],'float32')
            for i in range(len(indata)):
                if intime[i] in time_pnt:
                    tim_gd = append(tim_gd,intime[i])
                    flx_gd = append(flx_gd,out_detsap[i])
                else:
                    tim_bd = append(tim_bd,intime[i])
                    flx_bd = append(flx_bd,out_detsap[i])

# plot style and size

            status = kepplot.define(labelsize,ticksize,logfile,verbose)
            pylab.figure(figsize=[xsize,ysize])
            pylab.clf()

# plot x-centroid vs y-centroid

            ax = kepplot.location([0.04,0.57,0.16,0.41])                                      # plot location
            px = copy(centr1)                                                             # clean-up x-axis units
            py = copy(centr2)                                                             # clean-up y-axis units
            pxmin = px.min()
            pxmax = px.max()
            pymin = py.min()
            pymax = py.max()
            pxr = pxmax - pxmin
            pyr = pymax - pymin
            pad = 0.05
            if pxr > pyr:
                dely = (pxr - pyr) / 2 
                xlim(pxmin - pxr * pad, pxmax + pxr * pad)
                ylim(pymin - dely - pyr * pad, pymax + dely + pyr * pad)
            else:
                delx = (pyr - pxr) / 2 
                ylim(pymin - pyr * pad, pymax + pyr * pad)
                xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad)
            pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')                   # plot data
            pylab.plot(centr1_good,centr2_good,color='#009900',markersize=5,marker='D',ls='') # plot data
            pylab.plot(ex,epar,color='k',ls='-')
            pylab.plot(ex,enor,color='k',ls='-')
            for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            kepplot.labels('CCD Column','CCD Row','k',16)                                     # labels
            pylab.grid()                                                                      # grid lines
            
# plot arclength fits vs drift along strongest eigenvector

            ax = kepplot.location([0.24,0.57,0.16,0.41])                                      # plot location
            px = rx - rx[0]
            py = s - rx - (s[0] - rx[0])                                                      # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)                         # clean-up y-axis units
            kepplot.RangeOfPlot(px,py,0.05,False)                                             # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            px = plotx - rx[0]                                                              # clean-up x-axis units
            py = ploty-plotx - (s[0] - rx[0])                                              # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)                         # clean-up y-axis units
            pylab.plot(px,py,color='r',ls='-',lw=3)
            for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            ylab = re.sub(' e\S+',' pixels)',ylab)
            ylab = re.sub(' s\S+','',ylab)
            ylab = re.sub('Flux','s $-$ x\'',ylab)
            kepplot.labels('Linear Drift [x\'] (pixels)',ylab,'k',16)                               # labels
            pylab.grid()                                                                      # grid lines

# plot time derivative of arclength s

            ax = kepplot.location([0.04,0.08,0.16,0.41])                                        # plot location
            px = copy(time_pnt)
            py = copy(dx_pnt)
            px, xlab, status = kepplot.cleanx(px,logfile,verbose)       # clean-up x-axis units
            kepplot.RangeOfPlot(px,dx,0.05,False)                                             # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            try:
                px = copy(time_thr)
                py = copy(dx_thr)
                px, xlab, status = kepplot.cleanx(px,logfile,verbose)       # clean-up x-axis units
                pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')
            except:
                pass
            px = copy(t)
            py = copy(dfit)
            px, xlab, status = kepplot.cleanx(px,logfile,verbose)       # clean-up x-axis units
            pylab.plot(px,py,color='r',ls='-',lw=3)
            py = copy(dfit+sigma_dsdt*dsigma)
            pylab.plot(px,py,color='r',ls='--',lw=3)
            py = copy(dfit-sigma_dsdt*dsigma)
            pylab.plot(px,py,color='r',ls='--',lw=3)
            for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            kepplot.labels(xlab,'ds/dt (pixels day$^{-1}$)','k',16)                                  # labels
            pylab.grid()                                                                      # grid lines

# plot relation of arclength vs detrended flux

            ax = kepplot.location([0.24,0.08,0.16,0.41])                                       # plot location
            px = copy(s_pnt)
            py = copy(flux_pnt)
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)                         # clean-up y-axis units
            kepplot.RangeOfPlot(px,py,0.05,False)                                             # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            pylab.plot(plx,ply,color='r',ls='-',lw=3)
            for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) 
            kepplot.labels('Arclength [s] (pixels)',ylab,'k',16)                                  # labels
            pylab.grid()                                                                      # grid lines
            
# plot aperture photometry

            kepplot.location([0.44,0.53,0.55,0.45])                          # plot location
            px, xlab, status = kepplot.cleanx(intime,logfile,verbose)       # clean-up x-axis units
            py, ylab, status = kepplot.cleany(indata,1.0,logfile,verbose)   # clean-up y-axis units
            kepplot.RangeOfPlot(px,py,0.01,True)                                 # data limits
            kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)  # plot data
            kepplot.labels(' ',ylab,'k',16)                                   # labels
            pylab.setp(pylab.gca(),xticklabels=[])                          # remove x- or y-tick labels
            kepplot.labels(xlab,re.sub('Flux','Aperture Flux',ylab),'k',16)   # labels
            pylab.grid()                                                    # grid lines

# Plot corrected photometry

            kepplot.location([0.44,0.08,0.55,0.45])                          # plot location
            kepplot.RangeOfPlot(px,py,0.01,True)                                 # data limits
            px, xlab, status = kepplot.cleanx(tim_gd,logfile,verbose)       # clean-up x-axis units
            py, ylab, status = kepplot.cleany(flx_gd,1.0,logfile,verbose)   # clean-up x-axis units
            kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)  # plot data
            try:
                px, xlab, status = kepplot.cleanx(tim_bd,logfile,verbose)       # clean-up x-axis units
                py = copy(flx_bd)
                pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')
            except:
                pass
            kepplot.labels(xlab,re.sub('Flux','Corrected Flux',ylab),'k',16)   # labels
            pylab.grid()                                                    # grid lines

# render plot

            if plotres:
                kepplot.render(cmdLine)

# save plot to file

            if plotres:
                pylab.savefig(re.sub('.fits','_%d.png' % (iw + 1),outfile))

# correct fluxes within the output file
                
            intime = work1[:,7] + bjdref
            cadenceno = work1[:,6].astype(int)
            indata = work1[:,5]
            mom_centr1 = work1[:,4]
            mom_centr2 = work1[:,3]
            psf_centr1 = work1[:,2]
            psf_centr2 = work1[:,1]
            centr1 = copy(mom_centr1)
            centr2 = copy(mom_centr2)
            centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T,centr)
            yy = copy(indata)
            zz = centr_rot[1,:]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz,i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx,i)
            out_detsap = yy / cfac
            instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac
            instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac
            try:
                instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac
            except:
                pass

# add quality flag to output file for thruster firings

            for i in range(len(intime)):
                if cadenceno[i] in thr_cadence:
                    instr[1].data.field('SAP_QUALITY')[t1+i] += 131072
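            # 131072 = 2**17, i.e. a single SAP_QUALITY bit is set so that the
            # cadences identified here as thruster firings can be filtered out
            # by downstream tools.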

# write output file

    if status == 0:
        instr.writeto(outfile)
    
# close input file

    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)	    

# end time

    if (status == 0):
        message = 'KEPSFF completed at'
    else:
        message = '\nKEPSFF aborted at'
    kepmsg.clock(message,logfile,verbose)
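As a usage sketch only (the file names and parameter values below are illustrative placeholders, not documented defaults), the function above could be driven from a short script:

# hypothetical driver; assumes the kepsff module above is importable and that
# kepler_lc.fits contains the TIME, CADENCENO, centroid and quality columns
# the function expects
from kepsff import kepsff

kepsff(infile='kepler_lc.fits',
       outfile='kepler_lc_sff.fits',
       datacol='DETSAP_FLUX',
       cenmethod='moments',
       stepsize=4.0,                # window length in days
       npoly_cxcy=1, sigma_cxcy=6.0,
       npoly_ardx=6,
       npoly_dsdt=2, sigma_dsdt=3.0,
       npoly_arfl=3, sigma_arfl=3.0,
       plotres=True,
       clobber=True,
       verbose=True,
       logfile='kepsff.log',
       status=0)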
Code example #4
0
def kepsff(infile,
           outfile,
           datacol,
           cenmethod,
           stepsize,
           npoly_cxcy,
           sigma_cxcy,
           npoly_ardx,
           npoly_dsdt,
           sigma_dsdt,
           npoly_arfl,
           sigma_arfl,
           plotres,
           clobber,
           verbose,
           logfile,
           status,
           cmdLine=False):

    # startup parameters

    status = 0
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")

    # log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPSFF -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + datacol + ' '
    call += 'cenmethod=' + cenmethod + ' '
    call += 'stepsize=' + str(stepsize) + ' '
    call += 'npoly_cxcy=' + str(npoly_cxcy) + ' '
    call += 'sigma_cxcy=' + str(sigma_cxcy) + ' '
    call += 'npoly_ardx=' + str(npoly_ardx) + ' '
    call += 'npoly_dsdt=' + str(npoly_dsdt) + ' '
    call += 'sigma_dsdt=' + str(sigma_dsdt) + ' '
    call += 'npoly_arfl=' + str(npoly_arfl) + ' '
    call += 'sigma_arfl=' + str(sigma_arfl) + ' '
    savep = 'n'
    if (plotres): savep = 'y'
    call += 'plotres=' + savep + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    # start time

    kepmsg.clock('KEPSFF started at', logfile, verbose)

    # test log file

    logfile = kepmsg.test(logfile)

    # clobber output file

    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

# open input file

    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr, infile, logfile, verbose)

# read table structure

    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# determine sequence of windows in time

    if status == 0:
        frametim = instr[1].header['FRAMETIM']
        num_frm = instr[1].header['NUM_FRM']
        exptime = frametim * num_frm / 86400
        tstart = table.field('TIME')[0]
        tstop = table.field('TIME')[-1]
        winedge = arange(tstart, tstop, stepsize)
        if tstop > winedge[-1] + stepsize / 2:
            winedge = append(winedge, tstop)
        else:
            winedge[-1] = tstop
        winedge = (winedge - tstart) / exptime
        winedge = winedge.astype(int)
        if len(table.field('TIME')) > winedge[-1] + 1:
            winedge = append(winedge, len(table.field('TIME')))
        elif len(table.field('TIME')) < winedge[-1]:
            winedge[-1] = len(table.field('TIME'))

# step through the time windows

    if status == 0:
        for iw in range(1, len(winedge)):
            t1 = winedge[iw - 1]
            t2 = winedge[iw]

            # filter input data table

            work1 = numpy.array([
                table.field('TIME')[t1:t2],
                table.field('CADENCENO')[t1:t2],
                table.field(datacol)[t1:t2],
                table.field('MOM_CENTR1')[t1:t2],
                table.field('MOM_CENTR2')[t1:t2],
                table.field('PSF_CENTR1')[t1:t2],
                table.field('PSF_CENTR2')[t1:t2],
                table.field('SAP_QUALITY')[t1:t2]
            ], 'float64')
            work1 = numpy.rot90(work1, 3)
            work2 = work1[~numpy.isnan(work1).any(1)]
            work2 = work2[(work2[:, 0] == 0.0) | (work2[:, 0] > 1e5)]

            # assign table columns

            intime = work2[:, 7] + bjdref
            cadenceno = work2[:, 6].astype(int)
            indata = work2[:, 5]
            mom_centr1 = work2[:, 4]
            mom_centr2 = work2[:, 3]
            psf_centr1 = work2[:, 2]
            psf_centr2 = work2[:, 1]
            sap_quality = work2[:, 0]
            if cenmethod == 'moments':
                centr1 = copy(mom_centr1)
                centr2 = copy(mom_centr2)
            else:
                centr1 = copy(psf_centr1)
                centr2 = copy(psf_centr2)

# fit centroid data with low-order polynomial

            cfit = zeros((len(centr2)))
            csig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_cxcy)
            pinit = array([nanmean(centr2)])
            if npoly_cxcy > 0:
                for j in range(npoly_cxcy):
                    pinit = append(pinit, 0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
                for j in range(len(coeffs)):
                    cfit += coeffs[j] * numpy.power(centr1, j)
                    csig[:] = sigma
            except:
                message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (
                    t1, t2)
                status = kepmsg.err(logfile, message, verbose)
                #                sys.exit('')
                os._exit(1)

# reject outliers

            time_good = array([], 'float64')
            centr1_good = array([], 'float32')
            centr2_good = array([], 'float32')
            flux_good = array([], 'float32')
            cad_good = array([], 'int')
            for i in range(len(cfit)):
                if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
                    time_good = append(time_good, intime[i])
                    centr1_good = append(centr1_good, centr1[i])
                    centr2_good = append(centr2_good, centr2[i])
                    flux_good = append(flux_good, indata[i])
                    cad_good = append(cad_good, cadenceno[i])

# covariance matrix for centroid time series

            centr = concatenate([[centr1_good] - mean(centr1_good),
                                 [centr2_good] - mean(centr2_good)])
            covar = cov(centr)

            # eigenvector eigenvalues of covariance matrix

            [eval, evec] = numpy.linalg.eigh(covar)
            ex = arange(-10.0, 10.0, 0.1)
            epar = evec[1, 1] / evec[0, 1] * ex
            enor = evec[1, 0] / evec[0, 0] * ex
            ex = ex + mean(centr1)
            epar = epar + mean(centr2_good)
            enor = enor + mean(centr2_good)

            # rotate centroid data

            centr_rot = dot(evec.T, centr)

            # fit polynomial to rotated centroids

            rfit = zeros((len(centr2)))
            rsig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(centr_rot[0, :])])
            pinit = array([1.0])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit, 0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
                                   logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile, message, verbose)
            rx = linspace(nanmin(centr_rot[1, :]), nanmax(centr_rot[1, :]),
                          100)
            ry = zeros((len(rx)))
            for i in range(len(coeffs)):
                ry = ry + coeffs[i] * numpy.power(rx, i)

# calculate arclength of centroids

            s = zeros((len(rx)))
            for i in range(1, len(s)):
                work3 = ((ry[i] - ry[i - 1]) / (rx[i] - rx[i - 1]))**2
                s[i] = s[i - 1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i - 1])

# fit arclength as a function of strongest eigenvector

            sfit = zeros((len(centr2)))
            ssig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(s)])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit, 0.0)
            try:
                acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit arclength as a function of the rotated centroid with polynomial'
                status = kepmsg.err(logfile, message, verbose)

# correlate arclength with detrended flux

            t = copy(time_good)
            c = copy(cad_good)
            y = copy(flux_good)
            z = centr_rot[1, :]
            x = zeros((len(z)))
            for i in range(len(acoeffs)):
                x = x + acoeffs[i] * numpy.power(z, i)

# calculate time derivative of arclength s

            dx = zeros((len(x)))
            for i in range(1, len(x)):
                dx[i] = (x[i] - x[i - 1]) / (t[i] - t[i - 1])
            dx[0] = dx[1]

            # fit polynomial to derivative and flag outliers (thruster firings)

            dfit = zeros((len(dx)))
            dsig = zeros((len(dx)))
            functype = 'poly' + str(npoly_dsdt)
            pinit = array([nanmean(dx)])
            if npoly_dsdt > 0:
                for j in range(npoly_dsdt):
                    pinit = append(pinit, 0.0)
            try:
                dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
                    kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit the arclength time derivative with polynomial'
                status = kepmsg.err(logfile, message, verbose)
            for i in range(len(dcoeffs)):
                dfit = dfit + dcoeffs[i] * numpy.power(t, i)
            centr1_pnt = array([], 'float32')
            centr2_pnt = array([], 'float32')
            time_pnt = array([], 'float64')
            flux_pnt = array([], 'float32')
            dx_pnt = array([], 'float32')
            s_pnt = array([], 'float32')
            time_thr = array([], 'float64')
            flux_thr = array([], 'float32')
            dx_thr = array([], 'float32')
            thr_cadence = []
            for i in range(len(t)):
                if (dx[i] < dfit[i] + sigma_dsdt * dsigma and
                        dx[i] > dfit[i] - sigma_dsdt * dsigma):
                    time_pnt = append(time_pnt, time_good[i])
                    flux_pnt = append(flux_pnt, flux_good[i])
                    dx_pnt = append(dx_pnt, dx[i])
                    s_pnt = append(s_pnt, x[i])
                    centr1_pnt = append(centr1_pnt, centr1_good[i])
                    centr2_pnt = append(centr2_pnt, centr2_good[i])
                else:
                    time_thr = append(time_thr, time_good[i])
                    flux_thr = append(flux_thr, flux_good[i])
                    dx_thr = append(dx_thr, dx[i])
                    thr_cadence.append(cad_good[i])

# fit arclength-flux correlation

            cfit = zeros((len(time_pnt)))
            csig = zeros((len(time_pnt)))
            functype = 'poly' + str(npoly_arfl)
            pinit = array([nanmean(flux_pnt)])
            if npoly_arfl > 0:
                for j in range(npoly_arfl):
                    pinit = append(pinit, 0.0)
            try:
                ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
                    kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit the arclength-flux correlation with polynomial'
                status = kepmsg.err(logfile, message, verbose)

# correction factors for unfiltered data

            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T, centr)
            yy = copy(indata)
            zz = centr_rot[1, :]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz, i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx, i)

# apply correction to flux time-series

            out_detsap = indata / cfac

            # split time-series data for plotting

            tim_gd = array([], 'float32')
            flx_gd = array([], 'float32')
            tim_bd = array([], 'float32')
            flx_bd = array([], 'float32')
            for i in range(len(indata)):
                if intime[i] in time_pnt:
                    tim_gd = append(tim_gd, intime[i])
                    flx_gd = append(flx_gd, out_detsap[i])
                else:
                    tim_bd = append(tim_bd, intime[i])
                    flx_bd = append(flx_bd, out_detsap[i])

# plot style and size

            status = kepplot.define(labelsize, ticksize, logfile, verbose)
            pylab.figure(figsize=[xsize, ysize])
            pylab.clf()

            # plot x-centroid vs y-centroid

            ax = kepplot.location([0.04, 0.57, 0.16, 0.41])  # plot location
            px = copy(centr1)  # clean-up x-axis units
            py = copy(centr2)  # clean-up y-axis units
            pxmin = px.min()
            pxmax = px.max()
            pymin = py.min()
            pymax = py.max()
            pxr = pxmax - pxmin
            pyr = pymax - pymin
            pad = 0.05
            if pxr > pyr:
                dely = (pxr - pyr) / 2
                xlim(pxmin - pxr * pad, pxmax + pxr * pad)
                ylim(pymin - dely - pyr * pad, pymax + dely + pyr * pad)
            else:
                delx = (pyr - pxr) / 2
                ylim(pymin - pyr * pad, pymax + pyr * pad)
                xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad)
            pylab.plot(px,
                       py,
                       color='#980000',
                       markersize=5,
                       marker='D',
                       ls='')  # plot data
            pylab.plot(centr1_good,
                       centr2_good,
                       color='#009900',
                       markersize=5,
                       marker='D',
                       ls='')  # plot data
            pylab.plot(ex, epar, color='k', ls='-')
            pylab.plot(ex, enor, color='k', ls='-')
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('CCD Column', 'CCD Row', 'k', 16)  # labels
            pylab.grid()  # grid lines

            # plot arclength fits vs drift along strongest eigenvector

            ax = kepplot.location([0.24, 0.57, 0.16, 0.41])  # plot location
            px = rx - rx[0]
            py = s - rx - (s[0] - rx[0])  # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py, 1.0, logfile,
                                              verbose)  # clean-up y-axis units
            kepplot.RangeOfPlot(px, py, 0.05, False)  # data limits
            pylab.plot(px,
                       py,
                       color='#009900',
                       markersize=5,
                       marker='D',
                       ls='')
            px = plotx - rx[0]  # clean-up x-axis units
            py = ploty - plotx - (s[0] - rx[0])  # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py, 1.0, logfile,
                                              verbose)  # clean-up y-axis units
            pylab.plot(px, py, color='r', ls='-', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            ylab = re.sub(' e\S+', ' pixels)', ylab)
            ylab = re.sub(' s\S+', '', ylab)
            ylab = re.sub('Flux', 's $-$ x\'', ylab)
            kepplot.labels('Linear Drift [x\'] (pixels)', ylab, 'k',
                           16)  # labels
            pylab.grid()  # grid lines

            # plot time derivative of arclength s

            ax = kepplot.location([0.04, 0.08, 0.16, 0.41])  # plot location
            px = copy(time_pnt)
            py = copy(dx_pnt)
            px, xlab, status = kepplot.cleanx(px, logfile,
                                              verbose)  # clean-up x-axis units
            kepplot.RangeOfPlot(px, dx, 0.05, False)  # data limits
            pylab.plot(px,
                       py,
                       color='#009900',
                       markersize=5,
                       marker='D',
                       ls='')
            try:
                px = copy(time_thr)
                py = copy(dx_thr)
                px, xlab, status = kepplot.cleanx(
                    px, logfile, verbose)  # clean-up x-axis units
                pylab.plot(px,
                           py,
                           color='#980000',
                           markersize=5,
                           marker='D',
                           ls='')
            except:
                pass
            px = copy(t)
            py = copy(dfit)
            px, xlab, status = kepplot.cleanx(px, logfile,
                                              verbose)  # clean-up x-axis units
            pylab.plot(px, py, color='r', ls='-', lw=3)
            py = copy(dfit + sigma_dsdt * dsigma)
            pylab.plot(px, py, color='r', ls='--', lw=3)
            py = copy(dfit - sigma_dsdt * dsigma)
            pylab.plot(px, py, color='r', ls='--', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels(xlab, 'ds/dt (pixels day$^{-1}$)', 'k',
                           16)  # labels
            pylab.grid()  # grid lines

            # plot relation of arclength vs detrended flux

            ax = kepplot.location([0.24, 0.08, 0.16, 0.41])  # plot location
            px = copy(s_pnt)
            py = copy(flux_pnt)
            py, ylab, status = kepplot.cleany(py, 1.0, logfile,
                                              verbose)  # clean-up y-axis units
            kepplot.RangeOfPlot(px, py, 0.05, False)  # data limits
            pylab.plot(px,
                       py,
                       color='#009900',
                       markersize=5,
                       marker='D',
                       ls='')
            pylab.plot(plx, ply, color='r', ls='-', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('Arclength [s] (pixels)', ylab, 'k', 16)  # labels
            pylab.grid()  # grid lines

            # plot aperture photometry

            kepplot.location([0.44, 0.53, 0.55, 0.45])  # plot location
            px, xlab, status = kepplot.cleanx(intime, logfile,
                                              verbose)  # clean-up x-axis units
            py, ylab, status = kepplot.cleany(indata, 1.0, logfile,
                                              verbose)  # clean-up y-axis units
            kepplot.RangeOfPlot(px, py, 0.01, True)  # data limits
            kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha,
                           True)  # plot data
            kepplot.labels(' ', ylab, 'k', 16)  # labels
            pylab.setp(pylab.gca(),
                       xticklabels=[])  # remove x- or y-tick labels
            kepplot.labels(xlab, re.sub('Flux', 'Aperture Flux', ylab), 'k',
                           16)  # labels
            pylab.grid()  # grid lines

            # Plot corrected photometry

            kepplot.location([0.44, 0.08, 0.55, 0.45])  # plot location
            kepplot.RangeOfPlot(px, py, 0.01, True)  # data limits
            px, xlab, status = kepplot.cleanx(tim_gd, logfile,
                                              verbose)  # clean-up x-axis units
            py, ylab, status = kepplot.cleany(flx_gd, 1.0, logfile,
                                              verbose)  # clean-up y-axis units
            kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha,
                           True)  # plot data
            try:
                px, xlab, status = kepplot.cleanx(
                    tim_bd, logfile, verbose)  # clean-up x-axis units
                py = copy(flx_bd)
                pylab.plot(px,
                           py,
                           color='#980000',
                           markersize=5,
                           marker='D',
                           ls='')
            except:
                pass
            kepplot.labels(xlab, re.sub('Flux', 'Corrected Flux', ylab), 'k',
                           16)  # labels
            pylab.grid()  # grid lines

            # render plot

            if plotres:
                kepplot.render(cmdLine)

# save plot to file

            if plotres:
                pylab.savefig(re.sub('.fits', '_%d.png' % (iw + 1), outfile))

# correct fluxes within the output file

            intime = work1[:, 7] + bjdref
            cadenceno = work1[:, 6].astype(int)
            indata = work1[:, 5]
            mom_centr1 = work1[:, 4]
            mom_centr2 = work1[:, 3]
            psf_centr1 = work1[:, 2]
            psf_centr2 = work1[:, 1]
            centr1 = copy(mom_centr1)
            centr2 = copy(mom_centr2)
            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T, centr)
            yy = copy(indata)
            zz = centr_rot[1, :]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz, i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx, i)
            out_detsap = yy / cfac
            instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac
            instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac
            try:
                instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac
            except:
                pass

# add quality flag to output file for thruster firings

            for i in range(len(intime)):
                if cadenceno[i] in thr_cadence:
                    instr[1].data.field('SAP_QUALITY')[t1 + i] += 131072

# write output file

    if status == 0:
        instr.writeto(outfile)

# close input file

    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# end time

    if (status == 0):
        message = 'KEPSFF completed at'
    else:
        message = '\nKEPSFF aborted at'
    kepmsg.clock(message, logfile, verbose)
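For readers who want the core of this decorrelation without the FITS bookkeeping, the following self-contained sketch (synthetic data, a single window, and the arclength replaced by the dominant rotated-centroid coordinate; every name in it is invented for illustration and none of it is PyKE code) shows the same idea of fitting a flux trend against pointing drift and dividing it out:

import numpy as np

# synthetic single-window example of centroid-based detrending
rng = np.random.RandomState(42)
n = 500
t = np.linspace(0.0, 10.0, n)                        # time in days
drift = 0.3 * np.sin(2.0 * np.pi * t / 6.0)          # slow pointing drift (pixels)
centr1 = 100.0 + drift + 0.01 * rng.randn(n)         # column centroid
centr2 = 200.0 + 0.5 * drift + 0.01 * rng.randn(n)   # row centroid
true_flux = 1.0 + 0.002 * np.sin(2.0 * np.pi * t / 3.0)
flux = true_flux * (1.0 + 0.01 * drift)              # pointing-dependent systematic

# rotate centroids onto the principal directions of motion, as in kepsff
centr = np.vstack([centr1 - centr1.mean(), centr2 - centr2.mean()])
evals, evec = np.linalg.eigh(np.cov(centr))
rot = np.dot(evec.T, centr)
x = rot[1]                                           # dominant-motion coordinate

# fit the flux against the rotated coordinate and divide the trend out
coeffs = np.polyfit(x, flux, 3)
trend = np.polyval(coeffs, x)
flux_corrected = flux / trend * np.median(flux)      # rescale to the original level

print('rms of raw - true flux:       %.5f' % np.std(flux - true_flux))
print('rms of corrected - true flux: %.5f' % np.std(flux_corrected - true_flux))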