def keppca(infile,maskfile,outfile,components,clobber,verbose,logfile,status): # startup parameters cmdLine=False status = 0 labelsize = 32 ticksize = 18 xsize = 16 ysize = 10 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPCA -- ' call += 'infile='+infile+' ' call += 'maskfile='+maskfile+' ' call += 'outfile='+outfile+' ' call += 'components='+components+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPPCA started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPCA: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open input file status = 0 instr = pyfits.open(infile,mode='readonly',memmap=True) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # input file data if status == 0: cards0 = instr[0].header.ascardlist() cards1 = instr[1].header.ascardlist() cards2 = instr[2].header.ascardlist() table = instr[1].data[:] maskmap = copy(instr[2].data) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \ kepio.readTPF(infile,'FLUX_BKG',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \ kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr1, status = \ kepio.readTPF(infile,'POS_CORR1',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr2, status = \ kepio.readTPF(infile,'POS_CORR2',logfile,verbose) # read mask definition file if 
status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': maskx = array([],'int') masky = array([],'int') lines, status = kepio.openascii(maskfile,'r',logfile,verbose) for line in lines: line = line.strip().split('|') if len(line) == 6: y0 = int(line[3]) x0 = int(line[4]) line = line[5].split(';') for items in line: try: masky = numpy.append(masky,y0 + int(items.split(',')[0])) maskx = numpy.append(maskx,x0 + int(items.split(',')[1])) except: continue status = kepio.closeascii(lines,logfile,verbose) if len(maskx) == 0 or len(masky) == 0: message = 'ERROR -- KEPPCA: ' + maskfile + ' contains no pixels.' status = kepmsg.err(logfile,message,verbose) # subimage physical WCS data if status == 0: crpix1p = cards2['CRPIX1P'].value crpix2p = cards2['CRPIX2P'].value crval1p = cards2['CRVAL1P'].value crval2p = cards2['CRVAL2P'].value cdelt1p = cards2['CDELT1P'].value cdelt2p = cards2['CDELT2P'].value # define new subimage bitmap... if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': aperx = numpy.array([],'int') apery = numpy.array([],'int') aperb = numpy.array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = numpy.append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = numpy.append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) if maskmap[i,j] == 0: aperb = numpy.append(aperb,0) else: aperb = numpy.append(aperb,1) maskmap[i,j] = 1 for k in range(len(maskx)): if aperx[-1] == maskx[k] and apery[-1] == masky[k]: aperb[-1] = 3 maskmap[i,j] = 3 # ...or use old subimage bitmap if status == 0 and 'aper' in maskfile.lower(): aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperb = numpy.append(aperb,maskmap[i,j]) # ...or use all pixels if status == 0 and maskfile.lower() == 'all': aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): if maskmap[i,j] == 0: aperb = numpy.append(aperb,0) else: aperb = numpy.append(aperb,3) maskmap[i,j] = 3 # legal mask defined? if status == 0: if len(aperb) == 0: message = 'ERROR -- KEPPCA: no legal pixels within the subimage are defined.' 
status = kepmsg.err(logfile,message,verbose) # identify principal components to be combined if status == 0: pcaout = [] txt = components.strip().split(',') for work1 in txt: try: pcaout.append(int(work1.strip())) except: work2 = work1.strip().split('-') try: for work3 in range(int(work2[0]),int(work2[1]) + 1): pcaout.append(work3) except: message = 'ERROR -- KEPPCA: cannot understand principal component list requested' status = kepmsg.err(logfile,message,verbose) if status == 0: pcaout = set(sort(pcaout)) # flux pixel array size if status == 0: ntim = 0 time = numpy.array([],dtype='float64') timecorr = numpy.array([],dtype='float32') cadenceno = numpy.array([],dtype='int') pixseries = numpy.array([],dtype='float32') errseries = numpy.array([],dtype='float32') bkgseries = numpy.array([],dtype='float32') berseries = numpy.array([],dtype='float32') quality = numpy.array([],dtype='float32') pos_corr1 = numpy.array([],dtype='float32') pos_corr2 = numpy.array([],dtype='float32') nrows = numpy.size(fluxpixels,0) npix = numpy.size(fluxpixels,1) # remove NaN timestamps for i in range(nrows): if qual[i] == 0 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]) and \ numpy.isfinite(fluxpixels[i,1+ydim*xdim/2]): ntim += 1 time = numpy.append(time,barytime[i]) timecorr = numpy.append(timecorr,tcorr[i]) cadenceno = numpy.append(cadenceno,cadno[i]) pixseries = numpy.append(pixseries,fluxpixels[i]) errseries = numpy.append(errseries,errpixels[i]) bkgseries = numpy.append(bkgseries,flux_bkg[i]) berseries = numpy.append(berseries,flux_bkg_err[i]) quality = numpy.append(quality,qual[i]) pos_corr1 = numpy.append(pos_corr1,pcorr1[i]) pos_corr2 = numpy.append(pos_corr2,pcorr2[i]) pixseries = numpy.reshape(pixseries,(-1,npix)) errseries = numpy.reshape(errseries,(-1,npix)) bkgseries = numpy.reshape(bkgseries,(-1,npix)) berseries = numpy.reshape(berseries,(-1,npix)) # dummy columns for output file if status == 0: pdc_flux = numpy.empty(len(time)); pdc_flux[:] = numpy.nan pdc_flux_err = numpy.empty(len(time)); pdc_flux_err[:] = numpy.nan psf_centr1 = numpy.empty(len(time)); psf_centr1[:] = numpy.nan psf_centr1_err = numpy.empty(len(time)); psf_centr1_err[:] = numpy.nan psf_centr2 = numpy.empty(len(time)); psf_centr2[:] = numpy.nan psf_centr2_err = numpy.empty(len(time)); psf_centr2_err[:] = numpy.nan mom_centr1 = numpy.empty(len(time)); mom_centr1[:] = numpy.nan mom_centr1_err = numpy.empty(len(time)); mom_centr1_err[:] = numpy.nan mom_centr2 = numpy.empty(len(time)); mom_centr2[:] = numpy.nan mom_centr2_err = numpy.empty(len(time)); mom_centr2_err[:] = numpy.nan # subtract mean over time from each pixel in the mask if status == 0: nmask = 0 for i in range(npix): if aperb[i] == 3: nmask += 1 work1 = numpy.zeros((len(pixseries),nmask)) nmask = -1 for i in range(npix): if aperb[i] == 3: nmask += 1 maskedFlux = numpy.ma.masked_invalid(pixseries[:,i]) pixMean = numpy.mean(maskedFlux) if numpy.isfinite(pixMean): work1[:,nmask] = maskedFlux - pixMean else: work1[:,nmask] = numpy.zeros((ntim)) # calculate covariance matrix if status == 0: work2 = work1.T covariance = numpy.cov(work2) # determine eigenfunctions and eigenvectors of the covariance matrix if status == 0: [latent,coeff] = numpy.linalg.eig(covariance) # projection of the data in the new space if status == 0: score = numpy.dot(coeff.T,work2).T # construct new table data if status == 0: sap_flux = numpy.array([],'float32') sap_flux_err = numpy.array([],'float32') sap_bkg = numpy.array([],'float32') sap_bkg_err = 
numpy.array([],'float32') for i in range(len(time)): work1 = numpy.array([],'float64') work2 = numpy.array([],'float64') work3 = numpy.array([],'float64') work4 = numpy.array([],'float64') work5 = numpy.array([],'float64') for j in range(len(aperb)): if (aperb[j] == 3): work1 = numpy.append(work1,pixseries[i,j]) work2 = numpy.append(work2,errseries[i,j]) work3 = numpy.append(work3,bkgseries[i,j]) work4 = numpy.append(work4,berseries[i,j]) sap_flux = numpy.append(sap_flux,kepstat.sum(work1)) sap_flux_err = numpy.append(sap_flux_err,kepstat.sumerr(work2)) sap_bkg = numpy.append(sap_bkg,kepstat.sum(work3)) sap_bkg_err = numpy.append(sap_bkg_err,kepstat.sumerr(work4)) sap_mean = scipy.stats.stats.nanmean(sap_flux) # coadd principal components if status == 0: pca_flux = numpy.zeros((len(sap_flux))) for i in range(nmask): if (i + 1) in pcaout: pca_flux = pca_flux + score[:,i] pca_flux += sap_mean # construct output primary extension if status == 0: hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].key not in hdu0.header.ascardlist().keys(): hdu0.header.update(cards0[i].key, cards0[i].value, cards0[i].comment) else: hdu0.header.ascardlist()[cards0[i].key].comment = cards0[i].comment status = kepkey.history(call,hdu0,outfile,logfile,verbose) outstr = HDUList(hdu0) # construct output light curve extension if status == 0: col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=time) col2 = Column(name='TIMECORR',format='E',unit='d',array=timecorr) col3 = Column(name='CADENCENO',format='J',array=cadenceno) col4 = Column(name='SAP_FLUX',format='E',array=sap_flux) col5 = Column(name='SAP_FLUX_ERR',format='E',array=sap_flux_err) col6 = Column(name='SAP_BKG',format='E',array=sap_bkg) col7 = Column(name='SAP_BKG_ERR',format='E',array=sap_bkg_err) col8 = Column(name='PDCSAP_FLUX',format='E',array=pdc_flux) col9 = Column(name='PDCSAP_FLUX_ERR',format='E',array=pdc_flux_err) col10 = Column(name='SAP_QUALITY',format='J',array=quality) col11 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=psf_centr1) col12 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=psf_centr1_err) col13 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=psf_centr2) col14 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=psf_centr2_err) col15 = Column(name='MOM_CENTR1',format='E',unit='pixel',array=mom_centr1) col16 = Column(name='MOM_CENTR1_ERR',format='E',unit='pixel',array=mom_centr1_err) col17 = Column(name='MOM_CENTR2',format='E',unit='pixel',array=mom_centr2) col18 = Column(name='MOM_CENTR2_ERR',format='E',unit='pixel',array=mom_centr2_err) col19 = Column(name='POS_CORR1',format='E',unit='pixel',array=pos_corr1) col20 = Column(name='POS_CORR2',format='E',unit='pixel',array=pos_corr2) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \ col12,col13,col14,col15,col16,col17,col18,col19,col20]) hdu1 = new_table(cols) hdu1.header.update('TTYPE1','TIME','column title: data time stamps') hdu1.header.update('TFORM1','D','data type: float64') hdu1.header.update('TUNIT1','BJD - 2454833','column units: barycenter corrected JD') hdu1.header.update('TDISP1','D12.7','column display format') hdu1.header.update('TTYPE2','TIMECORR','column title: barycentric-timeslice correction') hdu1.header.update('TFORM2','E','data type: float32') hdu1.header.update('TUNIT2','d','column units: days') hdu1.header.update('TTYPE3','CADENCENO','column title: unique cadence number') hdu1.header.update('TFORM3','J','column format: signed integer32') 
hdu1.header.update('TTYPE4','SAP_FLUX','column title: aperture photometry flux') hdu1.header.update('TFORM4','E','column format: float32') hdu1.header.update('TUNIT4','e-/s','column units: electrons per second') hdu1.header.update('TTYPE5','SAP_FLUX_ERR','column title: aperture phot. flux error') hdu1.header.update('TFORM5','E','column format: float32') hdu1.header.update('TUNIT5','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE6','SAP_BKG','column title: aperture phot. background flux') hdu1.header.update('TFORM6','E','column format: float32') hdu1.header.update('TUNIT6','e-/s','column units: electrons per second') hdu1.header.update('TTYPE7','SAP_BKG_ERR','column title: ap. phot. background flux error') hdu1.header.update('TFORM7','E','column format: float32') hdu1.header.update('TUNIT7','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE8','PDCSAP_FLUX','column title: PDC photometry flux') hdu1.header.update('TFORM8','E','column format: float32') hdu1.header.update('TUNIT8','e-/s','column units: electrons per second') hdu1.header.update('TTYPE9','PDCSAP_FLUX_ERR','column title: PDC flux error') hdu1.header.update('TFORM9','E','column format: float32') hdu1.header.update('TUNIT9','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE10','SAP_QUALITY','column title: aperture photometry quality flag') hdu1.header.update('TFORM10','J','column format: signed integer32') hdu1.header.update('TTYPE11','PSF_CENTR1','column title: PSF fitted column centroid') hdu1.header.update('TFORM11','E','column format: float32') hdu1.header.update('TUNIT11','pixel','column units: pixel') hdu1.header.update('TTYPE12','PSF_CENTR1_ERR','column title: PSF fitted column error') hdu1.header.update('TFORM12','E','column format: float32') hdu1.header.update('TUNIT12','pixel','column units: pixel') hdu1.header.update('TTYPE13','PSF_CENTR2','column title: PSF fitted row centroid') hdu1.header.update('TFORM13','E','column format: float32') hdu1.header.update('TUNIT13','pixel','column units: pixel') hdu1.header.update('TTYPE14','PSF_CENTR2_ERR','column title: PSF fitted row error') hdu1.header.update('TFORM14','E','column format: float32') hdu1.header.update('TUNIT14','pixel','column units: pixel') hdu1.header.update('TTYPE15','MOM_CENTR1','column title: moment-derived column centroid') hdu1.header.update('TFORM15','E','column format: float32') hdu1.header.update('TUNIT15','pixel','column units: pixel') hdu1.header.update('TTYPE16','MOM_CENTR1_ERR','column title: moment-derived column error') hdu1.header.update('TFORM16','E','column format: float32') hdu1.header.update('TUNIT16','pixel','column units: pixel') hdu1.header.update('TTYPE17','MOM_CENTR2','column title: moment-derived row centroid') hdu1.header.update('TFORM17','E','column format: float32') hdu1.header.update('TUNIT17','pixel','column units: pixel') hdu1.header.update('TTYPE18','MOM_CENTR2_ERR','column title: moment-derived row error') hdu1.header.update('TFORM18','E','column format: float32') hdu1.header.update('TUNIT18','pixel','column units: pixel') hdu1.header.update('TTYPE19','POS_CORR1','column title: col correction for vel. abbern') hdu1.header.update('TFORM19','E','column format: float32') hdu1.header.update('TUNIT19','pixel','column units: pixel') hdu1.header.update('TTYPE20','POS_CORR2','column title: row correction for vel. 
abbern') hdu1.header.update('TFORM20','E','column format: float32') hdu1.header.update('TUNIT20','pixel','column units: pixel') hdu1.header.update('EXTNAME','LIGHTCURVE','name of extension') for i in range(len(cards1)): if (cards1[i].key not in hdu1.header.ascardlist().keys() and cards1[i].key[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY', '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN', '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC', '12PC','21PC','22PC']): hdu1.header.update(cards1[i].key, cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension if status == 0: hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].key not in hdu2.header.ascardlist().keys(): hdu2.header.update(cards2[i].key, cards2[i].value, cards2[i].comment) else: hdu2.header.ascardlist()[cards2[i].key].comment = cards2[i].comment outstr.append(hdu2) # construct principal component table if status == 0: cols = [] for i in range(nmask): colname = 'PC' + str(i + 1) col = Column(name=colname,format='E',unit='e-/s',array=score[:,i]) cols.append(col) hdu3 = new_table(ColDefs(cols)) hdu3.header.update('EXTNAME','PRINCIPAL_COMPONENTS','name of extension') for i in range(nmask): hdu3.header.update('TTYPE' + str(i + 1),'PC' + str(i + 1),'column title: principal component number' + str(i + 1)) hdu3.header.update('TFORM' + str(i + 1),'E','column format: float32') hdu3.header.update('TUNIT' + str(i + 1),'e-/s','column units: electrons per sec') outstr.append(hdu3) # write output file if status == 0: outstr.writeto(outfile,checksum=True) # close input structure if status == 0: status = kepio.closefits(instr,logfile,verbose) # plotting defaults if status == 0: plotLatex = True try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} rcParams.update(params) except: plotLatex = False if status == 0: pylab.figure(figsize=[xsize,ysize]) pylab.clf() # clean up x-axis unit if status == 0: intime0 = float(int(tstart / 100) * 100.0) ptime = time + bjdref - intime0 xlab = 'BJD $-$ %d' % intime0 # clean up y-axis units if status == 0: pout = copy(score) nrm = len(str(int(pout.max())))-1 pout = pout / 10**nrm ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm # data limits xmin = ptime.min() xmax = ptime.max() ymin = pout.min() ymax = pout.max() xr = xmax - xmin yr = ymax - ymin # plot window ax = pylab.axes([0.06,0.54,0.93,0.43]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() pylab.setp(labels, 'rotation', 90) pylab.setp(pylab.gca(),xticklabels=[]) # plot principal components for i in range(nmask): pylab.plot(ptime,pout[:,i],linestyle='-',linewidth=lwidth) if not plotLatex: ylab = '10**%d electrons/sec' % nrm ylabel(ylab, {'color' : 'k'}) grid() # plot ranges pylab.xlim(xmin-xr*0.01,xmax+xr*0.01) pylab.ylim(ymin-yr*0.01,ymax+yr*0.01) # plot output data ax = pylab.axes([0.06,0.09,0.93,0.43]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() setp(labels, 'rotation', 90) # 
# clean up y-axis units

    if status == 0:
        pout = copy(pca_flux)
        nrm = len(str(int(pout.max())))-1
        pout = pout / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits

        ymin = pout.min()
        ymax = pout.max()
        yr = ymax - ymin
        ptime = numpy.insert(ptime,[0],[ptime[0]])
        ptime = numpy.append(ptime,[ptime[-1]])
        pout = numpy.insert(pout,[0],[0.0])
        pout = numpy.append(pout,0.0)

# plot time coadded principal component series

        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot ranges

        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# render plot

        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# stop time

    if status == 0:
        kepmsg.clock('KEPPCA ended at',logfile,verbose)

    return
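# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the PyKE API): the core PCA step performed
# by keppca() above, reduced to a self-contained helper. It mean-subtracts each
# masked pixel time series, eigendecomposes the pixel-pixel covariance matrix,
# projects the data onto the eigenvectors, and co-adds a chosen subset of
# components on top of the mean aperture flux. The names pca_sketch, pixseries,
# sap_flux and pcaout are assumptions for this example only.
# -----------------------------------------------------------------------------

def pca_sketch(pixseries, sap_flux, pcaout):
    """pixseries: (ntime, npix) flux array for the masked pixels.
    sap_flux:  (ntime,) summed aperture flux.
    pcaout:    iterable of 1-based principal-component numbers to co-add."""
    import numpy as np

    # subtract the temporal mean from each pixel, ignoring NaNs
    work = pixseries - np.nanmean(pixseries, axis=0)
    work = np.nan_to_num(work)                      # guard against all-NaN pixels

    # pixel-pixel covariance matrix (npix x npix) and its eigendecomposition
    covariance = np.cov(work.T)
    latent, coeff = np.linalg.eig(covariance)

    # project the data onto the eigenvectors: score[:, i] is principal component i+1
    score = np.dot(coeff.T, work.T).T

    # co-add the requested components around the mean aperture flux
    pca_flux = np.zeros(len(sap_flux))
    for i in range(score.shape[1]):
        if (i + 1) in pcaout:
            pca_flux += np.real(score[:, i])
    return pca_flux + np.nanmean(sap_flux)

# example usage of the sketch: pca_flux = pca_sketch(pixseries, sap_flux, set([1, 2, 3]))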
def kepdeltapix(infile, nexp, columns, rows, fluxes, prfdir, interpolation, tolerance, fittype, imscale, colmap, verbose, logfile, status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPDELTAPIX -- ' call += 'infile=' + infile + ' ' call += 'nexp=' + str(nexp) + ' ' call += 'columns=' + columns + ' ' call += 'rows=' + rows + ' ' call += 'fluxes=' + fluxes + ' ' call += 'prfdir=' + prfdir + ' ' call += 'interpolation=' + interpolation + ' ' call += 'tolerance=' + str(tolerance) + ' ' call += 'fittype=' + str(fittype) + ' ' call += 'imscale=' + imscale + ' ' call += 'colmap=' + colmap + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPDELTAPIX started at', logfile, verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPDELTAPIX: is %s a Target Pixel File? ' % infile status = kepmsg.err(logfile, message, verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str( output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPDELTAPIX: No PRF file found in ' + prfdir status = kepmsg.err(logfile, message, verbose) # read PRF images if status == 0: prfn = [0, 0, 0, 0, 0] crpix1p = numpy.zeros((5), dtype='float32') crpix2p = numpy.zeros((5), dtype='float32') crval1p = numpy.zeros((5), dtype='float32') crval2p = numpy.zeros((5), dtype='float32') cdelt1p = numpy.zeros((5), dtype='float32') cdelt2p = numpy.zeros((5), dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) # choose rows in the TPF table at random if status == 0: i = 0 rownum = [] while i < nexp: work = int(random.random() * len(barytime)) if numpy.isfinite(barytime[work]) and numpy.isfinite( fluxpixels[work, ydim * xdim / 2]): rownum.append(work) i += 1 # construct input pixel image if status == 0: fscat = numpy.empty((len(fluxes), nexp), dtype='float32') xscat = numpy.empty((len(columns), nexp), dtype='float32') yscat = numpy.empty((len(rows), nexp), dtype='float32') for irow in range(nexp): flux = fluxpixels[rownum[irow], :] # image scale and intensity limits of pixel data if status == 0: flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux, imscale) n = 0 imgflux_pl = empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): imgflux_pl[i, j] = flux_pl[n] n += 1 # fit PRF model to pixel data if status == 0: start = time.time() f, y, x, prfMod, prfFit, prfRes = kepfit.fitMultiPRF( flux, ydim, xdim, column, row, prfn, crval1p, crval2p, cdelt1p, cdelt2p, interpolation, tolerance, fluxes, columns, rows, fittype, verbose, logfile) if verbose: print '\nConvergence time = %.1fs' % (time.time() - start) # best fit parameters if status == 0: for i in range(len(f)): fscat[i, irow] = f[i] xscat[i, irow] = x[i] yscat[i, irow] = y[i] # replace starting guess with previous fit parameters if status == 0: fluxes = copy(f) columns = copy(x) rows = copy(y) # mean and rms results if status == 0: fmean = [] fsig = [] xmean = [] xsig = [] ymean = [] ysig = [] for i in range(len(f)): fmean.append(numpy.mean(fscat[i, :])) xmean.append(numpy.mean(xscat[i, :])) ymean.append(numpy.mean(yscat[i, :])) fsig.append(numpy.std(fscat[i, :])) xsig.append(numpy.std(xscat[i, :])) ysig.append(numpy.std(yscat[i, :])) txt = 'Flux = %10.2f e-/s ' % fmean[-1] txt += 'X = %7.4f +/- %6.4f pix ' % (xmean[-1], xsig[i]) txt += 'Y = %7.4f +/- %6.4f pix' % (ymean[-1], ysig[i]) kepmsg.log(logfile, txt, True) # output results for kepprfphot if status == 0: txt1 = 'columns=0.0' txt2 = ' rows=0.0' for i in range(1, len(f)): txt1 += ',%.4f' % (xmean[i] - xmean[0]) txt2 += ',%.4f' % (ymean[i] - ymean[0]) kepmsg.log(logfile, '\nkepprfphot input fields:', True) kepmsg.log(logfile, txt1, True) kepmsg.log(logfile, txt2, True) # image scale and intensity limits for PRF model image if status == 0: imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(prfMod, imscale) # image scale and intensity limits for PRF fit image if status == 0: imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(prfFit, imscale) # image scale and intensity limits for data - fit residual if status == 0: imgres_pl, zminre, zmaxre = kepplot.intScale2D(prfRes, imscale) # plot style if status == 0: try: params = { 'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10 } 
pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10, 10]) pylab.clf() plotimage(imgflux_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.06, 0.52, 'flux', colmap) plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.06, 0.06, 'fit', colmap) plotimage(imgres_pl, zminfl, zmaxfl, 4, row, column, xdim, ydim, 0.52, 0.06, 'residual', colmap) plotimage(imgprf_pl, zminpr, zmaxpr * 0.9, 2, row, column, xdim, ydim, 0.52, 0.52, 'model', colmap) for i in range(len(f)): pylab.plot(xscat[i, :], yscat[i, :], 'o', color='k') # Plot creep of target position over time, relative to the central source # barytime0 = float(int(barytime[0] / 100) * 100.0) # barytime -= barytime0 # xlab = 'BJD $-$ %d' % barytime0 # xmin = numpy.nanmin(barytime) # xmax = numpy.nanmax(barytime) # y1min = numpy.nanmin(data) # y1max = numpy.nanmax(data) # xr = xmax - xmin # yr = ymax - ymin # barytime = insert(barytime,[0],[barytime[0]]) # barytime = append(barytime,[barytime[-1]]) # data = insert(data,[0],[0.0]) # data = append(data,0.0) # # pylab.figure(2,figsize=[10,10]) # pylab.clf() # ax = pylab.subplot(211) # pylab.subplots_adjust(0.1,0.5,0.88,0.42) # pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # labels = ax.get_yticklabels() # setp(labels, 'rotation', 90, fontsize=ticksize) # for i in range(1,len(f)): # pylab.plot(rownum,xscat[i,:]-xscat[0,:],'o') # pylab.ylabel('$\Delta$Columns', {'color' : 'k'}) # ax = pylab.subplot(211) # pylab.subplots_adjust(0.1,0.1,0.88,0.42) # pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # labels = ax.get_yticklabels() # setp(labels, 'rotation', 90, fontsize=ticksize) # for i in range(1,len(f)): # pylab.plot(rownum,yscat[i,:]-yscat[0,:],'o') # pylab.xlim(xmin-xr*0.01,xmax+xr*0.01) # if ymin-yr*0.01 <= 0.0 or fullrange: # pylab.ylim(1.0e-10,ymax+yr*0.01) # else: # pylab.ylim(ymin-yr*0.01,ymax+yr*0.01) # pylab.ylabel('$\Delta$Rows', {'color' : 'k'}) # pylab.xlabel(xlab, {'color' : 'k'}) # render plot if status == 0: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPDELTAPIX ended at', logfile, verbose) return
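# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the PyKE API): the summary bookkeeping at the
# end of kepdeltapix(), assuming per-exposure fit results fscat/xscat/yscat of
# shape (nsrc, nexp). It reports the mean and scatter of each fitted source and
# builds the comma-separated column/row offset strings (relative to source 0)
# in the format expected by kepprfphot. The helper name is an assumption.
# -----------------------------------------------------------------------------

def deltapix_summary_sketch(fscat, xscat, yscat):
    import numpy as np

    # mean and rms of the fitted fluxes and positions over the nexp exposures
    fmean = np.mean(fscat, axis=1)
    xmean, xsig = np.mean(xscat, axis=1), np.std(xscat, axis=1)
    ymean, ysig = np.mean(yscat, axis=1), np.std(yscat, axis=1)

    for i in range(len(fmean)):
        print('Flux = %10.2f e-/s  X = %7.4f +/- %6.4f pix  Y = %7.4f +/- %6.4f pix'
              % (fmean[i], xmean[i], xsig[i], ymean[i], ysig[i]))

    # kepprfphot wants each source's column/row offset relative to the first source
    txt1 = 'columns=0.0' + ''.join(',%.4f' % (xmean[i] - xmean[0]) for i in range(1, len(xmean)))
    txt2 = '   rows=0.0' + ''.join(',%.4f' % (ymean[i] - ymean[0]) for i in range(1, len(ymean)))
    return txt1, txt2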
def kepprf( infile, columns, rows, fluxes, rownum=0, border=0, background=0, focus=0, prfdir="../KeplerPRF", xtol=1.0e-6, ftol=1.0e-6, imscale="linear", cmap="YlOrBr", lcolor="k", acolor="b", logfile="kepcrowd.log", CrowdTPF=np.nan, srcinfo=None, **kwargs ): # log the call hashline = "----------------------------------------------------------------------------" kepmsg.log(logfile, hashline, True) call = "KEPPRF -- " call += "infile=" + infile + " " call += "rownum=" + str(rownum) + " " call += "columns=" + columns + " " call += "rows=" + rows + " " call += "fluxes=" + fluxes + " " call += "border=" + str(border) + " " bground = "n" if background: bground = "y" call += "background=" + bground + " " focs = "n" if focus: focs = "y" call += "focus=" + focs + " " call += "prfdir=" + prfdir + " " call += "xtol=" + str(xtol) + " " call += "ftol=" + str(xtol) + " " call += "logfile=" + logfile kepmsg.log(logfile, call + "\n", True) guess = [] try: f = fluxes.strip().split(",") x = columns.strip().split(",") y = rows.strip().split(",") for i in range(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in range(nsrc): try: guess.append(float(f[i])) except: message = "ERROR -- KEPPRF: Fluxes must be floating point numbers" kepmsg.err(logfile, message, True) return None if len(x) != nsrc or len(y) != nsrc: message = "ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and " message += "fluxes must have the same number of sources" kepmsg.err(logfile, message, True) return None for i in range(nsrc): try: guess.append(float(x[i])) except: message = "ERROR -- KEPPRF: Columns must be floating point numbers" kepmsg.err(logfile, message, True) return None for i in range(nsrc): try: guess.append(float(y[i])) except: message = "ERROR -- KEPPRF: Rows must be floating point numbers" kepmsg.err(logfile, message, True) return None if background: if border == 0: guess.append(0.0) else: for i in range((border + 1) * 2): guess.append(0.0) if focus: guess.append(1.0) guess.append(1.0) guess.append(0.0) # open TPF FITS file try: kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, barytime, status = kepio.readTPF( infile, "TIME", logfile, True ) except: message = "ERROR -- KEPPRF: is %s a Target Pixel File? 
" % infile kepmsg.err(logfile, message, True) return None kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = kepio.readTPF( infile, "TIMECORR", logfile, True ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, cadno, status = kepio.readTPF( infile, "CADENCENO", logfile, True ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = kepio.readTPF( infile, "FLUX", logfile, True ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = kepio.readTPF( infile, "FLUX_ERR", logfile, True ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, qual, status = kepio.readTPF( infile, "QUALITY", logfile, True ) # read mask defintion data from TPF file maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile, logfile, True) npix = np.size(np.nonzero(maskimg)[0]) print("") print(" KepID: %s" % kepid) print(" BJD: %.2f" % (barytime[rownum - 1] + 2454833.0)) print(" RA (J2000): %s" % ra) print("Dec (J2000): %s" % dec) print(" KepMag: %s" % kepmag) print(" SkyGroup: %2s" % skygroup) print(" Season: %2s" % str(season)) print(" Channel: %2s" % channel) print(" Module: %2s" % module) print(" Output: %1s" % output) print("") # is this a good row with finite timestamp and pixels? if not np.isfinite(barytime[rownum - 1]) or np.nansum(fluxpixels[rownum - 1, :]) == np.nan: message = "ERROR -- KEPFIELD: Row " + str(rownum) + " is a bad quality timestamp" status = kepmsg.err(logfile, message, True) # construct input pixel image flux = fluxpixels[rownum - 1, :] ferr = errpixels[rownum - 1, :] DATx = np.arange(column, column + xdim) DATy = np.arange(row, row + ydim) # image scale and intensity limits of pixel data n = 0 DATimg = np.empty((ydim, xdim)) ERRimg = np.empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): DATimg[i, j] = flux[n] ERRimg[i, j] = ferr[n] n += 1 # determine suitable PRF calibration file if int(module) < 10: prefix = "kplr0" else: prefix = "kplr" prfglob = prfdir + "/" + prefix + str(module) + "." 
+ str(output) + "*" + "_prf.fits" try: prffile = glob.glob(prfglob)[0] except: message = "ERROR -- KEPPRF: No PRF file found in " + prfdir kepmsg.err(logfile, message, True) return None # read PRF images prfn = [0, 0, 0, 0, 0] crpix1p = np.zeros((5), dtype="float32") crpix2p = np.zeros((5), dtype="float32") crval1p = np.zeros((5), dtype="float32") crval2p = np.zeros((5), dtype="float32") cdelt1p = np.zeros((5), dtype="float32") cdelt2p = np.zeros((5), dtype="float32") for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status = kepio.readPRFimage( prffile, i + 1, logfile, True ) prfn = np.array(prfn) PRFx = np.arange(0.5, np.shape(prfn[0])[1] + 0.5) PRFy = np.arange(0.5, np.shape(prfn[0])[0] + 0.5) PRFx = (PRFx - np.size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - np.size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position prf = np.zeros(np.shape(prfn[0]), dtype="float32") prfWeight = np.zeros((5), dtype="float32") for i in range(5): prfWeight[i] = np.sqrt((column - crval1p[i]) ** 2 + (row - crval2p[i]) ** 2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e-6 prf = prf + prfn[i] / prfWeight[i] prf = prf / np.nansum(prf) / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (np.shape(prf)[0] - prfDimY) / 2 PRFx0 = (np.shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF splineInterpolation = scipy.interpolate.RectBivariateSpline(PRFx, PRFy, prf) # construct mesh for background model if background: bx = np.arange(1.0, float(xdim + 1)) by = np.arange(1.0, float(ydim + 1)) xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim), np.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data start = time.time() if focus and background: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif focus and not background: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocus, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif background and not focus: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) else: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRF, guess, args=args, xtol=xtol, ftol=ftol, disp=False) kepmsg.log(logfile, "Convergence time = %.2fs\n" % (time.time() - start), True) # pad the PRF data if the PRF array is smaller than the data array flux = [] OBJx = [] OBJy = [] PRFmod = np.zeros((prfDimY, prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = np.zeros((prfDimY, prfDimX)) superPRF = np.zeros((prfDimY + 1, prfDimX + 1)) superPRF[ np.abs(PRFy0) : np.abs(PRFy0) + np.shape(prf)[0], np.abs(PRFx0) : np.abs(PRFx0) + np.shape(prf)[1] ] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf, -angle, reshape=False, mode="nearest") # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc + i]) OBJy.append(ans[nsrc * 2 + i]) # calculate best-fit model y = (OBJy[i] - np.mean(DATy)) / cdelt1p[0] x = 
(OBJx[i] - np.mean(DATx)) / cdelt2p[0] prfTmp = shift(prf, [y, x], order=3, mode="constant") prfTmp = prfTmp[PRFy0 : PRFy0 + prfDimY, PRFx0 : PRFx0 + prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters txt = "Flux = %10.2f e-/s " % flux[i] txt += "X = %9.4f pix " % OBJx[i] txt += "Y = %9.4f pix " % OBJy[i] kepmsg.log(logfile, txt, True) if background: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3] else: bcoeff = np.array([ans[nsrc * 3 : nsrc * 3 + bterms], ans[nsrc * 3 + bterms : nsrc * 3 + bterms * 2]]) bkg = kepfunc.polyval2d(xx, yy, bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = "\n Mean background = %.2f e-/s" % b kepmsg.log(logfile, txt, True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if not background: kepmsg.log(logfile, "", True) kepmsg.log(logfile, " X/Y focus factors = %.3f/%.3f" % (wx, wy), True) kepmsg.log(logfile, "PRF rotation angle = %.2f deg" % angle, True) # measure flux fraction and contamination # LUGER: This looks horribly bugged. ``PRFall`` is certainly NOT the sum of the all the sources. # Check out my comments in ``kepfunc.py``. PRFall = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle, splineInterpolation) PRFone = kepfunc.PRF2DET([flux[0]], [OBJx[0]], [OBJy[0]], DATx, DATy, wx, wy, angle, splineInterpolation) # LUGER: Add up contaminant fluxes PRFcont = np.zeros_like(PRFone) for ncont in range(1, len(flux)): PRFcont += kepfunc.PRF2DET( [flux[ncont]], [OBJx[ncont]], [OBJy[ncont]], DATx, DATy, wx, wy, angle, splineInterpolation ) PRFcont[np.where(PRFcont < 0)] = 0 FluxInMaskAll = np.nansum(PRFall) FluxInMaskOne = np.nansum(PRFone) FluxInAperAll = 0.0 FluxInAperOne = 0.0 FluxInAperAllTrue = 0.0 for i in range(1, ydim): for j in range(1, xdim): if kepstat.bitInBitmap(maskimg[i, j], 2): FluxInAperAll += PRFall[i, j] FluxInAperOne += PRFone[i, j] FluxInAperAllTrue += PRFone[i, j] + PRFcont[i, j] FluxFraction = FluxInAperOne / flux[0] try: Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll except: Contamination = 0.0 # LUGER: Pixel crowding metrics Crowding = PRFone / (PRFone + PRFcont) Crowding[np.where(Crowding < 0)] = np.nan # LUGER: Optimal aperture crowding metric CrowdAper = FluxInAperOne / FluxInAperAllTrue kepmsg.log(logfile, "\n Total flux in mask = %.2f e-/s" % FluxInMaskAll, True) kepmsg.log(logfile, " Target flux in mask = %.2f e-/s" % FluxInMaskOne, True) kepmsg.log(logfile, " Total flux in aperture = %.2f e-/s" % FluxInAperAll, True) kepmsg.log(logfile, " Target flux in aperture = %.2f e-/s" % FluxInAperOne, True) kepmsg.log(logfile, " Target flux fraction in aperture = %.2f%%" % (FluxFraction * 100.0), True) kepmsg.log(logfile, "Contamination fraction in aperture = %.2f%%" % (Contamination * 100.0), True) kepmsg.log(logfile, " Crowding metric in aperture = %.4f" % (CrowdAper), True) kepmsg.log(logfile, " Crowding metric from TPF = %.4f" % (CrowdTPF), True) # constuct model PRF in detector coordinates PRFfit = PRFall + 0.0 if background and bterms == 1: PRFfit = PRFall + b if background and bterms > 1: PRFfit = PRFall + bkg # calculate residual of DATA - FIT PRFres = DATimg - PRFfit FLUXres = np.nansum(PRFres) / npix # calculate the sum squared difference between data and model Pearson = np.abs(np.nansum(np.square(DATimg - PRFfit) / PRFfit)) Chi2 = np.nansum(np.square(DATimg - PRFfit) / np.square(ERRimg)) DegOfFreedom = npix - len(guess) - 1 try: kepmsg.log(logfile, "\n Residual flux = %.2f e-/s" % FLUXres, True) kepmsg.log(logfile, "Pearson's chi^2 test = 
%d for %d dof" % (Pearson, DegOfFreedom), True) except: pass kepmsg.log(logfile, " Chi^2 test = %d for %d dof" % (Chi2, DegOfFreedom), True) # image scale and intensity limits for plotting images imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, "linear") if imscale == "linear": zmaxpr *= 0.9 elif imscale == "logarithmic": zmaxpr = np.max(zmaxpr) zminpr = zmaxpr / 2 # plot pl.figure(figsize=[12, 10]) pl.clf() # data plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.07, 0.58, "observation", cmap, lcolor) pl.text( 0.05, 0.05, "CROWDSAP: %.4f" % CrowdTPF, horizontalalignment="left", verticalalignment="center", fontsize=18, fontweight=500, color=lcolor, transform=pl.gca().transAxes, ) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, "--", 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, "-", 3.0) # model plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, ydim, 0.445, 0.58, "model", cmap, lcolor) pl.text( 0.05, 0.05, "Crowding: %.4f" % CrowdAper, horizontalalignment="left", verticalalignment="center", fontsize=18, fontweight=500, color=lcolor, transform=pl.gca().transAxes, ) for x, y in zip(OBJx, OBJy): pl.scatter(x, y, marker="x", color="w") kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, "--", 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, "-", 3.0) if srcinfo is not None: kepid, sx, sy, kepmag = srcinfo for i in range(len(sx) - 1, -1, -1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max(np.array([80.0, 80.0 + (2.5 ** (18.0 - max(12.0, float(kepmag[i])))) * 250.0])) pl.scatter(sx[i], sy[i], s=size, facecolors="g", edgecolors="k", alpha=0.1) else: pl.scatter(sx[i], sy[i], s=80, facecolors="r", edgecolors="k", alpha=0.1) # binned model plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.07, 0.18, "fit", cmap, lcolor, crowd=Crowding) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, "--", 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, "-", 3.0) # residuals reslim = max(np.abs(zminre), np.abs(zmaxre)) plotimage(imgres_pl, -reslim, reslim, 4, row, column, xdim, ydim, 0.445, 0.18, "residual", "coolwarm", lcolor) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, "--", 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, "-", 3.0) # plot data color bar barwin = pl.axes([0.84, 0.18, 0.03, 0.8]) if imscale == "linear": brange = np.arange(zminfl, zmaxfl, (zmaxfl - zminfl) / 1000) elif imscale == "logarithmic": brange = np.arange(10.0 ** zminfl, 10.0 ** zmaxfl, (10.0 ** zmaxfl - 10.0 ** zminfl) / 1000) elif imscale == "squareroot": brange = np.arange(zminfl ** 2, zmaxfl ** 2, (zmaxfl ** 2 - zminfl ** 2) / 1000) if imscale == "linear": barimg = np.resize(brange, (1000, 1)) elif imscale == "logarithmic": barimg = np.log10(np.resize(brange, (1000, 1))) elif imscale == "squareroot": barimg = np.sqrt(np.resize(brange, (1000, 1))) try: nrm = len(str(int(np.nanmax(brange)))) - 1 except: nrm = 0 brange = brange / 10 ** nrm pl.imshow( barimg, aspect="auto", interpolation="nearest", origin="lower", vmin=np.nanmin(barimg), vmax=np.nanmax(barimg), extent=(0.0, 1.0, brange[0], brange[-1]), cmap=cmap, ) barwin.yaxis.tick_right() barwin.yaxis.set_label_position("right") 
barwin.yaxis.set_major_locator(MaxNLocator(7)) pl.gca().yaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().set_autoscale_on(False) pl.setp(pl.gca(), xticklabels=[], xticks=[]) pl.ylabel("Flux (10$^%d$ e$^-$ s$^{-1}$)" % nrm) pl.setp(barwin.get_yticklabels(), "rotation", 90) barwin.yaxis.set_major_formatter(FormatStrFormatter("%.1f")) # plot residual color bar barwin = pl.axes([0.07, 0.08, 0.75, 0.03]) brange = np.arange(-reslim, reslim, reslim / 500) barimg = np.resize(brange, (1, 1000)) pl.imshow( barimg, aspect="auto", interpolation="nearest", origin="lower", vmin=np.nanmin(barimg), vmax=np.nanmax(barimg), extent=(brange[0], brange[-1], 0.0, 1.0), cmap="coolwarm", ) barwin.xaxis.set_major_locator(MaxNLocator(7)) pl.gca().xaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().set_autoscale_on(False) pl.setp(pl.gca(), yticklabels=[], yticks=[]) pl.xlabel("Residuals (e$^-$ s$^{-1}$)") barwin.xaxis.set_major_formatter(FormatStrFormatter("%.1f")) # render plot pl.show(block=True) pl.close() # stop time kepmsg.clock("\nKEPPRF ended at", logfile, True) return Crowding
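# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the PyKE API): the aperture metrics logged by
# kepprf() above -- flux fraction, contamination and the CrowdAper-style
# crowding metric -- assuming PRFone (target-only model), PRFcont (summed
# contaminant models) and maskimg are (ydim, xdim) arrays. The bitwise test
# stands in for kepstat.bitInBitmap(maskimg[i, j], 2), and contamination is
# written directly in terms of PRFone + PRFcont rather than PRFall (see the
# LUGER comments above).
# -----------------------------------------------------------------------------

def aperture_metrics_sketch(PRFone, PRFcont, maskimg, target_flux):
    """PRFone, PRFcont, maskimg: (ydim, xdim) arrays; target_flux: fitted flux of source 0."""
    import numpy as np

    # bit 2 of the TPF aperture bitmap flags pixels inside the optimal aperture
    in_aper = (maskimg.astype(int) & 2) > 0

    flux_one = np.nansum(PRFone[in_aper])                      # target flux landing in the aperture
    flux_all = np.nansum(PRFone[in_aper] + PRFcont[in_aper])   # target plus contaminants in the aperture

    flux_fraction = flux_one / target_flux                     # fraction of the target's flux captured
    if flux_all > 0.0:
        contamination = (flux_all - flux_one) / flux_all       # aperture flux not coming from the target
        crowding = flux_one / flux_all                         # CrowdAper-style crowding metric
    else:
        contamination, crowding = 0.0, np.nan
    return flux_fraction, contamination, crowding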
def kepprf(infile, plotfile, rownum, columns, rows, fluxes, border, background, focus, prfdir, xtol, ftol, imscale, colmap, labcol, apercol, plt, verbose, logfile, status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPPRF -- ' call += 'infile=' + infile + ' ' call += 'plotfile=' + plotfile + ' ' call += 'rownum=' + str(rownum) + ' ' call += 'columns=' + columns + ' ' call += 'rows=' + rows + ' ' call += 'fluxes=' + fluxes + ' ' call += 'border=' + str(border) + ' ' bground = 'n' if (background): bground = 'y' call += 'background=' + bground + ' ' focs = 'n' if (focus): focs = 'y' call += 'focus=' + focs + ' ' call += 'prfdir=' + prfdir + ' ' call += 'xtol=' + str(xtol) + ' ' call += 'ftol=' + str(xtol) + ' ' call += 'imscale=' + imscale + ' ' call += 'colmap=' + colmap + ' ' call += 'labcol=' + labcol + ' ' call += 'apercol=' + apercol + ' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot=' + plotit + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPPRF started at', logfile, verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # construct inital guess vector for fit if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in range(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in range(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile, message, verbose) if status == 0: for i in range(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0: for i in range(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border + 1) * 2): guess.append(0.0) if status == 0 and focus: guess.append(1.0) guess.append(1.0) guess.append(0.0) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile, message, verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, verbose) npix = numpy.size(numpy.nonzero(maskimg)[0]) # print target data if status == 0 and verbose: print('') print(' KepID: %s' % kepid) print(' BJD: %.2f' % (barytime[rownum - 1] + 2454833.0)) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # is this a good row with finite timestamp and pixels? if status == 0: if not numpy.isfinite(barytime[rownum - 1]) or numpy.nansum( fluxpixels[rownum - 1, :]) == numpy.nan: message = 'ERROR -- KEPFIELD: Row ' + str( rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile, message, verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum - 1, :] ferr = errpixels[rownum - 1, :] DATx = arange(column, column + xdim) DATy = arange(row, row + ydim) # if numpy.nanmin > 420000.0: flux -= 420000.0 # image scale and intensity limits of pixel data if status == 0: n = 0 DATimg = empty((ydim, xdim)) ERRimg = empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): DATimg[i, j] = flux[n] ERRimg[i, j] = ferr[n] n += 1 # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str( output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir status = kepmsg.err(logfile, message, verbose) # read PRF images if status == 0: prfn = [0, 0, 0, 0, 0] crpix1p = numpy.zeros((5), dtype='float32') crpix2p = numpy.zeros((5), dtype='float32') crval1p = numpy.zeros((5), dtype='float32') crval2p = numpy.zeros((5), dtype='float32') cdelt1p = numpy.zeros((5), dtype='float32') cdelt2p = numpy.zeros((5), dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) prfn = array(prfn) PRFx = arange(0.5, shape(prfn[0])[1] + 0.5) PRFy = arange(0.5, shape(prfn[0])[0] + 0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]), dtype='float32') prfWeight = zeros((5), dtype='float32') for i in range(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e-6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) / cdelt1p[0] / cdelt2p[0] # interpolate the calibrated PRF shape to the target position # if status == 0: # prf = zeros(shape(prfn[0,:,:]),dtype='float32') # px = crval1p + len(PRFx) / 2 * cdelt1p[0] # py = crval2p + len(PRFy) / 2 * cdelt2p[0] # pp = [[px[0],py[0]], # [px[1],py[1]], # [px[2],py[2]], # [px[3],py[3]], # [px[4],py[4]]] # for index,value in ndenumerate(prf): # pz = prfn[:,index[0],index[1]] # prf[index] = griddata(pp, pz, ([column], [row]), method='linear') # print shape(prf) # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline( PRFx, PRFy, prf) # construct mesh for background model if status == 0 and background: bx = numpy.arange(1., float(xdim + 1)) by = numpy.arange(1., float(ydim + 1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data if status == 0: start = time.time() if focus and background: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif focus and not background: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocus, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif background and not focus: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) else: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRF, guess, args=args, xtol=xtol, ftol=ftol, disp=False) print('Convergence time = %.2fs\n' % (time.time() - start)) # pad the PRF data if the PRF array is smaller than the data array if status == 0: flux = [] OBJx = [] OBJy = [] PRFmod = numpy.zeros((prfDimY, prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = numpy.zeros((prfDimY, prfDimX)) superPRF = 
zeros((prfDimY + 1, prfDimX + 1)) superPRF[abs(PRFy0):abs(PRFy0) + shape(prf)[0], abs(PRFx0):abs(PRFx0) + shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf, -angle, reshape=False, mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc + i]) OBJy.append(ans[nsrc * 2 + i]) # calculate best-fit model y = (OBJy[i] - mean(DATy)) / cdelt1p[0] x = (OBJx[i] - mean(DATx)) / cdelt2p[0] prfTmp = shift(prf, [y, x], order=3, mode='constant') prfTmp = prfTmp[PRFy0:PRFy0 + prfDimY, PRFx0:PRFx0 + prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters if verbose: txt = 'Flux = %10.2f e-/s ' % flux[i] txt += 'X = %9.4f pix ' % OBJx[i] txt += 'Y = %9.4f pix ' % OBJy[i] kepmsg.log(logfile, txt, True) # # params = {'backend': 'png', # 'axes.linewidth': 2.5, # 'axes.labelsize': 24, # 'axes.font': 'sans-serif', # 'axes.fontweight' : 'bold', # 'text.fontsize': 12, # 'legend.fontsize': 12, # 'xtick.labelsize': 24, # 'ytick.labelsize': 24} # pylab.rcParams.update(params) # # pylab.figure(figsize=[20,10]) # ax = pylab.axes([0.05,0.08,0.46,0.9]) # xxx = numpy.arange(397.5,402.5,0.02) # yyy = numpy.sum(PRFmod,axis=0) / numpy.max(numpy.sum(PRFmod,axis=0)) # pylab.plot(xxx,yyy,color='b',linewidth=3.0) # xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1]) # yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1]) # pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3) # pylab.xlabel('Pixel Column Number') # pylab.xlim(397.5,402.5) # pylab.ylim(1.0e-30,1.02) # for xmaj in numpy.arange(397.5,402.5,1.0): # pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':') # for xmaj in numpy.arange(0.2,1.2,0.2): # pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':') # # # ax = pylab.axes([0.51,0.08,0.46,0.9]) # xxx = numpy.arange(32.5,37.5,0.02) # yyy = numpy.sum(PRFmod,axis=1) / numpy.max(numpy.sum(PRFmod,axis=1)) # pylab.plot(xxx,yyy,color='b',linewidth=3.0) # xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1]) # yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1]) # pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3) # pylab.setp(pylab.gca(),yticklabels=[]) # pylab.xlabel('Pixel Row Number') # pylab.xlim(32.5,37.5) # pylab.ylim(1.0e-30,1.02) # for xmaj in numpy.arange(32.5,37.5,1.0): # pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':') # for xmaj in numpy.arange(0.2,1.2,0.2): # pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':') # pylab.ion() # pylab.plot([]) # pylab.ioff() if verbose and background: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3] else: bcoeff = array([ ans[nsrc * 3:nsrc * 3 + bterms], ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2] ]) bkg = kepfunc.polyval2d(xx, yy, bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = '\n Mean background = %.2f e-/s' % b kepmsg.log(logfile, txt, True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if verbose and focus: if not background: kepmsg.log(logfile, '', True) kepmsg.log(logfile, ' X/Y focus factors = %.3f/%.3f' % (wx, wy), True) kepmsg.log(logfile, 'PRF rotation angle = %.2f deg' % angle, True) # measure flux fraction and contamination # LUGER: This looks horribly bugged. ``PRFall`` is certainly NOT the sum of the all the sources. 
if status == 0: PRFall = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle, splineInterpolation) PRFone = kepfunc.PRF2DET([flux[0]], [OBJx[0]], [OBJy[0]], DATx, DATy, wx, wy, angle, splineInterpolation) # LUGER: Add up contaminant fluxes PRFcont = np.zeros_like(PRFone) for ncont in range(1, len(flux)): PRFcont += kepfunc.PRF2DET([flux[ncont]], [OBJx[ncont]], [OBJy[ncont]], DATx, DATy, wx, wy, angle, splineInterpolation) PRFcont[np.where(PRFcont < 0)] = 0 FluxInMaskAll = numpy.nansum(PRFall) FluxInMaskOne = numpy.nansum(PRFone) FluxInAperAll = 0.0 FluxInAperOne = 0.0 FluxInAperAllTrue = 0.0 for i in range(1, ydim): for j in range(1, xdim): if kepstat.bitInBitmap(maskimg[i, j], 2): FluxInAperAll += PRFall[i, j] FluxInAperOne += PRFone[i, j] FluxInAperAllTrue += PRFone[i, j] + PRFcont[i, j] FluxFraction = FluxInAperOne / flux[0] try: Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll except: Contamination = 0.0 # LUGER: Pixel crowding metrics Crowding = PRFone / (PRFone + PRFcont) # LUGER: Optimal aperture crowding metric CrowdAper = FluxInAperOne / FluxInAperAllTrue kepmsg.log( logfile, '\n Total flux in mask = %.2f e-/s' % FluxInMaskAll, True) kepmsg.log( logfile, ' Target flux in mask = %.2f e-/s' % FluxInMaskOne, True) kepmsg.log( logfile, ' Total flux in aperture = %.2f e-/s' % FluxInAperAll, True) kepmsg.log( logfile, ' Target flux in aperture = %.2f e-/s' % FluxInAperOne, True) kepmsg.log( logfile, ' Target flux fraction in aperture = %.2f%%' % (FluxFraction * 100.0), True) kepmsg.log( logfile, 'Contamination fraction in aperture = %.2f%%' % (Contamination * 100.0), True) kepmsg.log(logfile, ' Crowding metric in aperture = %.4f' % (CrowdAper), True) # constuct model PRF in detector coordinates if status == 0: PRFfit = PRFall + 0.0 if background and bterms == 1: PRFfit = PRFall + b if background and bterms > 1: PRFfit = PRFall + bkg # calculate residual of DATA - FIT if status == 0: PRFres = DATimg - PRFfit FLUXres = numpy.nansum(PRFres) / npix # calculate the sum squared difference between data and model if status == 0: Pearson = abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit)) Chi2 = numpy.nansum( numpy.square(DATimg - PRFfit) / numpy.square(ERRimg)) DegOfFreedom = npix - len(guess) - 1 try: kepmsg.log(logfile, '\n Residual flux = %.2f e-/s' % FLUXres, True) kepmsg.log( logfile, 'Pearson\'s chi^2 test = %d for %d dof' % (Pearson, DegOfFreedom), True) except: pass kepmsg.log( logfile, ' Chi^2 test = %d for %d dof' % (Chi2, DegOfFreedom), True) # image scale and intensity limits for plotting images if status == 0: imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, 'linear') if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': zmaxpr = numpy.max(zmaxpr) zminpr = zmaxpr / 2 # plot style if status == 0: pylab.figure(figsize=[12, 10]) pylab.clf() plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.07, 0.53, 'observation', colmap, labcol) # pylab.text(830.0,242.1,'A',horizontalalignment='center',verticalalignment='center', # fontsize=28,fontweight=500,color='white') # pylab.text(831.1,240.62,'B',horizontalalignment='center',verticalalignment='center', # fontsize=28,fontweight=500,color='white') # plotimage(imgprf_pl,0.0,zmaxpr/0.5,2,row,column,xdim,ydim,0.52,0.52,'model',colmap) plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, 
ydim, 0.44, 0.53, 'model', colmap, labcol) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, apercol, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, apercol, '-', 3.0) plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.07, 0.08, 'fit', colmap, labcol, crowd=Crowding) # plotimage(imgres_pl,-zmaxre,zmaxre,4,row,column,xdim,ydim,0.44,0.08,'residual',colmap,'k') plotimage(imgres_pl, zminfl, zmaxfl, 4, row, column, xdim, ydim, 0.44, 0.08, 'residual', colmap, labcol) # plot data color bar # barwin = pylab.axes([0.84,0.53,0.06,0.45]) barwin = pylab.axes([0.84, 0.08, 0.06, 0.9]) if imscale == 'linear': brange = numpy.arange(zminfl, zmaxfl, (zmaxfl - zminfl) / 1000) elif imscale == 'logarithmic': brange = numpy.arange(10.0**zminfl, 10.0**zmaxfl, (10.0**zmaxfl - 10.0**zminfl) / 1000) elif imscale == 'squareroot': brange = numpy.arange(zminfl**2, zmaxfl**2, (zmaxfl**2 - zminfl**2) / 1000) if imscale == 'linear': barimg = numpy.resize(brange, (1000, 1)) elif imscale == 'logarithmic': barimg = numpy.log10(numpy.resize(brange, (1000, 1))) elif imscale == 'squareroot': barimg = numpy.sqrt(numpy.resize(brange, (1000, 1))) try: nrm = len(str(int(numpy.nanmax(brange)))) - 1 except: nrm = 0 brange = brange / 10**nrm pylab.imshow(barimg, aspect='auto', interpolation='nearest', origin='lower', vmin=numpy.nanmin(barimg), vmax=numpy.nanmax(barimg), extent=(0.0, 1.0, brange[0], brange[-1]), cmap=colmap) barwin.yaxis.tick_right() barwin.yaxis.set_label_position('right') barwin.yaxis.set_major_locator(MaxNLocator(7)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().set_autoscale_on(False) pylab.setp(pylab.gca(), xticklabels=[], xticks=[]) pylab.ylabel('Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm) setp(barwin.get_yticklabels(), 'rotation', 90) barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f')) # plot residual color bar # barwin = pylab.axes([0.84,0.08,0.06,0.45]) # Brange = numpy.arange(-zmaxre,zmaxre,(zmaxre+zmaxre)/1000) # try: # nrm = len(str(int(numpy.nanmax(brange))))-1 # except: # nrm = 0 # brange = brange / 10**nrm # barimg = numpy.resize(brange,(1000,1)) # pylab.imshow(barimg,aspect='auto',interpolation='nearest',origin='lower', # vmin=brange[0],vmax=brange[-1],extent=(0.0,1.0,brange[0],brange[-1]),cmap=colmap) # barwin.yaxis.tick_right() # barwin.yaxis.set_label_position('right') # barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f')) # barwin.yaxis.set_major_locator(MaxNLocator(7)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().set_autoscale_on(False) # pylab.setp(pylab.gca(),xticklabels=[],xticks=[]) # pylab.ylabel('Residual (10$^%d$ e$^-$ s$^{-1}$)' % nrm) # setp(barwin.get_yticklabels(), 'rotation', 90) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0 and plt: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPPRF ended at', logfile, verbose) return
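# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE source): kepprf above
# builds the local PRF by averaging the five calibration PRFs on the channel,
# weighting each by the inverse of its distance from the target position
# (CRVAL1P/CRVAL2P). A minimal, self-contained version of that weighting,
# assuming prfn is a list of five equally shaped 2-D numpy arrays, could look
# like this; the helper name is hypothetical.

def _example_blend_prfs(prfn, crval1p, crval2p, column, row):
    """Hypothetical helper: inverse-distance blend of five calibration PRFs."""
    import numpy as np
    prf = np.zeros_like(prfn[0], dtype='float64')
    for i in range(5):
        # distance of the target from this PRF's reference position
        d = np.sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2)
        prf += prfn[i] / max(d, 1.0e-6)   # tiny floor avoids division by zero
    return prf / np.nansum(prf)           # normalize to unit (nan-safe) sum
# ----------------------------------------------------------------------------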
def kepmask(infile,mfile,pfile,tabrow,imin,imax,iscale,cmap,verbose,logfile,status,cLine=False): global pimg, zscale, zmin, zmax, xmin, xmax, ymin, ymax, quarter global pxdim, pydim, kepmag, skygroup, season, channel global module, output, row, column, maskfile, plotfile global pkepid, pkepmag, pra, pdec, colmap, cmdLine # input arguments status = 0 numpy.seterr(all="ignore") zmin = imin; zmax = imax; zscale = iscale; colmap = cmap maskfile = mfile; plotfile = pfile cmdLine = cLine # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPMASK -- ' call += 'infile='+infile+' ' call += 'maskfile='+mfile+' ' call += 'plotfile='+pfile+' ' call += 'tabrow='+str(tabrow)+' ' call += 'imin='+str(imin)+' ' call += 'imax='+str(imax)+' ' call += 'iscale='+str(iscale)+' ' call += 'cmap='+str(cmap)+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPMASK started at',logfile,verbose) # reference color map if cmap == 'browse': status = cmap_plot() # open TPF FITS file and check tabrow exists if status == 0: tpf, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: try: naxis2 = tpf['TARGETTABLES'].header['NAXIS2'] except: txt = 'ERROR -- KEPMASK: No NAXIS2 keyword in ' + infile + '[TARGETTABLES]' status = kepmsg.err(logfile,txt,True) if status == 0 and tabrow > naxis2: txt = 'ERROR -- KEPMASK: tabrow is too large. There are ' + str(naxis2) + ' rows in the table.' status = kepmsg.err(logfile,txt,True) if status == 0: status = kepio.closefits(tpf,logfile,verbose) # read TPF data pixel image if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) img = pixels[tabrow] pkepid = copy(kepid) pra = copy(ra) pdec = copy(dec) pkepmag = copy(kepmag) pxdim = copy(xdim) pydim = copy(ydim) pimg = copy(img) # print target data if status == 0: print('') print(' KepID: %s' % kepid) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # subimage of channel for plot if status == 0: ymin = copy(row) ymax = ymin + ydim xmin = copy(column) xmax = xmin + xdim # intensity scale if status == 0: pimg, imin, imax = kepplot.intScale1D(pimg,zscale) if zmin and zmax and 'log' in zscale: zmin = log10(zmin) zmax = log10(zmax) elif zmin and zmax and 'sq' in zscale: zmin = sqrt(zmin) zmax = sqrt(zmax) elif zmin and zmax and 'li' in zscale: zmin *= 1.0 zmax *= 1.0 else: zmin = copy(imin) zmax = copy(imax) # nstat = 2; pixels = [] # work = array(sort(img),dtype=float32) # for i in range(len(work)): # if 'nan' not in str(work[i]): # pixels.append(work[i]) # pixels = array(pixels,dtype=float32) # if int(float(len(pixels)) / 10 + 0.5) > nstat: # nstat = int(float(len(pixels)) / 10 + 0.5) # if not zmin: # zmin = median(pixels[:nstat]) # if not zmax: # zmax = median(pixels[-nstat:]) # if 'log' in zscale: # pimg = log10(pimg) # if 'sq' in zscale: # pimg = sqrt(pimg) # plot limits ymin = float(ymin) - 0.5 ymax = float(ymax) - 0.5 xmin = float(xmin) - 0.5 xmax = float(xmax) - 0.5 # plot style try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 
'font.family': 'sans-serif', 'axes.labelweight': 'bold', 'font.size': 12, 'legend.fontsize': 12, 'xtick.labelsize': 14, 'ytick.labelsize': 14} pylab.rcParams.update(params) except: pass if status == 0: pylab.figure(figsize=[10,7]) plotimage(cmdLine) return
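# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE source): kepmask above
# maps any user-supplied display limits into the same stretch that
# kepplot.intScale1D applies to the pixel data, falling back to the automatic
# limits when none are given. Assuming zscale is one of 'linear',
# 'logarithmic' or 'squareroot' and imin/imax are the automatic limits, the
# limit handling reduces to something like this hypothetical helper:

def _example_scale_limits(zmin, zmax, zscale, imin, imax):
    """Hypothetical helper: convert user limits to the plotted intensity scale."""
    from math import log10, sqrt
    if zmin and zmax and 'log' in zscale:
        return log10(zmin), log10(zmax)   # limits given in flux, plot shows log10(flux)
    if zmin and zmax and 'sq' in zscale:
        return sqrt(zmin), sqrt(zmax)     # square-root stretch
    if zmin and zmax and 'li' in zscale:
        return zmin, zmax                 # linear stretch, use limits as given
    return imin, imax                     # otherwise fall back to automatic limits
# ----------------------------------------------------------------------------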
def keppca(infile, maskfile, outfile, components, plotpca, nreps, clobber, verbose, logfile, status, cmdLine=False): # startup parameters status = 0 labelsize = 32 ticksize = 18 xsize = 16 ysize = 10 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") try: import mdp except: msg = 'ERROR -- KEPPCA: this task has an external python dependency on MDP, a Modular toolkit for Data Processing (http://mdp-toolkit.sourceforge.net). In order to take advantage of this PCA task, the user must first install MDP with their current python distribution. Note carefully that you may have more than one python installation on your machine, and ensure that MDP is installed with the same version of python that the PyKE tools employ. Installation instructions for MDP can be found at the URL provided above.' status = kepmsg.err(None, msg, True) # log the call if status == 0: hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPPCA -- ' call += 'infile=' + infile + ' ' call += 'maskfile=' + maskfile + ' ' call += 'outfile=' + outfile + ' ' call += 'components=' + components + ' ' ppca = 'n' if (plotpca): ppca = 'y' call += 'plotpca=' + ppca + ' ' call += 'nmaps=' + str(nreps) + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time if status == 0: kepmsg.clock('KEPPCA started at', logfile, verbose) # test log file if status == 0: logfile = kepmsg.test(logfile) # clobber output file if status == 0: if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPCA: ' + outfile + ' exists. 
Use clobber=yes' status = kepmsg.err(logfile, message, verbose) # Set output file names - text file with data and plot if status == 0: dataout = copy(outfile) repname = re.sub('.fits', '.png', outfile) # open input file if status == 0: instr = pyfits.open(infile, mode='readonly', memmap=True) tstart, tstop, bjdref, cadence, status = kepio.timekeys( instr, infile, logfile, verbose, status) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \ kepio.readTPF(infile,'FLUX_BKG',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \ kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr1, status = \ kepio.readTPF(infile,'POS_CORR1',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr2, status = \ kepio.readTPF(infile,'POS_CORR2',logfile,verbose) # Save original data dimensions, in case of using maskfile if status == 0: xdimorig = xdim ydimorig = ydim # read mask definition file if it has been supplied if status == 0 and 'aper' not in maskfile.lower( ) and maskfile.lower() != 'all': maskx = array([], 'int') masky = array([], 'int') lines, status = kepio.openascii(maskfile, 'r', logfile, verbose) for line in lines: line = line.strip().split('|') if len(line) == 6: y0 = int(line[3]) x0 = int(line[4]) line = line[5].split(';') for items in line: try: masky = numpy.append(masky, y0 + int(items.split(',')[0])) maskx = numpy.append(maskx, x0 + int(items.split(',')[1])) except: continue status = kepio.closeascii(lines, logfile, verbose) if len(maskx) == 0 or len(masky) == 0: message = 'ERROR -- KEPPCA: ' + maskfile + ' contains no pixels.' 
status = kepmsg.err(logfile, message, verbose) xdim = max(maskx) - min(maskx) + 1 # Find largest x dimension of mask ydim = max(masky) - min(masky) + 1 # Find largest y dimension of mask # pad mask to ensure it is rectangular workx = array([], 'int') worky = array([], 'int') for ip in arange(min(maskx), max(maskx) + 1): for jp in arange(min(masky), max(masky) + 1): workx = append(workx, ip) worky = append(worky, jp) maskx = workx masky = worky # define new subimage bitmap... if status == 0 and maskfile.lower() != 'all': aperx = numpy.array([], 'int') apery = numpy.array([], 'int') aperb = maskx - x0 + xdimorig * ( masky - y0 ) # aperb is an array that contains the pixel numbers in the mask npix = len(aperb) # ...or use all pixels if status == 0 and maskfile.lower() == 'all': npix = xdimorig * ydimorig aperb = array([], 'int') aperb = numpy.r_[0:npix] # legal mask defined? if status == 0: if len(aperb) == 0: message = 'ERROR -- KEPPCA: no legal pixels within the subimage are defined.' status = kepmsg.err(logfile, message, verbose) # Identify principal components desired if status == 0: pcaout = [] txt = components.strip().split(',') for work1 in txt: try: pcaout.append(int(work1.strip())) except: work2 = work1.strip().split('-') try: for work3 in range(int(work2[0]), int(work2[1]) + 1): pcaout.append(work3) except: message = 'ERROR -- KEPPCA: cannot understand principal component list requested' status = kepmsg.err(logfile, message, verbose) if status == 0: pcaout = set(sort(pcaout)) pcarem = array( list(pcaout)) - 1 # The list of pca component numbers to be removed # Initialize arrays and variables, and apply pixel mask to the data if status == 0: ntim = 0 time = numpy.array([], dtype='float64') timecorr = numpy.array([], dtype='float32') cadenceno = numpy.array([], dtype='int') pixseries = numpy.array([], dtype='float32') errseries = numpy.array([], dtype='float32') bkgseries = numpy.array([], dtype='float32') berseries = numpy.array([], dtype='float32') quality = numpy.array([], dtype='float32') pos_corr1 = numpy.array([], dtype='float32') pos_corr2 = numpy.array([], dtype='float32') nrows = numpy.size(fluxpixels, 0) # Apply the pixel mask so we are left with only the desired pixels if status == 0: pixseriesb = fluxpixels[:, aperb] errseriesb = errpixels[:, aperb] bkgseriesb = flux_bkg[:, aperb] berseriesb = flux_bkg_err[:, aperb] # Read in the data to various arrays if status == 0: for i in range(nrows): if qual[i] < 10000 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,int(ydim*xdim/2+0.5)]) and \ numpy.isfinite(fluxpixels[i,1+int(ydim*xdim/2+0.5)]): ntim += 1 time = numpy.append(time, barytime[i]) timecorr = numpy.append(timecorr, tcorr[i]) cadenceno = numpy.append(cadenceno, cadno[i]) pixseries = numpy.append(pixseries, pixseriesb[i]) errseries = numpy.append(errseries, errseriesb[i]) bkgseries = numpy.append(bkgseries, bkgseriesb[i]) berseries = numpy.append(berseries, berseriesb[i]) quality = numpy.append(quality, qual[i]) pos_corr1 = numpy.append(pos_corr1, pcorr1[i]) pos_corr2 = numpy.append(pos_corr2, pcorr2[i]) pixseries = numpy.reshape(pixseries, (ntim, npix)) errseries = numpy.reshape(errseries, (ntim, npix)) bkgseries = numpy.reshape(bkgseries, (ntim, npix)) berseries = numpy.reshape(berseries, (ntim, npix)) tmp = numpy.median(pixseries, axis=1) for i in range(len(tmp)): pixseries[i] = pixseries[i] - tmp[i] # Figure out which pixels are undefined/nan and remove them. 
Keep track for adding back in later if status == 0: nanpixels = numpy.array([], dtype='int') i = 0 while (i < npix): if numpy.isnan(pixseries[0, i]): nanpixels = numpy.append(nanpixels, i) npix = npix - 1 i = i + 1 pixseries = numpy.delete(pixseries, nanpixels, 1) errseries = numpy.delete(errseries, nanpixels, 1) pixseries[numpy.isnan(pixseries)] = random.gauss(100, 10) errseries[numpy.isnan(errseries)] = 10 # Compute statistical weights, means, standard deviations if status == 0: weightseries = (pixseries / errseries)**2 pixMean = numpy.average(pixseries, axis=0, weights=weightseries) pixStd = numpy.std(pixseries, axis=0) # Normalize the input by subtracting the mean and divising by the standard deviation. # This makes it a correlation-based PCA, which is what we want. if status == 0: pixseriesnorm = (pixseries - pixMean) / pixStd # Number of principal components to compute. Setting it equal to the number of pixels if status == 0: nvecin = npix # Run PCA using the MDP Whitening PCA, which produces normalized PCA components (zero mean and unit variance) if status == 0: pcan = mdp.nodes.WhiteningNode(svd=True) pcar = pcan.execute(pixseriesnorm) eigvec = pcan.get_recmatrix() model = pcar # Re-insert nan columns as zeros if status == 0: for i in range(0, len(nanpixels)): nanpixels[i] = nanpixels[i] - i eigvec = numpy.insert(eigvec, nanpixels, 0, 1) pixMean = numpy.insert(pixMean, nanpixels, 0, 0) # Make output eigenvectors (correlation images) into xpix by ypix images if status == 0: eigvec = eigvec.reshape(nvecin, ydim, xdim) # Calculate sum of all pixels to display as raw lightcurve and other quantities if status == 0: pixseriessum = sum(pixseries, axis=1) nrem = len(pcarem) # Number of components to remove nplot = npix # Number of pcas to plot - currently set to plot all components, but could set # nplot = nrem to just plot as many components as is being removed # Subtract components by fitting them to the summed light curve if status == 0: x0 = numpy.tile(-1.0, 1) for k in range(0, nrem): def f(x): fluxcor = pixseriessum for k in range(0, len(x)): fluxcor = fluxcor - x[k] * model[:, pcarem[k]] return mad(fluxcor) if k == 0: x0 = array([-1.0]) else: x0 = numpy.append(x0, 1.0) myfit = scipy.optimize.fmin(f, x0, maxiter=50000, maxfun=50000, disp=False) x0 = myfit # Now that coefficients for all components have been found, subtract them to produce a calibrated time-series, # and then divide by the robust mean to produce a normalized time series as well if status == 0: c = myfit fluxcor = pixseriessum for k in range(0, nrem): fluxcor = fluxcor - c[k] * model[:, pcarem[k]] normfluxcor = fluxcor / mean(reject_outliers(fluxcor, 2)) # input file data if status == 0: cards0 = instr[0].header.cards cards1 = instr[1].header.cards cards2 = instr[2].header.cards table = instr[1].data[:] maskmap = copy(instr[2].data) # subimage physical WCS data if status == 0: crpix1p = cards2['CRPIX1P'].value crpix2p = cards2['CRPIX2P'].value crval1p = cards2['CRVAL1P'].value crval2p = cards2['CRVAL2P'].value cdelt1p = cards2['CDELT1P'].value cdelt2p = cards2['CDELT2P'].value # dummy columns for output file if status == 0: sap_flux_err = numpy.empty(len(time)) sap_flux_err[:] = numpy.nan sap_bkg = numpy.empty(len(time)) sap_bkg[:] = numpy.nan sap_bkg_err = numpy.empty(len(time)) sap_bkg_err[:] = numpy.nan pdc_flux = numpy.empty(len(time)) pdc_flux[:] = numpy.nan pdc_flux_err = numpy.empty(len(time)) pdc_flux_err[:] = numpy.nan psf_centr1 = numpy.empty(len(time)) psf_centr1[:] = numpy.nan psf_centr1_err = 
numpy.empty(len(time)) psf_centr1_err[:] = numpy.nan psf_centr2 = numpy.empty(len(time)) psf_centr2[:] = numpy.nan psf_centr2_err = numpy.empty(len(time)) psf_centr2_err[:] = numpy.nan mom_centr1 = numpy.empty(len(time)) mom_centr1[:] = numpy.nan mom_centr1_err = numpy.empty(len(time)) mom_centr1_err[:] = numpy.nan mom_centr2 = numpy.empty(len(time)) mom_centr2[:] = numpy.nan mom_centr2_err = numpy.empty(len(time)) mom_centr2_err[:] = numpy.nan # mask bitmap if status == 0 and 'aper' not in maskfile.lower( ) and maskfile.lower() != 'all': for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p) if maskmap[i, j] == 0: pass else: maskmap[i, j] = 1 for k in range(len(maskx)): if aperx[-1] == maskx[k] and apery[-1] == masky[k]: maskmap[i, j] = 3 # construct output primary extension if status == 0: hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].keyword not in list(hdu0.header.keys()): hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment) else: hdu0.header.cards[ cards0[i].keyword].comment = cards0[i].comment status = kepkey.history(call, hdu0, outfile, logfile, verbose) outstr = HDUList(hdu0) # construct output light curve extension if status == 0: col1 = Column(name='TIME', format='D', unit='BJD - 2454833', array=time) col2 = Column(name='TIMECORR', format='E', unit='d', array=timecorr) col3 = Column(name='CADENCENO', format='J', array=cadenceno) col4 = Column(name='SAP_FLUX', format='E', unit='e-/s', array=pixseriessum) col5 = Column(name='SAP_FLUX_ERR', format='E', unit='e-/s', array=sap_flux_err) col6 = Column(name='SAP_BKG', format='E', unit='e-/s', array=sap_bkg) col7 = Column(name='SAP_BKG_ERR', format='E', unit='e-/s', array=sap_bkg_err) col8 = Column(name='PDCSAP_FLUX', format='E', unit='e-/s', array=pdc_flux) col9 = Column(name='PDCSAP_FLUX_ERR', format='E', unit='e-/s', array=pdc_flux_err) col10 = Column(name='SAP_QUALITY', format='J', array=quality) col11 = Column(name='PSF_CENTR1', format='E', unit='pixel', array=psf_centr1) col12 = Column(name='PSF_CENTR1_ERR', format='E', unit='pixel', array=psf_centr1_err) col13 = Column(name='PSF_CENTR2', format='E', unit='pixel', array=psf_centr2) col14 = Column(name='PSF_CENTR2_ERR', format='E', unit='pixel', array=psf_centr2_err) col15 = Column(name='MOM_CENTR1', format='E', unit='pixel', array=mom_centr1) col16 = Column(name='MOM_CENTR1_ERR', format='E', unit='pixel', array=mom_centr1_err) col17 = Column(name='MOM_CENTR2', format='E', unit='pixel', array=mom_centr2) col18 = Column(name='MOM_CENTR2_ERR', format='E', unit='pixel', array=mom_centr2_err) col19 = Column(name='POS_CORR1', format='E', unit='pixel', array=pos_corr1) col20 = Column(name='POS_CORR2', format='E', unit='pixel', array=pos_corr2) col21 = Column(name='PCA_FLUX', format='E', unit='e-/s', array=fluxcor) col22 = Column(name='PCA_FLUX_NRM', format='E', array=normfluxcor) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \ col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22]) hdu1 = new_table(cols) hdu1.header['TTYPE1'] = ('TIME', 'column title: data time stamps') hdu1.header['TFORM1'] = ('D', 'data type: float64') hdu1.header['TUNIT1'] = ('BJD - 2454833', 'column units: barycenter corrected JD') hdu1.header['TDISP1'] = ('D12.7', 'column display format') hdu1.header['TTYPE2'] = ( 'TIMECORR', 'column title: barycentric-timeslice correction') hdu1.header['TFORM2'] = ('E', 
'data type: float32') hdu1.header['TUNIT2'] = ('d', 'column units: days') hdu1.header['TTYPE3'] = ('CADENCENO', 'column title: unique cadence number') hdu1.header['TFORM3'] = ('J', 'column format: signed integer32') hdu1.header['TTYPE4'] = ('SAP_FLUX', 'column title: aperture photometry flux') hdu1.header['TFORM4'] = ('E', 'column format: float32') hdu1.header['TUNIT4'] = ('e-/s', 'column units: electrons per second') hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR', 'column title: aperture phot. flux error') hdu1.header['TFORM5'] = ('E', 'column format: float32') hdu1.header['TUNIT5'] = ( 'e-/s', 'column units: electrons per second (1-sigma)') hdu1.header['TTYPE6'] = ( 'SAP_BKG', 'column title: aperture phot. background flux') hdu1.header['TFORM6'] = ('E', 'column format: float32') hdu1.header['TUNIT6'] = ('e-/s', 'column units: electrons per second') hdu1.header['TTYPE7'] = ( 'SAP_BKG_ERR', 'column title: ap. phot. background flux error') hdu1.header['TFORM7'] = ('E', 'column format: float32') hdu1.header['TUNIT7'] = ( 'e-/s', 'column units: electrons per second (1-sigma)') hdu1.header['TTYPE8'] = ('PDCSAP_FLUX', 'column title: PDC photometry flux') hdu1.header['TFORM8'] = ('E', 'column format: float32') hdu1.header['TUNIT8'] = ('e-/s', 'column units: electrons per second') hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR', 'column title: PDC flux error') hdu1.header['TFORM9'] = ('E', 'column format: float32') hdu1.header['TUNIT9'] = ( 'e-/s', 'column units: electrons per second (1-sigma)') hdu1.header['TTYPE10'] = ( 'SAP_QUALITY', 'column title: aperture photometry quality flag') hdu1.header['TFORM10'] = ('J', 'column format: signed integer32') hdu1.header['TTYPE11'] = ('PSF_CENTR1', 'column title: PSF fitted column centroid') hdu1.header['TFORM11'] = ('E', 'column format: float32') hdu1.header['TUNIT11'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR', 'column title: PSF fitted column error') hdu1.header['TFORM12'] = ('E', 'column format: float32') hdu1.header['TUNIT12'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE13'] = ('PSF_CENTR2', 'column title: PSF fitted row centroid') hdu1.header['TFORM13'] = ('E', 'column format: float32') hdu1.header['TUNIT13'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR', 'column title: PSF fitted row error') hdu1.header['TFORM14'] = ('E', 'column format: float32') hdu1.header['TUNIT14'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE15'] = ( 'MOM_CENTR1', 'column title: moment-derived column centroid') hdu1.header['TFORM15'] = ('E', 'column format: float32') hdu1.header['TUNIT15'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR', 'column title: moment-derived column error') hdu1.header['TFORM16'] = ('E', 'column format: float32') hdu1.header['TUNIT16'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE17'] = ('MOM_CENTR2', 'column title: moment-derived row centroid') hdu1.header['TFORM17'] = ('E', 'column format: float32') hdu1.header['TUNIT17'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR', 'column title: moment-derived row error') hdu1.header['TFORM18'] = ('E', 'column format: float32') hdu1.header['TUNIT18'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE19'] = ( 'POS_CORR1', 'column title: col correction for vel. abbern') hdu1.header['TFORM19'] = ('E', 'column format: float32') hdu1.header['TUNIT19'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE20'] = ( 'POS_CORR2', 'column title: row correction for vel. 
abbern') hdu1.header['TFORM20'] = ('E', 'column format: float32') hdu1.header['TUNIT20'] = ('pixel', 'column units: pixel') hdu1.header['TTYPE21'] = ('PCA_FLUX', 'column title: PCA-corrected flux') hdu1.header['TFORM21'] = ('E', 'column format: float32') hdu1.header['TUNIT21'] = ('pixel', 'column units: e-/s') hdu1.header['TTYPE22'] = ( 'PCA_FLUX_NRM', 'column title: normalized PCA-corrected flux') hdu1.header['TFORM22'] = ('E', 'column format: float32') hdu1.header['EXTNAME'] = ('LIGHTCURVE', 'name of extension') for i in range(len(cards1)): if (cards1[i].keyword not in list(hdu1.header.keys()) and cards1[i].keyword[:4] not in [ 'TTYP', 'TFOR', 'TUNI', 'TDIS', 'TDIM', 'WCAX', '1CTY', '2CTY', '1CRP', '2CRP', '1CRV', '2CRV', '1CUN', '2CUN', '1CDE', '2CDE', '1CTY', '2CTY', '1CDL', '2CDL', '11PC', '12PC', '21PC', '22PC' ]): hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension if status == 0: hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].keyword not in list(hdu2.header.keys()): hdu2.header[cards2[i].keyword] = (cards2[i].value, cards2[i].comment) else: hdu2.header.cards[ cards2[i].keyword].comment = cards2[i].comment outstr.append(hdu2) # construct principal component table if status == 0: cols = [ Column(name='TIME', format='E', unit='BJD - 2454833', array=time) ] for i in range(len(pcar[0, :])): colname = 'PC' + str(i + 1) col = Column(name=colname, format='E', array=pcar[:, i]) cols.append(col) hdu3 = new_table(ColDefs(cols)) hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS', 'name of extension') hdu3.header['TTYPE1'] = ('TIME', 'column title: data time stamps') hdu3.header['TFORM1'] = ('D', 'data type: float64') hdu3.header['TUNIT1'] = ('BJD - 2454833', 'column units: barycenter corrected JD') hdu3.header['TDISP1'] = ('D12.7', 'column display format') for i in range(len(pcar[0, :])): hdu3.header['TTYPE' + str(i + 2)] = \ ('PC' + str(i + 1), 'column title: principal component number' + str(i + 1)) hdu3.header['TFORM' + str(i + 2)] = ('E', 'column format: float32') outstr.append(hdu3) # write output file if status == 0: outstr.writeto(outfile) # close input structure if status == 0: status = kepio.closefits(instr, logfile, verbose) # Create PCA report if status == 0 and plotpca: npp = 7 # Number of plots per page l = 1 repcnt = 1 for k in range(nreps): # First plot of every pagewith flux image, flux and calibrated time series status = kepplot.define(16, 12, logfile, verbose) if (k % (npp - 1) == 0): pylab.figure(figsize=[10, 16]) subplot2grid((npp, 6), (0, 0), colspan=2) # imshow(log10(pixMean.reshape(xdim,ydim).T-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu') imshow(log10( flipud(pixMean.reshape(ydim, xdim)) - min(pixMean) + 1), interpolation="nearest", cmap='RdYlBu') xticks([]) yticks([]) ax1 = subplot2grid((npp, 6), (0, 2), colspan=4) px = copy(time) + bjdref py = copy(pixseriessum) px, xlab, status = kepplot.cleanx(px, logfile, verbose) py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) kepplot.RangeOfPlot(px, py, 0.01, False) kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha, True) py = copy(fluxcor) py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) plot(px, py, marker='.', color='r', linestyle='', markersize=1.0) kepplot.labels('', re.sub('\)', '', re.sub('Flux \(', '', ylab)), 'k', 18) grid() setp(ax1.get_xticklabels(), visible=False) # plot principal components subplot2grid((npp, 6), (l, 0), colspan=2) imshow(eigvec[k], interpolation="nearest", 
cmap='RdYlBu') xlim(-0.5, xdim - 0.5) ylim(-0.5, ydim - 0.5) xticks([]) yticks([]) # The last plot on the page that should have the xlabel if (k % (npp - 1) == npp - 2 or k == nvecin - 1): subplot2grid((npp, 6), (l, 2), colspan=4) py = copy(model[:, k]) kepplot.RangeOfPlot(px, py, 0.01, False) kepplot.plot1d(px, py, cadence, 'r', lwidth, 'g', falpha, True) kepplot.labels(xlab, 'PC ' + str(k + 1), 'k', 18) pylab.grid() pylab.tight_layout() l = 1 pylab.savefig(re.sub('.png', '_%d.png' % repcnt, repname)) if not cmdLine: kepplot.render(cmdLine) repcnt += 1 # The other plots on the page that should have no xlabel else: ax2 = subplot2grid((npp, 6), (l, 2), colspan=4) py = copy(model[:, k]) kepplot.RangeOfPlot(px, py, 0.01, False) kepplot.plot1d(px, py, cadence, 'r', lwidth, 'g', falpha, True) kepplot.labels('', 'PC ' + str(k + 1), 'k', 18) grid() setp(ax2.get_xticklabels(), visible=False) pylab.tight_layout() l = l + 1 pylab.savefig(re.sub('.png', '_%d.png' % repcnt, repname)) if not cmdLine: kepplot.render(cmdLine) # plot style and size if status == 0 and plotpca: status = kepplot.define(labelsize, ticksize, logfile, verbose) pylab.figure(figsize=[xsize, ysize]) pylab.clf() # plot aperture photometry and PCA corrected data if status == 0 and plotpca: ax = kepplot.location([0.06, 0.54, 0.93, 0.43]) px = copy(time) + bjdref py = copy(pixseriessum) px, xlab, status = kepplot.cleanx(px, logfile, verbose) py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) kepplot.RangeOfPlot(px, py, 0.01, False) kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha, True) py = copy(fluxcor) py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) kepplot.plot1d(px, py, cadence, 'r', 2, fcolor, 0.0, True) pylab.setp(pylab.gca(), xticklabels=[]) kepplot.labels('', ylab, 'k', 24) pylab.grid() # plot aperture photometry and PCA corrected data if status == 0 and plotpca: ax = kepplot.location([0.06, 0.09, 0.93, 0.43]) yr = array([], 'float32') npc = min([6, nrem]) for i in range(npc - 1, -1, -1): py = pcar[:, i] * c[i] py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) cl = float(i) / (float(npc)) kepplot.plot1d(px, py, cadence, [1.0 - cl, 0.0, cl], 2, fcolor, 0.0, True) yr = append(yr, py) y1 = max(yr) y2 = -min(yr) kepplot.RangeOfPlot(px, array([-y1, y1, -y2, y2]), 0.01, False) kepplot.labels(xlab, 'Principal Components', 'k', 24) pylab.grid() # save plot to file if status == 0 and plotpca: pylab.savefig(repname) # render plot if status == 0 and plotpca: kepplot.render(cmdLine) # stop time if status == 0: kepmsg.clock('KEPPCA ended at', logfile, verbose) return
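# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE source): keppca above
# relies on mdp.nodes.WhiteningNode for the principal component analysis and
# on scipy.optimize.fmin to find the component amplitudes that minimize the
# median absolute deviation of the corrected light curve. Assuming
# pixseriesnorm is the (ncadence x npix) array of mean-subtracted,
# standard-deviation-normalized pixel time series, a roughly equivalent
# whitened PCA can be sketched with a plain numpy SVD; the helper name is
# hypothetical and the scaling is only approximate.

def _example_whitened_pca(pixseriesnorm):
    """Hypothetical helper: whitened principal components via numpy SVD."""
    import numpy as np
    u, s, vt = np.linalg.svd(pixseriesnorm, full_matrices=False)
    # columns of u*sqrt(ncadence) have (approximately) zero mean and unit
    # variance, analogous to the WhiteningNode output; rows of vt are the
    # corresponding eigenvectors (correlation images when reshaped to ydim x xdim)
    model = u * np.sqrt(pixseriesnorm.shape[0])
    eigvec = vt
    return model, eigvec
# ----------------------------------------------------------------------------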
def kepfield(infile,plotfile,rownum,imscale,colmap,lcolor,srctab,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPFIELD -- ' call += 'infile='+infile+' ' call += 'plotfile='+plotfile+' ' call += 'rownum='+str(rownum)+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' call += 'lcolor='+lcolor+' ' srct = 'n' if (srctab): srct = 'y' call += 'srctab='+srct+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPFIELD started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPFIELD: is %s a Target Pixel File? ' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) # observed or simulated data? if status == 0: coa = False instr = pyfits.open(infile,mode='readonly',memmap=True) filever, status = kepkey.get(infile,instr[0],'FILEVER',logfile,verbose) if filever == 'COA': coa = True # print target data if status == 0 and verbose: print('') print(' KepID: %s' % kepid) print(' BJD: %.2f' % (barytime[rownum-1] + 2454833.0)) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # is this a good row with finite timestamp and pixels? 
if status == 0: if not numpy.isfinite(barytime[rownum-1]) or not numpy.nansum(fluxpixels[rownum-1,:]): message = 'ERROR -- KEPFIELD: Row ' + str(rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile,message,verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum-1,:] # image scale and intensity limits of pixel data if status == 0: flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux,imscale) n = 0 imgflux_pl = empty((ydim+2,xdim+2)) for i in range(ydim+2): for j in range(xdim+2): imgflux_pl[i,j] = numpy.nan for i in range(ydim): for j in range(xdim): imgflux_pl[i+1,j+1] = flux_pl[n] n += 1 # cone search around target coordinates using the MAST target search form if status == 0: dr = max([ydim+2,xdim+2]) * 4.0 kepid,ra,dec,kepmag = MASTRADec(float(ra),float(dec),dr,srctab) # convert celestial coordinates to detector coordinates if status == 0: sx = numpy.array([]) sy = numpy.array([]) inf, status = kepio.openfits(infile,'readonly',logfile,verbose) try: crpix1, crpix2, crval1, crval2, cdelt1, cdelt2, pc, status = \ kepkey.getWCSs(infile,inf['APERTURE'],logfile,verbose) crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = \ kepkey.getWCSp(infile,inf['APERTURE'],logfile,verbose) for i in range(len(kepid)): dra = (ra[i] - crval1) * math.cos(math.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 if coa: sx = numpy.append(sx,-(pc[0,0] * dra + pc[0,1] * ddec) + crpix1 + crval1p - 1.0) else: sx = numpy.append(sx,pc[0,0] * dra + pc[0,1] * ddec + crpix1 + crval1p - 1.0) sy = numpy.append(sy,pc[1,0] * dra + pc[1,1] * ddec + crpix2 + crval2p - 1.0) except: message = 'ERROR -- KEPFIELD: Non-compliant WCS information within file %s' % infile status = kepmsg.err(logfile,message,verbose) # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 48, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 20, 'ytick.labelsize': 20} pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10,10]) pylab.clf() # pixel limits of the subimage if status == 0: ymin = copy(float(row)) ymax = ymin + ydim xmin = copy(float(column)) xmax = xmin + xdim # plot limits for flux image if status == 0: ymin = float(ymin) - 1.5 ymax = float(ymax) + 0.5 xmin = float(xmin) - 1.5 xmax = float(xmax) + 0.5 # plot the image window if status == 0: ax = pylab.axes([0.1,0.11,0.88,0.88]) pylab.imshow(imgflux_pl,aspect='auto',interpolation='nearest',origin='lower', vmin=zminfl,vmax=zmaxfl,extent=(xmin,xmax,ymin,ymax),cmap=colmap) pylab.gca().set_autoscale_on(False) labels = ax.get_yticklabels() setp(labels, 'rotation', 90) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.xlabel('Pixel Column Number', {'color' : 'k'}) pylab.ylabel('Pixel Row Number', {'color' : 'k'}) # plot mask borders if status == 0: kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,1,lcolor,'--',0.5) # plot aperture borders if status == 0: kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,2,lcolor,'-',4.0) # list sources if status == 0: print('Column Row RA J2000 Dec J2000 Kp Kepler ID') print('----------------------------------------------------') for i in range(len(sx)-1,-1,-1): if sx[i] >= xmin and sx[i] < xmax and sy[i] >= ymin and sy[i] < ymax: if kepid[i] != 0 and kepmag[i] != 0.0: print('%6.1f %6.1f %9.5f %8.5f %5.2f KIC %d' % \ 
(float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),float(kepmag[i]),int(kepid[i]))) elif kepid[i] != 0 and kepmag[i] == 0.0: print('%6.1f %6.1f %9.5f %8.5f KIC %d' % \ (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),int(kepid[i]))) else: print('%6.1f %6.1f %9.5f %8.5f' % (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]))) # plot sources if status == 0: for i in range(len(sx)-1,-1,-1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max(array([80.0,80.0 + (2.5**(18.0 - max(12.0,float(kepmag[i])))) * 250.0])) pylab.scatter(sx[i],sy[i],s=size,facecolors='g',edgecolors='k',alpha=0.4) else: pylab.scatter(sx[i],sy[i],s=80,facecolors='r',edgecolors='k',alpha=0.4) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPFIELD ended at',logfile,verbose) return
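# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE source): kepfield above
# converts catalog RA/Dec into detector column/row using the celestial WCS of
# the APERTURE extension (CRVAL/CRPIX/CDELT plus the PC rotation matrix) and
# the physical WCS offsets (CRVAL1P/CRVAL2P). For a single source the
# transformation applied above is, in essence, the following; the helper name
# is hypothetical and pc is assumed to be a 2x2 numpy array.

def _example_sky_to_pixel(ra, dec, crval1, crval2, cdelt1, cdelt2, pc,
                          crpix1, crpix2, crval1p, crval2p, coa=False):
    """Hypothetical helper: project one RA/Dec (deg) onto TPF column/row."""
    import math
    dra = (ra - crval1) * math.cos(math.radians(dec)) / cdelt1
    ddec = (dec - crval2) / cdelt2
    if coa:  # simulated (COA) files have the column axis mirrored
        sx = -(pc[0, 0] * dra + pc[0, 1] * ddec) + crpix1 + crval1p - 1.0
    else:
        sx = pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0
    sy = pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0
    return sx, sy
# ----------------------------------------------------------------------------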
def kepprf(infile,plotfile,rownum,columns,rows,fluxes,border,background,focus,prfdir,xtol,ftol, imscale,colmap,plt,verbose,logfile,status,cmdLine=False): # input arguments print "... input arguments" status = 0 seterr(all="ignore") # log the call print "... logging the call" hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPRF -- ' call += 'infile='+infile+' ' call += 'plotfile='+plotfile+' ' call += 'rownum='+str(rownum)+' ' call += 'columns='+columns+' ' call += 'rows='+rows+' ' call += 'fluxes='+fluxes+' ' call += 'border='+str(border)+' ' bground = 'n' if (background): bground = 'y' call += 'background='+bground+' ' focs = 'n' if (focus): focs = 'y' call += 'focus='+focs+' ' call += 'prfdir='+prfdir+' ' call += 'xtol='+str(xtol)+' ' call += 'ftol='+str(xtol)+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot='+plotit+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # test log file logfile = kepmsg.test(logfile) # start time print "... starting kepler time" kepmsg.clock('KEPPRF started at',logfile,verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # construct inital guess vector for fit print " status = "+str(status) print "... initial guess" if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in xrange(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in xrange(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border+1)*2): guess.append(0.0) if status == 0 and focus: guess.append(1.0); guess.append(1.0); guess.append(0.0) # open TPF FITS file print "... open tpf file" if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file print "... read mask definition" if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) npix = numpy.size(numpy.nonzero(maskimg)[0]) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # is this a good row with finite timestamp and pixels? if status == 0: if not numpy.isfinite(barytime[rownum-1]) or numpy.nansum(fluxpixels[rownum-1,:]) == numpy.nan: message = 'ERROR -- KEPFIELD: Row ' + str(rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile,message,verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum-1,:] ferr = errpixels[rownum-1,:] DATx = arange(column,column+xdim) DATy = arange(row,row+ydim) # image scale and intensity limits of pixel data if status == 0: n = 0 DATimg = empty((ydim,xdim)) ERRimg = empty((ydim,xdim)) for i in range(ydim): for j in range(xdim): DATimg[i,j] = flux[n] ERRimg[i,j] = ferr[n] n += 1 # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str(output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir status = kepmsg.err(logfile,message,verbose) # read PRF images if status == 0: prfn = [0,0,0,0,0] crpix1p = numpy.zeros((5),dtype='float32') crpix2p = numpy.zeros((5),dtype='float32') crval1p = numpy.zeros((5),dtype='float32') crval2p = numpy.zeros((5),dtype='float32') cdelt1p = numpy.zeros((5),dtype='float32') cdelt2p = numpy.zeros((5),dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) PRFx = arange(0.5,shape(prfn[0])[1]+0.5) PRFy = arange(0.5,shape(prfn[0])[0]+0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]),dtype='float32') prfWeight = zeros((5),dtype='float32') for i in xrange(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) prf = prf / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline(PRFx,PRFy,prf) # construct mesh for background model if status == 0 and background: bx = numpy.arange(1.,float(xdim+1)) by = numpy.arange(1.,float(ydim+1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data if status == 0: start = time.time() if focus and background: args = (DATx,DATy,DATimg,nsrc,border,xx,yy,PRFx,PRFy,splineInterpolation) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground,guess,args=args,xtol=xtol, ftol=ftol,disp=False) elif focus and not background: args = (DATx,DATy,DATimg,nsrc,PRFx,PRFy,splineInterpolation) ans = fmin_powell(kepfunc.PRFwithFocus,guess,args=args,xtol=xtol, ftol=ftol,disp=False) elif background and not focus: args = (DATx,DATy,DATimg,nsrc,border,xx,yy,splineInterpolation) ans = fmin_powell(kepfunc.PRFwithBackground,guess,args=args,xtol=xtol, ftol=ftol,disp=False) else: args = (DATx,DATy,DATimg,splineInterpolation) ans = fmin_powell(kepfunc.PRF,guess,args=args,xtol=xtol, ftol=ftol,disp=False) print 'Convergence time = %.2fs\n' % (time.time() - start) # pad the PRF data if the PRF array is smaller than the data array if status == 0: flux = []; OBJx = []; OBJy = [] PRFmod = numpy.zeros((prfDimY,prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = numpy.zeros((prfDimY,prfDimX)) superPRF = zeros((prfDimY+1,prfDimX+1)) superPRF[abs(PRFy0):abs(PRFy0)+shape(prf)[0],abs(PRFx0):abs(PRFx0)+shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf,-angle,reshape=False,mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc+i]) OBJy.append(ans[nsrc*2+i]) # calculate best-fit model y = (OBJy[i]-mean(DATy)) / cdelt1p[0] x = (OBJx[i]-mean(DATx)) / cdelt2p[0] prfTmp = shift(prf,[y,x],order=1,mode='constant') prfTmp = prfTmp[PRFy0:PRFy0+prfDimY,PRFx0:PRFx0+prfDimX] PRFmod = PRFmod + prfTmp * 
flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters if verbose: txt = 'Flux = %10.2f e-/s ' % flux[i] txt += 'X = %9.4f pix ' % OBJx[i] txt += 'Y = %9.4f pix ' % OBJy[i] kepmsg.log(logfile,txt,True) if verbose and background: bterms = border + 1 if bterms == 1: b = ans[nsrc*3] else: bcoeff = array([ans[nsrc*3:nsrc*3+bterms],ans[nsrc*3+bterms:nsrc*3+bterms*2]]) bkg = kepfunc.polyval2d(xx,yy,bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = '\n Mean background = %.2f e-/s' % b kepmsg.log(logfile,txt,True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if verbose and focus: if not background: kepmsg.log(logfile,'',True) kepmsg.log(logfile,' X/Y focus factors = %.3f/%.3f' % (wx,wy),True) kepmsg.log(logfile,'PRF rotation angle = %.2f deg' % angle,True) # constuct model PRF in detector coordinates if status == 0: PRFfit = kepfunc.PRF2DET(flux,OBJx,OBJy,DATx,DATy,wx,wy,angle,splineInterpolation) if background and bterms == 1: PRFfit = PRFfit + b if background and bterms > 1: PRFfit = PRFfit + bkg # calculate residual of DATA - FIT if status == 0: PRFres = DATimg - PRFfit FLUXres = numpy.nansum(PRFres) # calculate the sum squared difference between data and model if status == 0: Pearson = abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit)) Chi2 = numpy.nansum(numpy.square(DATimg - PRFfit) / numpy.square(ERRimg)) DegOfFreedom = npix - len(guess) try: kepmsg.log(logfile,'\nResidual flux = %.6f e-/s' % FLUXres,True) kepmsg.log(logfile,'Pearson\'s chi^2 test = %d for %d dof' % (Pearson,DegOfFreedom),True) except: pass # kepmsg.log(logfile,'Chi^2 test = %d for %d dof' % (Chi2,DegOfFreedom),True) # image scale and intensity limits for plotting images if status == 0: imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg,imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod,imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit,imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres,imscale) if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': print zminpr,zmaxpr,numpy.max(zmaxpr) zmaxpr = numpy.max(zmaxpr) zminpr = zmaxpr / 2 # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10} pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10,10]) pylab.clf() plotimage(imgdat_pl,zminfl,zmaxfl,1,row,column,xdim,ydim,0.06,0.52,'flux',colmap) plotimage(imgprf_pl,zminpr,zmaxpr,2,row,column,xdim,ydim,0.52,0.52,'model',colmap) kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,1,'b','--',0.5) kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,2,'b','-',3.0) plotimage(imgfit_pl,zminfl,zmaxfl,3,row,column,xdim,ydim,0.06,0.06,'fit',colmap) plotimage(imgres_pl,zminfl,zmaxfl,4,row,column,xdim,ydim,0.52,0.06,'residual',colmap) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0 and plt: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPPRF ended at',logfile,verbose) return
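# ----------------------------------------------------------------------------
# Note on the goodness-of-fit numbers reported by kepprf above (added comment,
# not part of the original PyKE source): the chi-square statistic computed is
#
#     Chi2 = sum_ij (DATimg[i,j] - PRFfit[i,j])**2 / ERRimg[i,j]**2
#
# summed over the aperture pixels, with the degrees of freedom taken as the
# number of unmasked pixels minus the number of fitted parameters (flux,
# column and row per source, plus any background and focus terms).
# ----------------------------------------------------------------------------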
def kepfield(infile,plotfile,rownum,imscale='linear',colmap='YlOrBr',lcolor='gray',verbose=0, logfile='kepfield.log',status=0,kic=0,cmdLine=False): # input arguments seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPFIELD -- ' call += 'infile='+infile+' ' call += 'plotfile='+plotfile+' ' call += 'rownum='+str(rownum)+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' call += 'lcolor='+lcolor+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPFIELD started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPFIELD: is %s a Target Pixel File? ' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) # observed or simulated data? if status == 0: coa = False instr = pyfits.open(infile,mode='readonly',memmap=True) filever, status = kepkey.get(infile,instr[0],'FILEVER',logfile,verbose) if filever == 'COA': coa = True # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # is this a good row with finite timestamp and pixels? 
if status == 0: if not numpy.isfinite(barytime[rownum-1]) or not numpy.nansum(fluxpixels[rownum-1,:]): message = 'ERROR -- KEPFIELD: Row ' + str(rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile,message,verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum-1,:] # image scale and intensity limits of pixel data if status == 0: flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux,imscale) n = 0 imgflux_pl = empty((ydim+2,xdim+2)) for i in range(ydim+2): for j in range(xdim+2): imgflux_pl[i,j] = numpy.nan for i in range(ydim): for j in range(xdim): imgflux_pl[i+1,j+1] = flux_pl[n] n += 1 # cone search around target coordinates using the MAST target search form if status == 0: dr = max([ydim+2,xdim+2]) * 4.0 kepid,ra,dec,kepmag = MASTRADec(float(ra),float(dec),dr) # convert celestial coordinates to detector coordinates if status == 0: sx = numpy.array([]) sy = numpy.array([]) inf, status = kepio.openfits(infile,'readonly',logfile,verbose) crpix1, crpix2, crval1, crval2, cdelt1, cdelt2, pc, status = \ kepkey.getWCSs(infile,inf['APERTURE'],logfile,verbose) crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = \ kepkey.getWCSp(infile,inf['APERTURE'],logfile,verbose) for i in range(len(kepid)): dra = (ra[i] - crval1) * math.sin(math.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 if coa: sx = numpy.append(sx,-(pc[0,0] * dra + pc[0,1] * ddec) + crpix1 + crval1p - 1.0) else: sx = numpy.append(sx,pc[0,0] * dra + pc[0,1] * ddec + crpix1 + crval1p - 1.0) sy = numpy.append(sy,pc[1,0] * dra + pc[1,1] * ddec + crpix2 + crval2p - 1.0) # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 18, 'ytick.labelsize': 18} pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10,10]) pylab.clf() # pixel limits of the subimage if status == 0: ymin = copy(float(row)) ymax = ymin + ydim xmin = copy(float(column)) xmax = xmin + xdim # plot limits for flux image if status == 0: ymin = float(ymin) - 1.5 ymax = float(ymax) + 0.5 xmin = float(xmin) - 1.5 xmax = float(xmax) + 0.5 # plot the image window if status == 0: ax = pylab.axes([0.1,0.11,0.88,0.88]) pylab.imshow(imgflux_pl,aspect='auto',interpolation='nearest',origin='lower', vmin=zminfl,vmax=zmaxfl,extent=(xmin,xmax,ymin,ymax),cmap=colmap) pylab.gca().set_autoscale_on(False) labels = ax.get_yticklabels() setp(labels, 'rotation', 90) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.xlabel('Pixel Column Number', {'color' : 'k'}) pylab.ylabel('Pixel Row Number', {'color' : 'k'}) # plot mask borders if status == 0: kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,1,lcolor,'-',1) # plot aperture borders if status == 0: kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,2,lcolor,'-',4.0) # list sources if status == 0 and verbose: print 'Column Row RA J2000 Dec J2000 Kp Kepler ID' print '----------------------------------------------------' for i in range(len(sx)-1,-1,-1): if sx[i] >= xmin and sx[i] < xmax and sy[i] >= ymin and sy[i] < ymax: if kepid[i] != 0 and kepmag[i] != 0.0: print '%6.1f %6.1f %9.5f %8.5f %5.2f KIC %d' % \ (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),float(kepmag[i]),int(kepid[i])) elif kepid[i] != 0 and kepmag[i] == 0.0: print '%6.1f %6.1f %9.5f %8.5f KIC %d' % \ 
(float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),int(kepid[i])) else: print '%6.1f %6.1f %9.5f %8.5f' % (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i])) # plot sources if status == 0: for i in range(len(sx)-1,-1,-1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max(array([80.0,80.0 + (2.5**(18.0 - max(12.0,float(kepmag[i])))) * 250.0])) pylab.scatter(sx[i],sy[i],s=size,facecolors='g',edgecolors='k',alpha=0.4) else: pylab.scatter(sx[i],sy[i],s=80,facecolors='r',edgecolors='k',alpha=0.4) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() pylab.clf() # stop time kepmsg.clock('\nKEPFIELD ended at',logfile,verbose) # pdb.set_trace() if kic > 0: ind = np.where(kepid == kic) colret = sx[ind] rowret = sy[ind] raret = ra[ind] decret = dec[ind] kepmagret = kepmag[ind] kepidret = kepid[ind] else: inds = np.where(kepmag != 0.0) colret = sx[inds] rowret = sy[inds] raret = ra[inds] decret = dec[inds] kepmagret = kepmag[inds] kepidret = kepid[inds] return colret,rowret,raret,decret,kepmagret,kepidret
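# ----------------------------------------------------------------------------
# Note on the WCS transform above: kepfield scales the right-ascension offset
# by math.sin(math.radians(dec[i])), whereas the interactive class later in
# this module uses np.cos(np.radians(dec[i])) for the same term.  The cosine
# form is the standard convergence-of-meridians correction, so the sine here
# is probably a slip.
#
# Assumed hand-off (not exercised anywhere in this file): the columns, rows,
# magnitudes and KIC IDs returned by kepfield can be repacked as the srcinfo
# tuple that kepprf expects, i.e. (kepid, column, row, kepmag).  The file
# name, row number and guess strings below are placeholders.
#
#   col, row, ra, dec, kepmag, kepid = kepfield('target_tpf.fits', 'none', 100)
#   srcinfo = (kepid, col, row, kepmag)
#   kepprf('target_tpf.fits', columns, rows, fluxes, rownum=100, srcinfo=srcinfo)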
def kepitermask(infile,outfile,plotfile,column,row,timescale,nsig,stepsize,winsize,npoly,niter, clobber,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPITERMASK -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'plotfile='+plotfile+' ' call += 'column='+str(column)+' ' call += 'row='+str(row)+' ' call += 'timescale='+str(timescale)+' ' call += 'nsig='+str(nsig)+' ' call += 'stepsize='+str(stepsize)+' ' call += 'winsize='+str(winsize)+' ' call += 'npoly='+str(npoly)+' ' call += 'niter='+str(niter)+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPITERMASK started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPITERMASK: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, co, ro, kepmag, xdim, ydim, work1, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? ' % infile status = kepmsg.err(logfile,message,verbose) # print target data if status == 0: print('') print(' KepID: %s' % kepid) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # read mask defintion data from TPF file if status == 0: maskmap, pixcoordx, pixcoordy, status = kepio.readMaskDefinition(infile,logfile,verbose) pixcoordx = rot90(pixcoordx) pixcoordy = flipud(rot90(pixcoordy)) maskmap[:,:] = 0.0 # which pixel does the target reside on? if status == 0: x = where(pixcoordx == float(column))[1][0] y = where(pixcoordy == float(row))[0][0] maskmap[y,x] = 1.0 # read time series data if status == 0: instr = pyfits.open(infile,mode='readonly',memmap=True) work1 = instr[1].data.field('TIME')[:] work2 = instr[1].data.field('FLUX')[:] work3 = instr[1].data.field('QUALITY')[:] # how many quality = 0 rows? if status == 0: npts = 0 nrows = len(work1) for i in range(nrows): if work3[i] == 0 and numpy.isfinite(work1[i]): npts += 1 time = empty((npts)) flux = empty((npts,ydim,xdim)) quality = empty((npts)) # construct pixel light curves from quality = 0 data if status == 0: n = 0 for i in range(nrows): if work3[i] == 0 and numpy.isfinite(work1[i]): time[n] = work1[i] flux[n] = work2[i,:,:] quality[n] = work3[i] n +=1 # light curves from central pixel if status == 0: (pr, pc) = where(maskmap == 1.0) best_lc = flux[:,pr[0],pc[0]] # calculate median CDPP if status == 0: best_median_cdpp, best_cdpp, status = \ GetCDPP(time,best_lc,npoly,nsig,niter,winsize,stepsize, timescale,logfile,verbose,status) # does another pixel improve CDPP of the target? 
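# The loop below grows the aperture greedily: starting from the single pixel
# under the target, AddPixelToAperture (defined elsewhere) appears to trial
# the neighbouring pixels, rebuild the summed light curve, and return the
# trial with the lowest median CDPP.  As written the loop runs a fixed 70
# iterations and accepts every trial (trial_med < 1e10); the commented-out
# lines show the intended acceptance test, roughly (sketch only):
#
#   while True:
#       trial_lc, trial_cdpp, trial_med, xpix, ypix, status = \
#           AddPixelToAperture(time, flux, maskmap, best_lc, npoly, nsig,
#                              niter, winsize, stepsize, timescale,
#                              logfile, verbose)
#       if trial_med >= best_median_cdpp:
#           break
#       best_lc, best_cdpp, best_median_cdpp = trial_lc, trial_cdpp, trial_med
#       maskmap[ypix, xpix] = 1.0
#
# The fixed-count loop that follows looks like a development override.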
if status == 0: trial_med = best_median_cdpp # while best_median_cdpp == trial_med: for i in range(70): trial_lc, trial_cdpp, trial_med, xpix, ypix, status = \ AddPixelToAperture(time,flux,maskmap,best_lc,npoly,nsig,niter,winsize, stepsize,timescale,logfile,verbose) # if trial_med < best_median_cdpp: if trial_med < 1e10: best_lc = trial_lc best_cdpp = trial_cdpp best_median_cdpp = trial_med maskmap[ypix,xpix] = 1.0 print(maskmap) print(i, best_median_cdpp) # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 32, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 12, 'ytick.labelsize': 12} pylab.rcParams.update(params) except: pass # tmp pylab.plot(time,best_lc,color='#0000ff',linestyle='-',linewidth=1.0) # render plot if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() if plotfile.lower() != 'none': pylab.savefig(plotfile) # stop time if status == 0: kepmsg.clock('KEPITERMASK ended at',logfile,verbose) return
def kepdeltapix(infile,nexp,columns,rows,fluxes,prfdir,interpolation,tolerance,fittype,imscale, colmap,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPDELTAPIX -- ' call += 'infile='+infile+' ' call += 'nexp='+str(nexp)+' ' call += 'columns='+columns+' ' call += 'rows='+rows+' ' call += 'fluxes='+fluxes+' ' call += 'prfdir='+prfdir+' ' call += 'interpolation='+interpolation+' ' call += 'tolerance='+str(tolerance)+' ' call += 'fittype='+str(fittype)+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPDELTAPIX started at',logfile,verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPDELTAPIX: is %s a Target Pixel File? ' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str(output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPDELTAPIX: No PRF file found in ' + prfdir status = kepmsg.err(logfile,message,verbose) # read PRF images if status == 0: prfn = [0,0,0,0,0] crpix1p = numpy.zeros((5),dtype='float32') crpix2p = numpy.zeros((5),dtype='float32') crval1p = numpy.zeros((5),dtype='float32') crval2p = numpy.zeros((5),dtype='float32') cdelt1p = numpy.zeros((5),dtype='float32') cdelt2p = numpy.zeros((5),dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) # choose rows in the TPF table at random if status == 0: i = 0 rownum = [] while i < nexp: work = int(random.random() * len(barytime)) if numpy.isfinite(barytime[work]) and numpy.isfinite(fluxpixels[work,ydim*xdim/2]): rownum.append(work) i += 1 # construct input pixel image if status == 0: fscat = numpy.empty((len(fluxes),nexp),dtype='float32') xscat = numpy.empty((len(columns),nexp),dtype='float32') yscat = numpy.empty((len(rows),nexp),dtype='float32') for irow in range(nexp): flux = fluxpixels[rownum[irow],:] # image scale and intensity limits of pixel data if status == 0: flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux,imscale) n = 0 imgflux_pl = empty((ydim,xdim)) for i in range(ydim): for j in range(xdim): imgflux_pl[i,j] = flux_pl[n] n += 1 # fit PRF model to pixel data if status == 0: start = time.time() f,y,x,prfMod,prfFit,prfRes = kepfit.fitMultiPRF(flux,ydim,xdim,column,row,prfn,crval1p, crval2p,cdelt1p,cdelt2p,interpolation,tolerance,fluxes,columns,rows,fittype, verbose,logfile) if verbose: print '\nConvergence time = %.1fs' % (time.time() - start) # best fit parameters if status == 0: for i in range(len(f)): fscat[i,irow] = f[i] xscat[i,irow] = x[i] yscat[i,irow] = y[i] # replace starting guess with previous fit parameters if status == 0: fluxes = copy(f) columns = copy(x) rows = copy(y) # mean and rms results if status == 0: fmean = []; fsig = [] xmean = []; xsig = [] ymean = []; ysig = [] for i in range(len(f)): fmean.append(numpy.mean(fscat[i,:])) xmean.append(numpy.mean(xscat[i,:])) ymean.append(numpy.mean(yscat[i,:])) fsig.append(numpy.std(fscat[i,:])) xsig.append(numpy.std(xscat[i,:])) ysig.append(numpy.std(yscat[i,:])) txt = 'Flux = %10.2f e-/s ' % fmean[-1] txt += 'X = %7.4f +/- %6.4f pix ' % (xmean[-1], xsig[i]) txt += 'Y = %7.4f +/- %6.4f pix' % (ymean[-1], ysig[i]) kepmsg.log(logfile,txt,True) # output results for kepprfphot if status == 0: txt1 = 'columns=0.0' txt2 = ' rows=0.0' for i in range(1,len(f)): txt1 += ',%.4f' % (xmean[i] - xmean[0]) txt2 += ',%.4f' % (ymean[i] - ymean[0]) kepmsg.log(logfile,'\nkepprfphot input fields:',True) kepmsg.log(logfile,txt1,True) kepmsg.log(logfile,txt2,True) # image scale and intensity limits for PRF model image if status == 0: imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(prfMod,imscale) # image scale and intensity limits for PRF fit image if status == 0: imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(prfFit,imscale) # image scale and intensity limits for data - fit residual if status == 0: imgres_pl, zminre, zmaxre = kepplot.intScale2D(prfRes,imscale) # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10} pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10,10]) 
pylab.clf() plotimage(imgflux_pl,zminfl,zmaxfl,1,row,column,xdim,ydim,0.06,0.52,'flux',colmap) plotimage(imgfit_pl,zminfl,zmaxfl,3,row,column,xdim,ydim,0.06,0.06,'fit',colmap) plotimage(imgres_pl,zminfl,zmaxfl,4,row,column,xdim,ydim,0.52,0.06,'residual',colmap) plotimage(imgprf_pl,zminpr,zmaxpr*0.9,2,row,column,xdim,ydim,0.52,0.52,'model',colmap) for i in range(len(f)): pylab.plot(xscat[i,:],yscat[i,:],'o',color='k') # Plot creep of target position over time, relative to the central source # barytime0 = float(int(barytime[0] / 100) * 100.0) # barytime -= barytime0 # xlab = 'BJD $-$ %d' % barytime0 # xmin = numpy.nanmin(barytime) # xmax = numpy.nanmax(barytime) # y1min = numpy.nanmin(data) # y1max = numpy.nanmax(data) # xr = xmax - xmin # yr = ymax - ymin # barytime = insert(barytime,[0],[barytime[0]]) # barytime = append(barytime,[barytime[-1]]) # data = insert(data,[0],[0.0]) # data = append(data,0.0) # # pylab.figure(2,figsize=[10,10]) # pylab.clf() # ax = pylab.subplot(211) # pylab.subplots_adjust(0.1,0.5,0.88,0.42) # pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # labels = ax.get_yticklabels() # setp(labels, 'rotation', 90, fontsize=ticksize) # for i in range(1,len(f)): # pylab.plot(rownum,xscat[i,:]-xscat[0,:],'o') # pylab.ylabel('$\Delta$Columns', {'color' : 'k'}) # ax = pylab.subplot(211) # pylab.subplots_adjust(0.1,0.1,0.88,0.42) # pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # labels = ax.get_yticklabels() # setp(labels, 'rotation', 90, fontsize=ticksize) # for i in range(1,len(f)): # pylab.plot(rownum,yscat[i,:]-yscat[0,:],'o') # pylab.xlim(xmin-xr*0.01,xmax+xr*0.01) # if ymin-yr*0.01 <= 0.0 or fullrange: # pylab.ylim(1.0e-10,ymax+yr*0.01) # else: # pylab.ylim(ymin-yr*0.01,ymax+yr*0.01) # pylab.ylabel('$\Delta$Rows', {'color' : 'k'}) # pylab.xlabel(xlab, {'color' : 'k'}) # render plot if status == 0: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPDELTAPIX ended at',logfile,verbose) return
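# ----------------------------------------------------------------------------
# Assumed hand-off (file names and parameter values below are placeholders):
# the 'kepprfphot input fields' that kepdeltapix logs are per-source offsets
# of the mean fitted positions relative to the first (target) source,
# formatted so they can be pasted into kepprfphot's columns= and rows=
# arguments, e.g.
#
#   kepdeltapix('target_tpf.fits', nexp=10, columns='123,125', rows='456,457',
#               fluxes='1e4,3e3', prfdir='/data/prf', interpolation='linear',
#               tolerance=1e-4, fittype='2D', imscale='linear', colmap='YlOrBr',
#               verbose=True, logfile='kepdeltapix.log', status=0)
#
# which logs lines of the form 'columns=0.0,<dcol2>,...' and
# 'rows=0.0,<drow2>,...'.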
def kepprf(infile, columns, rows, fluxes, rownum=0, border=0, background=0, focus=0, prfdir='../KeplerPRF', xtol=1.e-6, ftol=1.e-6, imscale='linear', cmap='YlOrBr', lcolor='k', acolor='b', logfile='kepcrowd.log', CrowdTPF=np.nan, srcinfo=None, **kwargs): # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, True) call = 'KEPPRF -- ' call += 'infile=' + infile + ' ' call += 'rownum=' + str(rownum) + ' ' call += 'columns=' + columns + ' ' call += 'rows=' + rows + ' ' call += 'fluxes=' + fluxes + ' ' call += 'border=' + str(border) + ' ' bground = 'n' if (background): bground = 'y' call += 'background=' + bground + ' ' focs = 'n' if (focus): focs = 'y' call += 'focus=' + focs + ' ' call += 'prfdir=' + prfdir + ' ' call += 'xtol=' + str(xtol) + ' ' call += 'ftol=' + str(xtol) + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', True) guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in range(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in range(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' kepmsg.err(logfile, message, True) return None if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' kepmsg.err(logfile, message, True) return None for i in range(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' kepmsg.err(logfile, message, True) return None for i in range(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' kepmsg.err(logfile, message, True) return None if background: if border == 0: guess.append(0.0) else: for i in range((border + 1) * 2): guess.append(0.0) if focus: guess.append(1.0) guess.append(1.0) guess.append(0.0) # open TPF FITS file try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,True) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? 
' % infile kepmsg.err(logfile, message, True) return None kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,True) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,True) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,True) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,True) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,True) # read mask defintion data from TPF file maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, True) npix = np.size(np.nonzero(maskimg)[0]) print('') print(' KepID: %s' % kepid) print(' BJD: %.2f' % (barytime[rownum - 1] + 2454833.0)) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # is this a good row with finite timestamp and pixels? if not np.isfinite(barytime[rownum - 1]) or np.nansum( fluxpixels[rownum - 1, :]) == np.nan: message = 'ERROR -- KEPFIELD: Row ' + str( rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile, message, True) # construct input pixel image flux = fluxpixels[rownum - 1, :] ferr = errpixels[rownum - 1, :] DATx = np.arange(column, column + xdim) DATy = np.arange(row, row + ydim) # image scale and intensity limits of pixel data n = 0 DATimg = np.empty((ydim, xdim)) ERRimg = np.empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): DATimg[i, j] = flux[n] ERRimg[i, j] = ferr[n] n += 1 # determine suitable PRF calibration file if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str( output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir kepmsg.err(logfile, message, True) return None # read PRF images prfn = [0, 0, 0, 0, 0] crpix1p = np.zeros((5), dtype='float32') crpix2p = np.zeros((5), dtype='float32') crval1p = np.zeros((5), dtype='float32') crval2p = np.zeros((5), dtype='float32') cdelt1p = np.zeros((5), dtype='float32') cdelt2p = np.zeros((5), dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,True) prfn = np.array(prfn) PRFx = np.arange(0.5, np.shape(prfn[0])[1] + 0.5) PRFy = np.arange(0.5, np.shape(prfn[0])[0] + 0.5) PRFx = (PRFx - np.size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - np.size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position prf = np.zeros(np.shape(prfn[0]), dtype='float32') prfWeight = np.zeros((5), dtype='float32') for i in range(5): prfWeight[i] = np.sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e-6 prf = prf + prfn[i] / prfWeight[i] prf = prf / np.nansum(prf) / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (np.shape(prf)[0] - prfDimY) / 2 PRFx0 = (np.shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF splineInterpolation = scipy.interpolate.RectBivariateSpline( PRFx, PRFy, prf) # construct mesh for background model if background: bx = np.arange(1., float(xdim + 1)) by = np.arange(1., float(ydim + 1)) xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim), np.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data start = time.time() if focus and background: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif focus and not background: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocus, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif background and not focus: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) else: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRF, guess, args=args, xtol=xtol, ftol=ftol, disp=False) kepmsg.log(logfile, 'Convergence time = %.2fs\n' % (time.time() - start), True) # pad the PRF data if the PRF array is smaller than the data array flux = [] OBJx = [] OBJy = [] PRFmod = np.zeros((prfDimY, prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = np.zeros((prfDimY, prfDimX)) superPRF = np.zeros((prfDimY + 1, prfDimX + 1)) superPRF[np.abs(PRFy0):np.abs(PRFy0) + np.shape(prf)[0], np.abs(PRFx0):np.abs(PRFx0) + np.shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf, -angle, reshape=False, mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc + i]) OBJy.append(ans[nsrc * 2 + i]) # calculate best-fit model y = (OBJy[i] - np.mean(DATy)) / cdelt1p[0] x = (OBJx[i] - 
np.mean(DATx)) / cdelt2p[0] prfTmp = shift(prf, [y, x], order=3, mode='constant') prfTmp = prfTmp[PRFy0:PRFy0 + prfDimY, PRFx0:PRFx0 + prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters txt = 'Flux = %10.2f e-/s ' % flux[i] txt += 'X = %9.4f pix ' % OBJx[i] txt += 'Y = %9.4f pix ' % OBJy[i] kepmsg.log(logfile, txt, True) if background: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3] else: bcoeff = np.array([ ans[nsrc * 3:nsrc * 3 + bterms], ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2] ]) bkg = kepfunc.polyval2d(xx, yy, bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = '\n Mean background = %.2f e-/s' % b kepmsg.log(logfile, txt, True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if not background: kepmsg.log(logfile, '', True) kepmsg.log(logfile, ' X/Y focus factors = %.3f/%.3f' % (wx, wy), True) kepmsg.log(logfile, 'PRF rotation angle = %.2f deg' % angle, True) # measure flux fraction and contamination # LUGER: This looks horribly bugged. ``PRFall`` is certainly NOT the sum of the all the sources. # Check out my comments in ``kepfunc.py``. PRFall = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle, splineInterpolation) PRFone = kepfunc.PRF2DET([flux[0]], [OBJx[0]], [OBJy[0]], DATx, DATy, wx, wy, angle, splineInterpolation) # LUGER: Add up contaminant fluxes PRFcont = np.zeros_like(PRFone) for ncont in range(1, len(flux)): PRFcont += kepfunc.PRF2DET([flux[ncont]], [OBJx[ncont]], [OBJy[ncont]], DATx, DATy, wx, wy, angle, splineInterpolation) PRFcont[np.where(PRFcont < 0)] = 0 FluxInMaskAll = np.nansum(PRFall) FluxInMaskOne = np.nansum(PRFone) FluxInAperAll = 0.0 FluxInAperOne = 0.0 FluxInAperAllTrue = 0.0 for i in range(1, ydim): for j in range(1, xdim): if kepstat.bitInBitmap(maskimg[i, j], 2): FluxInAperAll += PRFall[i, j] FluxInAperOne += PRFone[i, j] FluxInAperAllTrue += PRFone[i, j] + PRFcont[i, j] FluxFraction = FluxInAperOne / flux[0] try: Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll except: Contamination = 0.0 # LUGER: Pixel crowding metrics Crowding = PRFone / (PRFone + PRFcont) Crowding[np.where(Crowding < 0)] = np.nan # LUGER: Optimal aperture crowding metric CrowdAper = FluxInAperOne / FluxInAperAllTrue kepmsg.log( logfile, '\n Total flux in mask = %.2f e-/s' % FluxInMaskAll, True) kepmsg.log( logfile, ' Target flux in mask = %.2f e-/s' % FluxInMaskOne, True) kepmsg.log( logfile, ' Total flux in aperture = %.2f e-/s' % FluxInAperAll, True) kepmsg.log( logfile, ' Target flux in aperture = %.2f e-/s' % FluxInAperOne, True) kepmsg.log( logfile, ' Target flux fraction in aperture = %.2f%%' % (FluxFraction * 100.0), True) kepmsg.log( logfile, 'Contamination fraction in aperture = %.2f%%' % (Contamination * 100.0), True) kepmsg.log(logfile, ' Crowding metric in aperture = %.4f' % (CrowdAper), True) kepmsg.log(logfile, ' Crowding metric from TPF = %.4f' % (CrowdTPF), True) # constuct model PRF in detector coordinates PRFfit = PRFall + 0.0 if background and bterms == 1: PRFfit = PRFall + b if background and bterms > 1: PRFfit = PRFall + bkg # calculate residual of DATA - FIT PRFres = DATimg - PRFfit FLUXres = np.nansum(PRFres) / npix # calculate the sum squared difference between data and model Pearson = np.abs(np.nansum(np.square(DATimg - PRFfit) / PRFfit)) Chi2 = np.nansum(np.square(DATimg - PRFfit) / np.square(ERRimg)) DegOfFreedom = npix - len(guess) - 1 try: kepmsg.log(logfile, '\n Residual flux = %.2f e-/s' % FLUXres, True) kepmsg.log( logfile, 'Pearson\'s chi^2 test = %d for %d 
dof' % (Pearson, DegOfFreedom), True) except: pass kepmsg.log(logfile, ' Chi^2 test = %d for %d dof' % (Chi2, DegOfFreedom), True) # image scale and intensity limits for plotting images imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, 'linear') if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': zmaxpr = np.max(zmaxpr) zminpr = zmaxpr / 2 # plot pl.figure(figsize=[12, 10]) pl.clf() # data plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.07, 0.58, 'observation', cmap, lcolor) pl.text(0.05, 0.05, 'CROWDSAP: %.4f' % CrowdTPF, horizontalalignment='left', verticalalignment='center', fontsize=18, fontweight=500, color=lcolor, transform=pl.gca().transAxes) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, '-', 3.0) # model plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, ydim, 0.445, 0.58, 'model', cmap, lcolor) pl.text(0.05, 0.05, 'Crowding: %.4f' % CrowdAper, horizontalalignment='left', verticalalignment='center', fontsize=18, fontweight=500, color=lcolor, transform=pl.gca().transAxes) for x, y in zip(OBJx, OBJy): pl.scatter(x, y, marker='x', color='w') kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, '-', 3.0) if srcinfo is not None: kepid, sx, sy, kepmag = srcinfo for i in range(len(sx) - 1, -1, -1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max( np.array([ 80.0, 80.0 + (2.5**(18.0 - max(12.0, float(kepmag[i])))) * 250.0 ])) pl.scatter(sx[i], sy[i], s=size, facecolors='g', edgecolors='k', alpha=0.1) else: pl.scatter(sx[i], sy[i], s=80, facecolors='r', edgecolors='k', alpha=0.1) # binned model plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.07, 0.18, 'fit', cmap, lcolor, crowd=Crowding) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, '-', 3.0) # residuals reslim = max(np.abs(zminre), np.abs(zmaxre)) plotimage(imgres_pl, -reslim, reslim, 4, row, column, xdim, ydim, 0.445, 0.18, 'residual', 'coolwarm', lcolor) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, acolor, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, acolor, '-', 3.0) # plot data color bar barwin = pl.axes([0.84, 0.18, 0.03, 0.8]) if imscale == 'linear': brange = np.arange(zminfl, zmaxfl, (zmaxfl - zminfl) / 1000) elif imscale == 'logarithmic': brange = np.arange(10.0**zminfl, 10.0**zmaxfl, (10.0**zmaxfl - 10.0**zminfl) / 1000) elif imscale == 'squareroot': brange = np.arange(zminfl**2, zmaxfl**2, (zmaxfl**2 - zminfl**2) / 1000) if imscale == 'linear': barimg = np.resize(brange, (1000, 1)) elif imscale == 'logarithmic': barimg = np.log10(np.resize(brange, (1000, 1))) elif imscale == 'squareroot': barimg = np.sqrt(np.resize(brange, (1000, 1))) try: nrm = len(str(int(np.nanmax(brange)))) - 1 except: nrm = 0 brange = brange / 10**nrm pl.imshow(barimg, aspect='auto', interpolation='nearest', origin='lower', vmin=np.nanmin(barimg), vmax=np.nanmax(barimg), extent=(0.0, 1.0, brange[0], brange[-1]), cmap=cmap) barwin.yaxis.tick_right() barwin.yaxis.set_label_position('right') barwin.yaxis.set_major_locator(MaxNLocator(7)) 
pl.gca().yaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().set_autoscale_on(False) pl.setp(pl.gca(), xticklabels=[], xticks=[]) pl.ylabel('Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm) pl.setp(barwin.get_yticklabels(), 'rotation', 90) barwin.yaxis.set_major_formatter(FormatStrFormatter('%.1f')) # plot residual color bar barwin = pl.axes([0.07, 0.08, 0.75, 0.03]) brange = np.arange(-reslim, reslim, reslim / 500) barimg = np.resize(brange, (1, 1000)) pl.imshow(barimg, aspect='auto', interpolation='nearest', origin='lower', vmin=np.nanmin(barimg), vmax=np.nanmax(barimg), extent=(brange[0], brange[-1], 0.0, 1.0), cmap='coolwarm') barwin.xaxis.set_major_locator(MaxNLocator(7)) pl.gca().xaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().set_autoscale_on(False) pl.setp(pl.gca(), yticklabels=[], yticks=[]) pl.xlabel('Residuals (e$^-$ s$^{-1}$)') barwin.xaxis.set_major_formatter(FormatStrFormatter('%.1f')) # render plot pl.show(block=True) pl.close() # stop time kepmsg.clock('\nKEPPRF ended at', logfile, True) return Crowding
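# ----------------------------------------------------------------------------
# Crowding bookkeeping used by kepprf above, in brief: PRFone is the best-fit
# PRF of the target alone, PRFcont is the summed PRF of every other fitted
# source, and the logged metrics reduce to
#
#   FluxFraction  = FluxInAperOne / flux[0]
#   Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll
#   Crowding      = PRFone / (PRFone + PRFcont)        # per-pixel crowding map
#   CrowdAper     = FluxInAperOne / FluxInAperAllTrue  # aperture average,
#                                                      # comparable to CROWDSAP
#
# where the 'InAper' sums run only over pixels with bit 2 set in the mask
# image (the optimal aperture).  These restate the expressions in the function
# body; see the LUGER comments above for a caveat about how PRFall is built.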
def __init__(self, infile, rownum=0, imscale='linear', cmap='YlOrBr', lcolor='k', acolor='b', query=True, logfile='kepcrowd.log', **kwargs): self.colrow = [] self.fluxes = [] self._text = [] # hide warnings np.seterr(all="ignore") # test log file logfile = kepmsg.test(logfile) # info hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, False) call = 'KEPFIELD -- ' call += 'infile=' + infile + ' ' call += 'rownum=' + str(rownum) kepmsg.log(logfile, call + '\n', False) try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile, 'TIME', logfile, False) except: message = 'ERROR -- KEPFIELD: is %s a Target Pixel File? ' % infile kepmsg.err(logfile, message, False) return "", "", "", None kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,False) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'rownumNO',logfile,False) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,False) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,False) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,False) # read mask defintion data from TPF file maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, False) # observed or simulated data? coa = False instr = pyfits.open(infile, mode='readonly', memmap=True) filever, status = kepkey.get(infile, instr[0], 'FILEVER', logfile, False) if filever == 'COA': coa = True # is this a good row with finite timestamp and pixels? 
if not np.isfinite(barytime[rownum - 1]) or not np.nansum( fluxpixels[rownum - 1, :]): message = 'ERROR -- KEPFIELD: Row ' + str( rownum) + ' is a bad quality timestamp' kepmsg.err(logfile, message, True) return "", "", "", None # construct input pixel image flux = fluxpixels[rownum - 1, :] # image scale and intensity limits of pixel data flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux, imscale) n = 0 imgflux_pl = np.empty((ydim + 2, xdim + 2)) for i in range(ydim + 2): for j in range(xdim + 2): imgflux_pl[i, j] = np.nan for i in range(ydim): for j in range(xdim): imgflux_pl[i + 1, j + 1] = flux_pl[n] n += 1 # cone search around target coordinates using the MAST target search form dr = max([ydim + 2, xdim + 2]) * 4.0 kepid, ra, dec, kepmag = MASTRADec(float(ra), float(dec), dr, query, logfile) # convert celestial coordinates to detector coordinates sx = np.array([]) sy = np.array([]) inf, status = kepio.openfits(infile, 'readonly', logfile, False) try: crpix1, crpix2, crval1, crval2, cdelt1, cdelt2, pc, status = \ kepkey.getWCSs(infile,inf['APERTURE'],logfile,False) crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = \ kepkey.getWCSp(infile,inf['APERTURE'],logfile,False) for i in range(len(kepid)): dra = (ra[i] - crval1) * np.cos(np.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 if coa: sx = np.append( sx, -(pc[0, 0] * dra + pc[0, 1] * ddec) + crpix1 + crval1p - 1.0) else: sx = np.append( sx, pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0) sy = np.append( sy, pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0) except: message = 'ERROR -- KEPFIELD: Non-compliant WCS information within file %s' % infile kepmsg.err(logfile, message, True) return "", "", "", None # plot self.fig = pl.figure(figsize=[10, 10]) pl.clf() # pixel limits of the subimage ymin = np.copy(float(row)) ymax = ymin + ydim xmin = np.copy(float(column)) xmax = xmin + xdim # plot limits for flux image ymin = float(ymin) - 1.5 ymax = float(ymax) + 0.5 xmin = float(xmin) - 1.5 xmax = float(xmax) + 0.5 # plot the image window ax = pl.axes([0.1, 0.11, 0.88, 0.82]) pl.title('Select sources for fitting (KOI first)', fontsize=24) pl.imshow(imgflux_pl, aspect='auto', interpolation='nearest', origin='lower', vmin=zminfl, vmax=zmaxfl, extent=(xmin, xmax, ymin, ymax), cmap=cmap) pl.gca().set_autoscale_on(False) labels = ax.get_yticklabels() pl.setp(labels, 'rotation', 90) pl.gca().xaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().yaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.xlabel('Pixel Column Number', {'color': 'k'}, fontsize=24) pl.ylabel('Pixel Row Number', {'color': 'k'}, fontsize=24) # plot mask borders kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, lcolor, '--', 0.5) # plot aperture borders kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, lcolor, '-', 4.0) # list sources with open(logfile, 'a') as lf: print('Column Row RA J2000 Dec J2000 Kp Kepler ID', file=lf) print('----------------------------------------------------', file=lf) for i in range(len(sx) - 1, -1, -1): if sx[i] >= xmin and sx[i] < xmax and sy[i] >= ymin and sy[ i] < ymax: if kepid[i] != 0 and kepmag[i] != 0.0: print('%6.1f %6.1f %9.5f %8.5f %5.2f KIC %d' % \ (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),float(kepmag[i]),int(kepid[i])), file = lf) elif kepid[i] != 0 and kepmag[i] == 0.0: print('%6.1f %6.1f %9.5f %8.5f KIC %d' % \ (float(sx[i]),float(sy[i]),float(ra[i]),float(dec[i]),int(kepid[i])), file = lf) else: print('%6.1f %6.1f %9.5f 
%8.5f' % (float( sx[i]), float(sy[i]), float(ra[i]), float(dec[i])), file=lf) # plot sources for i in range(len(sx) - 1, -1, -1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max( np.array([ 80.0, 80.0 + (2.5**(18.0 - max(12.0, float(kepmag[i])))) * 250.0 ])) pl.scatter(sx[i], sy[i], s=size, facecolors='g', edgecolors='k', alpha=0.4) else: pl.scatter(sx[i], sy[i], s=80, facecolors='r', edgecolors='k', alpha=0.4) # Sizes for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(16) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(16) # render plot and activate source selection self.srcinfo = [kepid, sx, sy, kepmag] pl.connect('button_release_event', self.on_mouse_release) pl.show(block=True) pl.close()
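# ----------------------------------------------------------------------------
# Note: the cadence-number read in the constructor above asks kepio.readTPF
# for the column 'rownumNO'; every other reader in this module requests
# 'CADENCENO', so that string looks like a stray find-and-replace.
#
# Illustrative sketch (an assumed helper, not called by the tasks in this
# file) of the inverse-distance PRF interpolation that kepprf and kepprfphot
# both perform: each of the five calibrated PRF sub-images is weighted by the
# reciprocal of its distance from the target, then the blend is renormalised.
# The two tasks disagree on the zero-distance guard (1.0e-6 in kepprf versus
# 1.0e6 in kepprfphot); the small value, which lets a coincident PRF dominate,
# looks like the intended one.

def _example_interpolate_prf(prfn, crval1p, crval2p, cdelt1p, cdelt2p, column, row):
    """Inverse-distance blend of the five PRF sub-images (sketch only)."""
    prf = np.zeros(np.shape(prfn[0]), dtype='float32')
    for i in range(5):
        dist = np.sqrt((column - crval1p[i]) ** 2 + (row - crval2p[i]) ** 2)
        if dist == 0.0:
            dist = 1.0e-6
        prf = prf + prfn[i] / dist
    return prf / np.nansum(prf) / cdelt1p[0] / cdelt2p[0]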
def kepprfphot(infile,outroot,columns,rows,fluxes,border,background,focus,prfdir,ranges, tolerance,ftolerance,qualflags,plt,clobber,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPRFPHOT -- ' call += 'infile='+infile+' ' call += 'outroot='+outroot+' ' call += 'columns='+columns+' ' call += 'rows='+rows+' ' call += 'fluxes='+fluxes+' ' call += 'border='+str(border)+' ' bground = 'n' if (background): bground = 'y' call += 'background='+bground+' ' focs = 'n' if (focus): focs = 'y' call += 'focus='+focs+' ' call += 'prfdir='+prfdir+' ' call += 'ranges='+ranges+' ' call += 'xtol='+str(tolerance)+' ' call += 'ftol='+str(ftolerance)+' ' quality = 'n' if (qualflags): quality = 'y' call += 'qualflags='+quality+' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot='+plotit+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPPRFPHOT started at',logfile,verbose) # number of sources if status == 0: work = fluxes.strip() work = re.sub(' ',',',work) work = re.sub(';',',',work) nsrc = len(work.split(',')) # construct inital guess vector for fit if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in range(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in range(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in range(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in range(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border+1)*2): guess.append(0.0) if status == 0 and focus: guess.append(1.0); guess.append(1.0); guess.append(0.0) # clobber output file for i in range(nsrc): outfile = '%s_%d.fits' % (outroot, i) if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPRFPHOT: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRFPHOT: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr1, status = \ kepio.readTPF(infile,'POS_CORR1',logfile,verbose) if status != 0: poscorr1 = numpy.zeros((len(barytime)),dtype='float32') poscorr1[:] = numpy.nan status = 0 if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr2, status = \ kepio.readTPF(infile,'POS_CORR2',logfile,verbose) if status != 0: poscorr2 = numpy.zeros((len(barytime)),dtype='float32') poscorr2[:] = numpy.nan status = 0 if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) if status == 0: struct, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(struct,infile,logfile,verbose,status) # input file keywords and mask map if status == 0: cards0 = struct[0].header.cards cards1 = struct[1].header.cards cards2 = struct[2].header.cards maskmap = copy(struct[2].data) npix = numpy.size(numpy.nonzero(maskmap)[0]) # print target data if status == 0 and verbose: print('') print((' KepID: %s' % kepid)) print((' RA (J2000): %s' % ra)) print(('Dec (J2000): %s' % dec)) print((' KepMag: %s' % kepmag)) print((' SkyGroup: %2s' % skygroup)) print((' Season: %2s' % str(season))) print((' Channel: %2s' % channel)) print((' Module: %2s' % module)) print((' Output: %1s' % output)) print('') # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str(output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRFPHOT: No PRF file found in ' + prfdir status = kepmsg.err(logfile,message,verbose) # read PRF images if status == 0: prfn = [0,0,0,0,0] crpix1p = numpy.zeros((5),dtype='float32') crpix2p = numpy.zeros((5),dtype='float32') crval1p = numpy.zeros((5),dtype='float32') crval2p = numpy.zeros((5),dtype='float32') cdelt1p = numpy.zeros((5),dtype='float32') cdelt2p = numpy.zeros((5),dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) PRFx = arange(0.5,shape(prfn[0])[1]+0.5) PRFy = arange(0.5,shape(prfn[0])[0]+0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]),dtype='float32') prfWeight = zeros((5),dtype='float32') for i in range(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) prf = prf / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = ydim / cdelt1p[0] prfDimX = xdim / cdelt2p[0] PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # construct input pixel image if status == 0: DATx = arange(column,column+xdim) DATy = arange(row,row+ydim) # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline(PRFx,PRFy,prf,kx=3,ky=3) # construct mesh for background model if status == 0: bx = numpy.arange(1.,float(xdim+1)) by = numpy.arange(1.,float(ydim+1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # Get time ranges for new photometry, flag good data if status == 0: barytime += bjdref tstart,tstop,status = kepio.timeranges(ranges,logfile,verbose) incl = numpy.zeros((len(barytime)),dtype='int') for rownum in range(len(barytime)): for winnum in range(len(tstart)): if barytime[rownum] >= tstart[winnum] and \ barytime[rownum] <= tstop[winnum] and \ (qual[rownum] == 0 or qualflags) and \ numpy.isfinite(barytime[rownum]) and \ numpy.isfinite(numpy.nansum(fluxpixels[rownum,:])): incl[rownum] = 1 if not numpy.in1d(1,incl): message = 'ERROR -- KEPPRFPHOT: No legal data within the range ' + ranges status = kepmsg.err(logfile,message,verbose) # filter out bad data if status == 0: n = 0 nincl = (incl == 1).sum() tim = zeros((nincl),'float64') tco = zeros((nincl),'float32') cad = zeros((nincl),'float32') flu = zeros((nincl,len(fluxpixels[0])),'float32') fer = zeros((nincl,len(fluxpixels[0])),'float32') pc1 = zeros((nincl),'float32') pc2 = zeros((nincl),'float32') qua = zeros((nincl),'float32') for rownum in range(len(barytime)): if incl[rownum] == 1: tim[n] = barytime[rownum] tco[n] = tcorr[rownum] cad[n] = cadno[rownum] flu[n,:] = fluxpixels[rownum] fer[n,:] = errpixels[rownum] pc1[n] = poscorr1[rownum] pc2[n] = poscorr2[rownum] qua[n] = qual[rownum] n += 1 barytime = tim * 1.0 tcorr = tco * 1.0 cadno = cad * 1.0 fluxpixels = flu * 1.0 errpixels = fer * 1.0 poscorr1 = pc1 * 1.0 poscorr2 = pc2 * 1.0 qual = qua * 1.0 # initialize plot arrays if status == 0: t = numpy.array([],dtype='float64') fl = []; dx = []; dy = []; bg = []; fx = []; fy = []; fa = []; rs = []; ch = [] for i in range(nsrc): 
fl.append(numpy.array([],dtype='float32')) dx.append(numpy.array([],dtype='float32')) dy.append(numpy.array([],dtype='float32')) # Preparing fit data message if status == 0: progress = numpy.arange(nincl) if verbose: txt = 'Preparing...' sys.stdout.write(txt) sys.stdout.flush() # single processor version if status == 0:# and not cmdLine: oldtime = 0.0 for rownum in range(numpy.min([80,len(barytime)])): try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum,:],errpixels[rownum,:],DATx,DATy,nsrc,border,xx,yy,PRFx,PRFy,splineInterpolation, guess,ftol,xtol,focus,background,rownum,80,float(x[i]),float(y[i]),False) guess = PRFfits(args) ftol = ftolerance; xtol = tolerance; oldtime = barytime[rownum] # Fit the time series: multi-processing if status == 0 and cmdLine: anslist = [] cad1 = 0; cad2 = 50 for i in range(int(nincl/50) + 1): try: fluxp = fluxpixels[cad1:cad2,:] errp = errpixels[cad1:cad2,:] progress = numpy.arange(cad1,cad2) except: fluxp = fluxpixels[cad1:nincl,:] errp = errpixels[cad1:nincl,:] progress = numpy.arange(cad1,nincl) try: args = zip(fluxp,errp,itertools.repeat(DATx),itertools.repeat(DATy), itertools.repeat(nsrc),itertools.repeat(border),itertools.repeat(xx), itertools.repeat(yy),itertools.repeat(PRFx),itertools.repeat(PRFy), itertools.repeat(splineInterpolation),itertools.repeat(guess), itertools.repeat(ftolerance),itertools.repeat(tolerance), itertools.repeat(focus),itertools.repeat(background),progress, itertools.repeat(numpy.arange(cad1,nincl)[-1]), itertools.repeat(float(x[0])), itertools.repeat(float(y[0])),itertools.repeat(True)) p = multiprocessing.Pool() model = [0.0] model = p.imap(PRFfits,args,chunksize=1) p.close() p.join() cad1 += 50; cad2 += 50 ans = array([array(item) for item in zip(*model)]) try: anslist = numpy.concatenate((anslist,ans.transpose()),axis=0) except: anslist = ans.transpose() guess = anslist[-1] ans = anslist.transpose() except: pass # single processor version if status == 0 and not cmdLine: oldtime = 0.0; ans = [] # for rownum in xrange(1,10): for rownum in range(nincl): proctime = time.time() try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum,:],errpixels[rownum,:],DATx,DATy,nsrc,border,xx,yy,PRFx,PRFy,splineInterpolation, guess,ftol,xtol,focus,background,rownum,nincl,float(x[0]),float(y[0]),True) guess = PRFfits(args) ans.append(guess) ftol = ftolerance; xtol = tolerance; oldtime = barytime[rownum] ans = array(ans).transpose() # unpack the best fit parameters if status == 0: flux = []; OBJx = []; OBJy = [] na = shape(ans)[1] for i in range(nsrc): flux.append(ans[i,:]) OBJx.append(ans[nsrc+i,:]) OBJy.append(ans[nsrc*2+i,:]) try: bterms = border + 1 if bterms == 1: b = ans[nsrc*3,:] else: b = array([]) bkg = [] for i in range(na): bcoeff = array([ans[nsrc*3:nsrc*3+bterms,i],ans[nsrc*3+bterms:nsrc*3+bterms*2,i]]) bkg.append(kepfunc.polyval2d(xx,yy,bcoeff)) b = numpy.append(b,nanmean(bkg[-1].reshape(bkg[-1].size))) except: b = zeros((na)) if focus: wx = ans[-3,:]; wy = ans[-2,:]; angle = ans[-1,:] else: wx = ones((na)); wy = ones((na)); angle = zeros((na)) # constuct model PRF in detector coordinates if status == 0: residual = []; chi2 = [] for i in range(na): f = empty((nsrc)) x = empty((nsrc)) y = empty((nsrc)) for j in range(nsrc): f[j] = flux[j][i] x[j] = OBJx[j][i] y[j] = OBJy[j][i] PRFfit = kepfunc.PRF2DET(f,x,y,DATx,DATy,wx[i],wy[i],angle[i],splineInterpolation) if background and bterms == 1: PRFfit = PRFfit + b[i] if 
def kepprfphot(infile,outroot,columns,rows,fluxes,border,background,focus,prfdir,ranges, tolerance,ftolerance,qualflags,plt,clobber,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPRFPHOT -- ' call += 'infile='+infile+' ' call += 'outroot='+outroot+' ' call += 'columns='+columns+' ' call += 'rows='+rows+' ' call += 'fluxes='+fluxes+' ' call += 'border='+str(border)+' ' bground = 'n' if (background): bground = 'y' call += 'background='+bground+' ' focs = 'n' if (focus): focs = 'y' call += 'focus='+focs+' ' call += 'prfdir='+prfdir+' ' call += 'ranges='+ranges+' ' call += 'xtol='+str(tolerance)+' ' call += 'ftol='+str(ftolerance)+' ' quality = 'n' if (qualflags): quality = 'y' call += 'qualflags='+quality+' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot='+plotit+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPPRFPHOT started at',logfile,verbose) # number of sources if status == 0: work = fluxes.strip() work = re.sub(' ',',',work) work = re.sub(';',',',work) nsrc = len(work.split(',')) # construct inital guess vector for fit if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in xrange(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in xrange(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border+1)*2): guess.append(0.0) if status == 0 and focus: guess.append(1.0); guess.append(1.0); guess.append(0.0) # clobber output file for i in range(nsrc): outfile = '%s_%d.fits' % (outroot, i) if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPRFPHOT: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRFPHOT: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr1, status = \ kepio.readTPF(infile,'POS_CORR1',logfile,verbose) if status != 0: poscorr1 = numpy.zeros((len(barytime)),dtype='float32') poscorr1[:] = numpy.nan status = 0 if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr2, status = \ kepio.readTPF(infile,'POS_CORR2',logfile,verbose) if status != 0: poscorr2 = numpy.zeros((len(barytime)),dtype='float32') poscorr2[:] = numpy.nan status = 0 if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) if status == 0: struct, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(struct,infile,logfile,verbose,status) # input file keywords and mask map if status == 0: cards0 = struct[0].header.cards cards1 = struct[1].header.cards cards2 = struct[2].header.cards maskmap = copy(struct[2].data) npix = numpy.size(numpy.nonzero(maskmap)[0]) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str(output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRFPHOT: No PRF file found in ' + prfdir status = kepmsg.err(logfile,message,verbose) # read PRF images if status == 0: prfn = [0,0,0,0,0] crpix1p = numpy.zeros((5),dtype='float32') crpix2p = numpy.zeros((5),dtype='float32') crval1p = numpy.zeros((5),dtype='float32') crval2p = numpy.zeros((5),dtype='float32') cdelt1p = numpy.zeros((5),dtype='float32') cdelt2p = numpy.zeros((5),dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) PRFx = arange(0.5,shape(prfn[0])[1]+0.5) PRFy = arange(0.5,shape(prfn[0])[0]+0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]),dtype='float32') prfWeight = zeros((5),dtype='float32') for i in xrange(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) prf = prf / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = ydim / cdelt1p[0] prfDimX = xdim / cdelt2p[0] PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # construct input pixel image if status == 0: DATx = arange(column,column+xdim) DATy = arange(row,row+ydim) # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline(PRFx,PRFy,prf,kx=3,ky=3) # construct mesh for background model if status == 0: bx = numpy.arange(1.,float(xdim+1)) by = numpy.arange(1.,float(ydim+1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # Get time ranges for new photometry, flag good data if status == 0: barytime += bjdref tstart,tstop,status = kepio.timeranges(ranges,logfile,verbose) incl = numpy.zeros((len(barytime)),dtype='int') for rownum in xrange(len(barytime)): for winnum in xrange(len(tstart)): if barytime[rownum] >= tstart[winnum] and \ barytime[rownum] <= tstop[winnum] and \ (qual[rownum] == 0 or qualflags) and \ numpy.isfinite(barytime[rownum]) and \ numpy.isfinite(numpy.nansum(fluxpixels[rownum,:])): incl[rownum] = 1 if not numpy.in1d(1,incl): message = 'ERROR -- KEPPRFPHOT: No legal data within the range ' + ranges status = kepmsg.err(logfile,message,verbose) # filter out bad data if status == 0: n = 0 nincl = (incl == 1).sum() tim = zeros((nincl),'float64') tco = zeros((nincl),'float32') cad = zeros((nincl),'float32') flu = zeros((nincl,len(fluxpixels[0])),'float32') fer = zeros((nincl,len(fluxpixels[0])),'float32') pc1 = zeros((nincl),'float32') pc2 = zeros((nincl),'float32') qua = zeros((nincl),'float32') for rownum in xrange(len(barytime)): if incl[rownum] == 1: tim[n] = barytime[rownum] tco[n] = tcorr[rownum] cad[n] = cadno[rownum] flu[n,:] = fluxpixels[rownum] fer[n,:] = errpixels[rownum] pc1[n] = poscorr1[rownum] pc2[n] = poscorr2[rownum] qua[n] = qual[rownum] n += 1 barytime = tim * 1.0 tcorr = tco * 1.0 cadno = cad * 1.0 fluxpixels = flu * 1.0 errpixels = fer * 1.0 poscorr1 = pc1 * 1.0 poscorr2 = pc2 * 1.0 qual = qua * 1.0 # initialize plot arrays if status == 0: t = numpy.array([],dtype='float64') fl = []; dx = []; dy = []; bg = []; fx = []; fy = []; fa = []; rs = []; ch = [] for i in range(nsrc): 
fl.append(numpy.array([],dtype='float32')) dx.append(numpy.array([],dtype='float32')) dy.append(numpy.array([],dtype='float32')) # Preparing fit data message if status == 0: progress = numpy.arange(nincl) if verbose: txt = 'Preparing...' sys.stdout.write(txt) sys.stdout.flush() # single processor version if status == 0:# and not cmdLine: oldtime = 0.0 for rownum in xrange(numpy.min([80,len(barytime)])): try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum,:],errpixels[rownum,:],DATx,DATy,nsrc,border,xx,yy,PRFx,PRFy,splineInterpolation, guess,ftol,xtol,focus,background,rownum,80,float(x[i]),float(y[i]),False) guess = PRFfits(args) ftol = ftolerance; xtol = tolerance; oldtime = barytime[rownum] # Fit the time series: multi-processing if status == 0 and cmdLine: anslist = [] cad1 = 0; cad2 = 50 for i in range(int(nincl/50) + 1): try: fluxp = fluxpixels[cad1:cad2,:] errp = errpixels[cad1:cad2,:] progress = numpy.arange(cad1,cad2) except: fluxp = fluxpixels[cad1:nincl,:] errp = errpixels[cad1:nincl,:] progress = numpy.arange(cad1,nincl) try: args = itertools.izip(fluxp,errp,itertools.repeat(DATx),itertools.repeat(DATy), itertools.repeat(nsrc),itertools.repeat(border),itertools.repeat(xx), itertools.repeat(yy),itertools.repeat(PRFx),itertools.repeat(PRFy), itertools.repeat(splineInterpolation),itertools.repeat(guess), itertools.repeat(ftolerance),itertools.repeat(tolerance), itertools.repeat(focus),itertools.repeat(background),progress, itertools.repeat(numpy.arange(cad1,nincl)[-1]), itertools.repeat(float(x[0])), itertools.repeat(float(y[0])),itertools.repeat(True)) p = multiprocessing.Pool() model = [0.0] model = p.imap(PRFfits,args,chunksize=1) p.close() p.join() cad1 += 50; cad2 += 50 ans = array([array(item) for item in zip(*model)]) try: anslist = numpy.concatenate((anslist,ans.transpose()),axis=0) except: anslist = ans.transpose() guess = anslist[-1] ans = anslist.transpose() except: pass # single processor version if status == 0 and not cmdLine: oldtime = 0.0; ans = [] # for rownum in xrange(1,10): for rownum in xrange(nincl): proctime = time.time() try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum,:],errpixels[rownum,:],DATx,DATy,nsrc,border,xx,yy,PRFx,PRFy,splineInterpolation, guess,ftol,xtol,focus,background,rownum,nincl,float(x[0]),float(y[0]),True) guess = PRFfits(args) ans.append(guess) ftol = ftolerance; xtol = tolerance; oldtime = barytime[rownum] ans = array(ans).transpose() # unpack the best fit parameters if status == 0: flux = []; OBJx = []; OBJy = [] na = shape(ans)[1] for i in range(nsrc): flux.append(ans[i,:]) OBJx.append(ans[nsrc+i,:]) OBJy.append(ans[nsrc*2+i,:]) try: bterms = border + 1 if bterms == 1: b = ans[nsrc*3,:] else: b = array([]) bkg = [] for i in range(na): bcoeff = array([ans[nsrc*3:nsrc*3+bterms,i],ans[nsrc*3+bterms:nsrc*3+bterms*2,i]]) bkg.append(kepfunc.polyval2d(xx,yy,bcoeff)) b = numpy.append(b,nanmean(bkg[-1].reshape(bkg[-1].size))) except: b = zeros((na)) if focus: wx = ans[-3,:]; wy = ans[-2,:]; angle = ans[-1,:] else: wx = ones((na)); wy = ones((na)); angle = zeros((na)) # constuct model PRF in detector coordinates if status == 0: residual = []; chi2 = [] for i in range(na): f = empty((nsrc)) x = empty((nsrc)) y = empty((nsrc)) for j in range(nsrc): f[j] = flux[j][i] x[j] = OBJx[j][i] y[j] = OBJy[j][i] PRFfit = kepfunc.PRF2DET(f,x,y,DATx,DATy,wx[i],wy[i],angle[i],splineInterpolation) if background and bterms == 1: PRFfit = PRFfit 
+ b[i] if background and bterms > 1: PRFfit = PRFfit + bkg[i] # calculate residual of DATA - FIT xdim = shape(xx)[1] ydim = shape(yy)[0] DATimg = numpy.empty((ydim,xdim)) n = 0 for k in range(ydim): for j in range(xdim): DATimg[k,j] = fluxpixels[i,n] n += 1 PRFres = DATimg - PRFfit residual.append(numpy.nansum(PRFres) / npix) # calculate the sum squared difference between data and model chi2.append(abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit))) # load the output arrays if status == 0: otime = barytime - bjdref otimecorr = tcorr ocadenceno = cadno opos_corr1 = poscorr1 opos_corr2 = poscorr2 oquality = qual opsf_bkg = b opsf_focus1 = wx opsf_focus2 = wy opsf_rotation = angle opsf_residual = residual opsf_chi2 = chi2 opsf_flux_err = numpy.empty((na)); opsf_flux_err.fill(numpy.nan) opsf_centr1_err = numpy.empty((na)); opsf_centr1_err.fill(numpy.nan) opsf_centr2_err = numpy.empty((na)); opsf_centr2_err.fill(numpy.nan) opsf_bkg_err = numpy.empty((na)); opsf_bkg_err.fill(numpy.nan) opsf_flux = [] opsf_centr1 = [] opsf_centr2 = [] for i in range(nsrc): opsf_flux.append(flux[i]) opsf_centr1.append(OBJx[i]) opsf_centr2.append(OBJy[i]) # load the plot arrays if status == 0: t = barytime for i in range(nsrc): fl[i] = flux[i] dx[i] = OBJx[i] dy[i] = OBJy[i] bg = b fx = wx fy = wy fa = angle rs = residual ch = chi2 # construct output primary extension if status == 0: for j in range(nsrc): hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].key not in hdu0.header.keys(): hdu0.header.update(cards0[i].key, cards0[i].value, cards0[i].comment) else: hdu0.header.cards[cards0[i].key].comment = cards0[i].comment status = kepkey.history(call,hdu0,outfile,logfile,verbose) outstr = HDUList(hdu0) # construct output light curve extension col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=otime) col2 = Column(name='TIMECORR',format='E',unit='d',array=otimecorr) col3 = Column(name='CADENCENO',format='J',array=ocadenceno) col4 = Column(name='PSF_FLUX',format='E',unit='e-/s',array=opsf_flux[j]) col5 = Column(name='PSF_FLUX_ERR',format='E',unit='e-/s',array=opsf_flux_err) col6 = Column(name='PSF_BKG',format='E',unit='e-/s/pix',array=opsf_bkg) col7 = Column(name='PSF_BKG_ERR',format='E',unit='e-/s',array=opsf_bkg_err) col8 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=opsf_centr1[j]) col9 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=opsf_centr1_err) col10 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=opsf_centr2[j]) col11 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=opsf_centr2_err) col12 = Column(name='PSF_FOCUS1',format='E',array=opsf_focus1) col13 = Column(name='PSF_FOCUS2',format='E',array=opsf_focus2) col14 = Column(name='PSF_ROTATION',format='E',unit='deg',array=opsf_rotation) col15 = Column(name='PSF_RESIDUAL',format='E',unit='e-/s',array=opsf_residual) col16 = Column(name='PSF_CHI2',format='E',array=opsf_chi2) col17 = Column(name='POS_CORR1',format='E',unit='pixel',array=opos_corr1) col18 = Column(name='POS_CORR2',format='E',unit='pixel',array=opos_corr2) col19 = Column(name='SAP_QUALITY',format='J',array=oquality) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, col12,col13,col14,col15,col16,col17,col18,col19]) hdu1 = new_table(cols) for i in range(len(cards1)): if (cards1[i].key not in hdu1.header.keys() and cards1[i].key[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY', '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN', '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC', 
'12PC','21PC','22PC']): hdu1.header.update(cards1[i].key, cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].key not in hdu2.header.keys(): hdu2.header.update(cards2[i].key, cards2[i].value, cards2[i].comment) else: hdu2.header.cards[cards2[i].key].comment = cards2[i].comment outstr.append(hdu2) # write output file outstr.writeto(outroot + '_' + str(j) + '.fits',checksum=True) # close input structure status = kepio.closefits(struct,logfile,verbose) # clean up x-axis unit if status == 0: barytime0 = float(int(t[0] / 100) * 100.0) t -= barytime0 t = numpy.insert(t,[0],[t[0]]) t = numpy.append(t,[t[-1]]) xlab = 'BJD $-$ %d' % barytime0 # plot the light curves if status == 0: bg = numpy.insert(bg,[0],[-1.0e10]) bg = numpy.append(bg,-1.0e10) fx = numpy.insert(fx,[0],[fx[0]]) fx = numpy.append(fx,fx[-1]) fy = numpy.insert(fy,[0],[fy[0]]) fy = numpy.append(fy,fy[-1]) fa = numpy.insert(fa,[0],[fa[0]]) fa = numpy.append(fa,fa[-1]) rs = numpy.insert(rs,[0],[-1.0e10]) rs = numpy.append(rs,-1.0e10) ch = numpy.insert(ch,[0],[-1.0e10]) ch = numpy.append(ch,-1.0e10) for i in range(nsrc): # clean up y-axis units nrm = math.ceil(math.log10(numpy.nanmax(fl[i]))) - 1.0 fl[i] /= 10**nrm if nrm == 0: ylab1 = 'e$^-$ s$^{-1}$' else: ylab1 = '10$^{%d}$ e$^-$ s$^{-1}$' % nrm xx = copy(dx[i]) yy = copy(dy[i]) ylab2 = 'offset (pixels)' # data limits xmin = numpy.nanmin(t) xmax = numpy.nanmax(t) ymin1 = numpy.nanmin(fl[i]) ymax1 = numpy.nanmax(fl[i]) ymin2 = numpy.nanmin(xx) ymax2 = numpy.nanmax(xx) ymin3 = numpy.nanmin(yy) ymax3 = numpy.nanmax(yy) ymin4 = numpy.nanmin(bg[1:-1]) ymax4 = numpy.nanmax(bg[1:-1]) ymin5 = numpy.nanmin([numpy.nanmin(fx),numpy.nanmin(fy)]) ymax5 = numpy.nanmax([numpy.nanmax(fx),numpy.nanmax(fy)]) ymin6 = numpy.nanmin(fa[1:-1]) ymax6 = numpy.nanmax(fa[1:-1]) ymin7 = numpy.nanmin(rs[1:-1]) ymax7 = numpy.nanmax(rs[1:-1]) ymin8 = numpy.nanmin(ch[1:-1]) ymax8 = numpy.nanmax(ch[1:-1]) xr = xmax - xmin yr1 = ymax1 - ymin1 yr2 = ymax2 - ymin2 yr3 = ymax3 - ymin3 yr4 = ymax4 - ymin4 yr5 = ymax5 - ymin5 yr6 = ymax6 - ymin6 yr7 = ymax7 - ymin7 yr8 = ymax8 - ymin8 fl[i] = numpy.insert(fl[i],[0],[0.0]) fl[i] = numpy.append(fl[i],0.0) # plot style try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 12, 'ytick.labelsize': 12} pylab.rcParams.update(params) except: pass # define size of plot on monitor screen pylab.figure(str(i+1) + ' ' + str(time.asctime(time.localtime())),figsize=[12,16]) # delete any fossil plots in the matplotlib window pylab.clf() # position first axes inside the plotting window ax = pylab.axes([0.11,0.523,0.78,0.45]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # no x-label pylab.setp(pylab.gca(),xticklabels=[]) # plot flux vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,fl[i][j]) else: pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') 
pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0) # plot the fill color below data time series, with no data gaps pylab.fill(t,fl[i],fc='#ffff00',linewidth=0.0,alpha=0.2) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin1 - yr1 * 0.01 <= 0.0: pylab.ylim(1.0e-10, ymax1 + yr1 * 0.01) else: pylab.ylim(ymin1 - yr1 * 0.01, ymax1 + yr1 * 0.01) # plot labels # pylab.xlabel(xlab, {'color' : 'k'}) try: pylab.ylabel('Source (' + ylab1 + ')', {'color' : 'k'}) except: ylab1 = '10**%d e-/s' % nrm pylab.ylabel('Source (' + ylab1 + ')', {'color' : 'k'}) # make grid on plot pylab.grid() # plot centroid tracks - position second axes inside the plotting window if focus and background: axs = [0.11,0.433,0.78,0.09] elif background or focus: axs = [0.11,0.388,0.78,0.135] else: axs = [0.11,0.253,0.78,0.27] ax1 = pylab.axes(axs) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot dx vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,xx[j-1]) else: ax1.plot(ltime,ldata,color='r',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='r',linestyle='-',linewidth=1.0) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin2 - yr2 * 0.03, ymax2 + yr2 * 0.03) # plot labels ax1.set_ylabel('X-' + ylab2, color='k', fontsize=11) # position second axes inside the plotting window ax2 = ax1.twinx() # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot dy vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,yy[j-1]) else: ax2.plot(ltime,ldata,color='g',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax2.plot(ltime,ldata,color='g',linestyle='-',linewidth=1.0) # define plot y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin3 - yr3 * 0.03, ymax3 + yr3 * 0.03) # plot labels ax2.set_ylabel('Y-' + ylab2, color='k',fontsize=11) # background - position third axes inside the plotting window if background and focus: axs = [0.11,0.343,0.78,0.09] if background and not focus: axs = [0.11,0.253,0.78,0.135] if background: ax1 = pylab.axes(axs) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot background vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,bg[j]) else: 
ax1.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0) # plot the fill color below data time series, with no data gaps pylab.fill(t,bg,fc='#ffff00',linewidth=0.0,alpha=0.2) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin4 - yr4 * 0.03, ymax4 + yr4 * 0.03) # plot labels ax1.set_ylabel('Background \n(e$^-$ s$^{-1}$ pix$^{-1}$)', multialignment='center', color='k',fontsize=11) # make grid on plot pylab.grid() # position focus axes inside the plotting window if focus and background: axs = [0.11,0.253,0.78,0.09] if focus and not background: axs = [0.11,0.253,0.78,0.135] if focus: ax1 = pylab.axes(axs) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot x-axis PSF width vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,fx[j]) else: ax1.plot(ltime,ldata,color='r',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='r',linestyle='-',linewidth=1.0) # plot y-axis PSF width vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,fy[j]) else: ax1.plot(ltime,ldata,color='g',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='g',linestyle='-',linewidth=1.0) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin5 - yr5 * 0.03, ymax5 + yr5 * 0.03) # plot labels ax1.set_ylabel('Pixel Scale\nFactor', multialignment='center', color='k',fontsize=11) # Focus rotation - position second axes inside the plotting window ax2 = ax1.twinx() # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot dy vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,fa[j]) else: ax2.plot(ltime,ldata,color='#000080',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax2.plot(ltime,ldata,color='#000080',linestyle='-',linewidth=1.0) # define plot y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin6 - yr6 * 0.03, ymax6 + yr6 * 0.03) # plot labels ax2.set_ylabel('Rotation (deg)', color='k',fontsize=11) # fit residuals - position fifth axes inside the plotting window axs = [0.11,0.163,0.78,0.09] ax1 = pylab.axes(axs) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) 
pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.setp(pylab.gca(),xticklabels=[]) # plot residual vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,rs[j]) else: ax1.plot(ltime,ldata,color='b',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='b',linestyle='-',linewidth=1.0) # plot the fill color below data time series, with no data gaps pylab.fill(t,rs,fc='#ffff00',linewidth=0.0,alpha=0.2) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin7 - yr7 * 0.03, ymax7 + yr7 * 0.03) # plot labels ax1.set_ylabel('Residual \n(e$^-$ s$^{-1}$)', multialignment='center', color='k',fontsize=11) # make grid on plot pylab.grid() # fit chi square - position sixth axes inside the plotting window axs = [0.11,0.073,0.78,0.09] ax1 = pylab.axes(axs) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # plot background vs time ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = numpy.append(ltime,t[j]) ldata = numpy.append(ldata,ch[j]) else: ax1.plot(ltime,ldata,color='b',linestyle='-',linewidth=1.0) ltime = numpy.array([],dtype='float64') ldata = numpy.array([],dtype='float32') ax1.plot(ltime,ldata,color='b',linestyle='-',linewidth=1.0) # plot the fill color below data time series, with no data gaps pylab.fill(t,ch,fc='#ffff00',linewidth=0.0,alpha=0.2) # define plot x and y limits pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) pylab.ylim(ymin8 - yr8 * 0.03, ymax8 + yr8 * 0.03) # plot labels ax1.set_ylabel('$\chi^2$ (%d dof)' % (npix-len(guess)-1),color='k',fontsize=11) pylab.xlabel(xlab, {'color' : 'k'}) # make grid on plot pylab.grid() # render plot if status == 0: pylab.savefig(outroot + '_' + str(i) + '.png') if status == 0 and plt: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\n\nKEPPRFPHOT ended at',logfile,verbose) return
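# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original tasks in this module): every
# panel plotted in kepprfphot above uses the same idiom -- consecutive
# cadences are joined by a line only when the time step is smaller than twice
# the nominal cadence, so that data gaps are not bridged by spurious line
# segments. A minimal, self-contained version of that pattern is sketched
# below; the function name and its arguments are assumptions made for this
# example only.

import pylab

def plot_gapped_series(times, values, cadence_days, **plotargs):

# plot a time series as separate segments, breaking the line at data gaps

    gap = 2.0 * cadence_days
    seg_t = [times[0]]
    seg_y = [values[0]]
    for j in range(1, len(times)):
        if times[j] - times[j-1] < gap:
            seg_t.append(times[j])
            seg_y.append(values[j])
        else:

# flush the finished segment and start a new one after the gap

            pylab.plot(seg_t, seg_y, **plotargs)
            seg_t = [times[j]]
            seg_y = [values[j]]
    pylab.plot(seg_t, seg_y, **plotargs)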
def kepitermask(infile, outfile, plotfile, column, row, timescale, nsig, stepsize,
                winsize, npoly, niter, clobber, verbose, logfile, status, cmdLine=False):

# input arguments

    status = 0
    seterr(all="ignore")

# log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPITERMASK -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'plotfile=' + plotfile + ' '
    call += 'column=' + str(column) + ' '
    call += 'row=' + str(row) + ' '
    call += 'timescale=' + str(timescale) + ' '
    call += 'nsig=' + str(nsig) + ' '
    call += 'stepsize=' + str(stepsize) + ' '
    call += 'winsize=' + str(winsize) + ' '
    call += 'npoly=' + str(npoly) + ' '
    call += 'niter=' + str(niter) + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

# start time

    kepmsg.clock('KEPITERMASK started at', logfile, verbose)

# test log file

    logfile = kepmsg.test(logfile)

# clobber output file

    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPITERMASK: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile, message, verbose)

# open TPF FITS file

    if status == 0:
        try:
            kepid, channel, skygroup, module, output, quarter, season, \
                ra, dec, co, ro, kepmag, xdim, ydim, work1, status = \
                kepio.readTPF(infile, 'TIME', logfile, verbose)
        except:
            message = 'ERROR -- KEPITERMASK: is %s a Target Pixel File? ' % infile
            status = kepmsg.err(logfile, message, verbose)

# print target data

    if status == 0:
        print ''
        print '      KepID: %s' % kepid
        print ' RA (J2000): %s' % ra
        print 'Dec (J2000): %s' % dec
        print '     KepMag: %s' % kepmag
        print '   SkyGroup: %2s' % skygroup
        print '     Season: %2s' % str(season)
        print '    Channel: %2s' % channel
        print '     Module: %2s' % module
        print '     Output: %1s' % output
        print ''

# read mask definition data from TPF file

    if status == 0:
        maskmap, pixcoordx, pixcoordy, status = kepio.readMaskDefinition(infile, logfile, verbose)
        pixcoordx = rot90(pixcoordx)
        pixcoordy = flipud(rot90(pixcoordy))
        maskmap[:, :] = 0.0

# which pixel does the target reside on?

    if status == 0:
        x = where(pixcoordx == float(column))[1][0]
        y = where(pixcoordy == float(row))[0][0]
        maskmap[y, x] = 1.0

# read time series data

    if status == 0:
        instr = pyfits.open(infile, mode='readonly', memmap=True)
        work1 = instr[1].data.field('TIME')[:]
        work2 = instr[1].data.field('FLUX')[:]
        work3 = instr[1].data.field('QUALITY')[:]

# how many quality = 0 rows?

    if status == 0:
        npts = 0
        nrows = len(work1)
        for i in range(nrows):
            if work3[i] == 0 and numpy.isfinite(work1[i]):
                npts += 1
        time = empty((npts))
        flux = empty((npts, ydim, xdim))
        quality = empty((npts))

# construct pixel light curves from quality = 0 data

    if status == 0:
        n = 0
        for i in range(nrows):
            if work3[i] == 0 and numpy.isfinite(work1[i]):
                time[n] = work1[i]
                flux[n] = work2[i, :, :]
                quality[n] = work3[i]
                n += 1

# light curve from the central pixel

    if status == 0:
        (pr, pc) = where(maskmap == 1.0)
        best_lc = flux[:, pr[0], pc[0]]

# calculate median CDPP

    if status == 0:
        best_median_cdpp, best_cdpp, status = \
            GetCDPP(time, best_lc, npoly, nsig, niter, winsize, stepsize,
                    timescale, logfile, verbose, status)

# does another pixel improve CDPP of the target?

    if status == 0:
        trial_med = best_median_cdpp
#        while best_median_cdpp == trial_med:
        for i in range(70):
            trial_lc, trial_cdpp, trial_med, xpix, ypix, status = \
                AddPixelToAperture(time, flux, maskmap, best_lc, npoly, nsig, niter,
                                   winsize, stepsize, timescale, logfile, verbose)
#            if trial_med < best_median_cdpp:
            if trial_med < 1e10:
                best_lc = trial_lc
                best_cdpp = trial_cdpp
                best_median_cdpp = trial_med
                maskmap[ypix, xpix] = 1.0
            print maskmap
            print i, best_median_cdpp

# plot style

    if status == 0:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.0,
                      'axes.labelsize': 32,
                      'axes.font': 'sans-serif',
                      'axes.fontweight': 'bold',
                      'text.fontsize': 8,
                      'legend.fontsize': 8,
                      'xtick.labelsize': 12,
                      'ytick.labelsize': 12}
            pylab.rcParams.update(params)
        except:
            pass

# tmp

    pylab.plot(time, best_lc, color='#0000ff', linestyle='-', linewidth=1.0)

# render plot

    if cmdLine:
        pylab.show()
    else:
        pylab.ion()
        pylab.plot([])
        pylab.ioff()
    if plotfile.lower() != 'none':
        pylab.savefig(plotfile)

# stop time

    if status == 0:
        kepmsg.clock('KEPITERMASK ended at', logfile, verbose)

    return
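# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original tasks in this module):
# kepitermask grows the photometric aperture greedily. Starting from the
# pixel under the target it repeatedly calls AddPixelToAperture, which
# returns the best trial pixel, and adds pixels to the mask; note that the
# improvement test on the median CDPP is commented out above, so every trial
# pixel is currently accepted. The toy below shows the intended greedy loop
# with a deliberately simplified noise proxy (point-to-point scatter) in
# place of the CDPP computed by GetCDPP; every name in it is an assumption
# made for this example only.

import numpy

def _scatter(lc):

# simplified noise proxy: rms of the point-to-point differences

    return numpy.nanstd(numpy.diff(lc))

def grow_aperture(flux_cube, start_row, start_col, max_pixels=20):

# greedy aperture growth over a flux cube of shape (ncadence, nrow, ncol)

    nrow, ncol = flux_cube.shape[1:]
    mask = numpy.zeros((nrow, ncol), dtype=bool)
    mask[start_row, start_col] = True
    best_lc = flux_cube[:, start_row, start_col].copy()
    best_noise = _scatter(best_lc)
    while mask.sum() < max_pixels:
        trial = None
        for r in range(nrow):
            for c in range(ncol):

# consider only unused pixels that touch the current aperture

                if mask[r, c] or not mask[max(r-1, 0):r+2, max(c-1, 0):c+2].any():
                    continue
                lc = best_lc + flux_cube[:, r, c]
                noise = _scatter(lc)
                if trial is None or noise < trial[0]:
                    trial = (noise, r, c, lc)

# stop when no neighbouring pixel improves the noise metric

        if trial is None or trial[0] >= best_noise:
            break
        best_noise, r, c, best_lc = trial
        mask[r, c] = True
    return mask, best_lc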
def keppixseries(infile, outfile, plotfile, plottype, filter, function, cutoff, clobber, verbose, logfile, status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPPIXSERIES -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'plotfile=' + plotfile + ' ' call += 'plottype=' + plottype + ' ' filt = 'n' if (filter): filt = 'y' call += 'filter=' + filt + ' ' call += 'function=' + function + ' ' call += 'cutoff=' + str(cutoff) + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPPIXSERIES started at', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPIXSERIES: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile, message, verbose) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, verbose) # print target data if status == 0: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # how many quality = 0 rows? 
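# NOTE: a cadence is kept only if its QUALITY flag is zero, its timestamp is
# finite and the flux of the reference pixel at index ydim*xdim/2 is finite;
# the surviving cadences fill the TIME, TIMECORR, CADENCENO and QUALITY
# vectors and a pixseries cube of shape (ydim,xdim,npts), with matching
# 1-sigma uncertainties in errseries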
if status == 0: npts = 0 nrows = len(fluxpixels) for i in range(nrows): if qual[i] == 0 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]): npts += 1 time = empty((npts)) timecorr = empty((npts)) cadenceno = empty((npts)) quality = empty((npts)) pixseries = empty((ydim, xdim, npts)) errseries = empty((ydim, xdim, npts)) # construct output light curves if status == 0: np = 0 for i in range(ydim): for j in range(xdim): npts = 0 for k in range(nrows): if qual[k] == 0 and \ numpy.isfinite(barytime[k]) and \ numpy.isfinite(fluxpixels[k,ydim*xdim/2]): time[npts] = barytime[k] timecorr[npts] = tcorr[k] cadenceno[npts] = cadno[k] quality[npts] = qual[k] pixseries[i, j, npts] = fluxpixels[k, np] errseries[i, j, npts] = errpixels[k, np] npts += 1 np += 1 # define data sampling if status == 0 and filter: tpf, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0 and filter: cadence, status = kepkey.cadence(tpf[1], infile, logfile, verbose) tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) # define convolution function if status == 0 and filter: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0, dx / 2 - 1.0, timescale], linspace(0, dx - 1, dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0, dx - 1, dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) # pad time series at both ends with noise model if status == 0 and filter: for i in range(ydim): for j in range(xdim): ave, sigma = kepstat.stdev(pixseries[i, j, :len(filtfunc)]) padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:]) ave, sigma = kepstat.stdev(pixseries[i, j, -len(filtfunc):]) padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma)) # convolve data if status == 0: convolved = convolve(padded, filtfunc, 'same') # remove padding from the output array if status == 0: outdata = convolved[len(filtfunc):-len(filtfunc)] # subtract low frequencies if status == 0: outmedian = median(outdata) pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian # construct output file if status == 0 and ydim * xdim < 1000: instruct, status = kepio.openfits(infile, 'readonly', logfile, verbose) status = kepkey.history(call, instruct[0], outfile, logfile, verbose) hdulist = HDUList(instruct[0]) cols = [] cols.append( Column(name='TIME', format='D', unit='BJD - 2454833', disp='D12.7', array=time)) cols.append( Column(name='TIMECORR', format='E', unit='d', disp='E13.6', array=timecorr)) cols.append( Column(name='CADENCENO', format='J', disp='I10', array=cadenceno)) cols.append(Column(name='QUALITY', format='J', array=quality)) for i in range(ydim): for j in range(xdim): colname = 'COL%d_ROW%d' % (i + column, j + row) cols.append( Column(name=colname, format='E', disp='E13.6', array=pixseries[i, j, :])) hdu1 = new_table(ColDefs(cols)) try: hdu1.header.update('INHERIT', True, 'inherit the primary header') except: status = 0 try: hdu1.header.update('EXTNAME', 'PIXELSERIES', 'name of extension') except: status = 0 try: hdu1.header.update( 'EXTVER', instruct[1].header['EXTVER'], 'extension version number (not format version)') except: status = 0 try: hdu1.header.update('TELESCOP', instruct[1].header['TELESCOP'], 'telescope') except: status = 0 
try: hdu1.header.update('INSTRUME', instruct[1].header['INSTRUME'], 'detector type') except: status = 0 try: hdu1.header.update('OBJECT', instruct[1].header['OBJECT'], 'string version of KEPLERID') except: status = 0 try: hdu1.header.update('KEPLERID', instruct[1].header['KEPLERID'], 'unique Kepler target identifier') except: status = 0 try: hdu1.header.update('RADESYS', instruct[1].header['RADESYS'], 'reference frame of celestial coordinates') except: status = 0 try: hdu1.header.update('RA_OBJ', instruct[1].header['RA_OBJ'], '[deg] right ascension from KIC') except: status = 0 try: hdu1.header.update('DEC_OBJ', instruct[1].header['DEC_OBJ'], '[deg] declination from KIC') except: status = 0 try: hdu1.header.update('EQUINOX', instruct[1].header['EQUINOX'], 'equinox of celestial coordinate system') except: status = 0 try: hdu1.header.update('TIMEREF', instruct[1].header['TIMEREF'], 'barycentric correction applied to times') except: status = 0 try: hdu1.header.update('TASSIGN', instruct[1].header['TASSIGN'], 'where time is assigned') except: status = 0 try: hdu1.header.update('TIMESYS', instruct[1].header['TIMESYS'], 'time system is barycentric JD') except: status = 0 try: hdu1.header.update('BJDREFI', instruct[1].header['BJDREFI'], 'integer part of BJD reference date') except: status = 0 try: hdu1.header.update('BJDREFF', instruct[1].header['BJDREFF'], 'fraction of the day in BJD reference date') except: status = 0 try: hdu1.header.update('TIMEUNIT', instruct[1].header['TIMEUNIT'], 'time unit for TIME, TSTART and TSTOP') except: status = 0 try: hdu1.header.update('TSTART', instruct[1].header['TSTART'], 'observation start time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('TSTOP', instruct[1].header['TSTOP'], 'observation stop time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('LC_START', instruct[1].header['LC_START'], 'mid point of first cadence in MJD') except: status = 0 try: hdu1.header.update('LC_END', instruct[1].header['LC_END'], 'mid point of last cadence in MJD') except: status = 0 try: hdu1.header.update('TELAPSE', instruct[1].header['TELAPSE'], '[d] TSTOP - TSTART') except: status = 0 try: hdu1.header.update('LIVETIME', instruct[1].header['LIVETIME'], '[d] TELAPSE multiplied by DEADC') except: status = 0 try: hdu1.header.update('EXPOSURE', instruct[1].header['EXPOSURE'], '[d] time on source') except: status = 0 try: hdu1.header.update('DEADC', instruct[1].header['DEADC'], 'deadtime correction') except: status = 0 try: hdu1.header.update('TIMEPIXR', instruct[1].header['TIMEPIXR'], 'bin time beginning=0 middle=0.5 end=1') except: status = 0 try: hdu1.header.update('TIERRELA', instruct[1].header['TIERRELA'], '[d] relative time error') except: status = 0 try: hdu1.header.update('TIERABSO', instruct[1].header['TIERABSO'], '[d] absolute time error') except: status = 0 try: hdu1.header.update('INT_TIME', instruct[1].header['INT_TIME'], '[s] photon accumulation time per frame') except: status = 0 try: hdu1.header.update('READTIME', instruct[1].header['READTIME'], '[s] readout time per frame') except: status = 0 try: hdu1.header.update('FRAMETIM', instruct[1].header['FRAMETIM'], '[s] frame time (INT_TIME + READTIME)') except: status = 0 try: hdu1.header.update('NUM_FRM', instruct[1].header['NUM_FRM'], 'number of frames per time stamp') except: status = 0 try: hdu1.header.update('TIMEDEL', instruct[1].header['TIMEDEL'], '[d] time resolution of data') except: status = 0 try: hdu1.header.update('DATE-OBS', instruct[1].header['DATE-OBS'], 'TSTART as UTC calendar 
date') except: status = 0 try: hdu1.header.update('DATE-END', instruct[1].header['DATE-END'], 'TSTOP as UTC calendar date') except: status = 0 try: hdu1.header.update('BACKAPP', instruct[1].header['BACKAPP'], 'background is subtracted') except: status = 0 try: hdu1.header.update('DEADAPP', instruct[1].header['DEADAPP'], 'deadtime applied') except: status = 0 try: hdu1.header.update('VIGNAPP', instruct[1].header['VIGNAPP'], 'vignetting or collimator correction applied') except: status = 0 try: hdu1.header.update('GAIN', instruct[1].header['GAIN'], '[electrons/count] channel gain') except: status = 0 try: hdu1.header.update('READNOIS', instruct[1].header['READNOIS'], '[electrons] read noise') except: status = 0 try: hdu1.header.update('NREADOUT', instruct[1].header['NREADOUT'], 'number of read per cadence') except: status = 0 try: hdu1.header.update('TIMSLICE', instruct[1].header['TIMSLICE'], 'time-slice readout sequence section') except: status = 0 try: hdu1.header.update('MEANBLCK', instruct[1].header['MEANBLCK'], '[count] FSW mean black level') except: status = 0 hdulist.append(hdu1) hdulist.writeto(outfile) status = kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, instruct[2].data, instruct[2].header) status = kepio.closefits(instruct, logfile, verbose) else: message = 'WARNING -- KEPPIXSERIES: output FITS file requires > 999 columns. Non-compliant with FITS convention.' kepmsg.warn(logfile, message) # plot style if status == 0: try: params = { 'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 32, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 12, 'ytick.labelsize': 12 } pylab.rcParams.update(params) except: pass # plot pixel array fmin = 1.0e33 fmax = -1.033 if status == 0: pylab.figure(num=None, figsize=[12, 12]) pylab.clf() dx = 0.93 / xdim dy = 0.94 / ydim ax = pylab.axes([0.06, 0.05, 0.93, 0.94]) pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) pylab.gca().yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.xlim(numpy.min(pixcoord1) - 0.5, numpy.max(pixcoord1) + 0.5) pylab.ylim(numpy.min(pixcoord2) - 0.5, numpy.max(pixcoord2) + 0.5) pylab.xlabel('time', {'color': 'k'}) pylab.ylabel('arbitrary flux', {'color': 'k'}) for i in range(ydim): for j in range(xdim): tmin = amin(time) tmax = amax(time) try: numpy.isfinite(amin(pixseries[i, j, :])) numpy.isfinite(amin(pixseries[i, j, :])) fmin = amin(pixseries[i, j, :]) fmax = amax(pixseries[i, j, :]) except: ugh = 1 xmin = tmin - (tmax - tmin) / 40 xmax = tmax + (tmax - tmin) / 40 ymin = fmin - (fmax - fmin) / 20 ymax = fmax + (fmax - fmin) / 20 if kepstat.bitInBitmap(maskimg[i, j], 2): pylab.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy], axisbg='lightslategray') elif maskimg[i, j] == 0: pylab.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy], axisbg='black') else: pylab.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy]) if j == int(xdim / 2) and i == 0: pylab.setp(pylab.gca(), xticklabels=[], yticklabels=[]) elif j == 0 and i == int(ydim / 2): pylab.setp(pylab.gca(), xticklabels=[], yticklabels=[]) else: pylab.setp(pylab.gca(), xticklabels=[], yticklabels=[]) ptime = time * 1.0 ptime = numpy.insert(ptime, 
[0], ptime[0]) ptime = numpy.append(ptime, ptime[-1]) pflux = pixseries[i, j, :] * 1.0 pflux = numpy.insert(pflux, [0], -1000.0) pflux = numpy.append(pflux, -1000.0) pylab.plot(time, pixseries[i, j, :], color='#0000ff', linestyle='-', linewidth=0.5) if not kepstat.bitInBitmap(maskimg[i, j], 2): pylab.fill(ptime, pflux, fc='lightslategray', linewidth=0.0, alpha=1.0) pylab.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0) if 'loc' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(ymin, ymax) if 'glob' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(1.0e-10, numpy.nanmax(pixseries) * 1.05) if 'full' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(1.0e-10, ymax * 1.05) # render plot if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() if plotfile.lower() != 'none': pylab.savefig(plotfile) # stop time if status == 0: kepmsg.clock('KEPPIXSERIES ended at', logfile, verbose) return
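# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original tasks in this module):
# keppixseries and kepdiffim optionally high-pass filter each pixel light
# curve. A smoothing kernel (boxcar, gauss or sinc) whose width follows from
# the cutoff frequency is built, the series is padded at both ends with
# Gaussian noise matched to its local mean and scatter, the padded series is
# convolved with the kernel, and the smoothed (low-frequency) component is
# subtracted while the median level is preserved. The function below sketches
# that sequence for a normalised boxcar kernel only; its name and arguments
# are assumptions made for this example.

import numpy

def highpass_boxcar(series, cadence_sec, cutoff_per_day):

# remove variations slower than cutoff_per_day from a 1-D flux series

    cadences_per_day = 86400.0 / cadence_sec
    width = int(numpy.ceil(cadences_per_day / cutoff_per_day))
    kernel = numpy.ones(width) / width

# pad both ends with noise drawn from the local mean and scatter

    head = numpy.random.normal(numpy.mean(series[:width]), numpy.std(series[:width]), width)
    tail = numpy.random.normal(numpy.mean(series[-width:]), numpy.std(series[-width:]), width)
    padded = numpy.concatenate([head, series, tail])

# convolve, strip the padding and subtract the low-frequency component

    smooth = numpy.convolve(padded, kernel, mode='same')[width:-width]
    return series - smooth + numpy.median(smooth)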
def kepdiffim(infile,outfile,plotfile,imscale,colmap,filter,function,cutoff,clobber,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPDIFFIM -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'plotfile='+plotfile+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' filt = 'n' if (filter): filt = 'y' call += 'filter='+filt+ ' ' call += 'function='+function+' ' call += 'cutoff='+str(cutoff)+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPDIFFIM started at: ',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPDIFFIM: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # reference color map if colmap == 'browse': status = cmap_plot() # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) # print target data if status == 0: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # how many quality = 0 rows? 
if status == 0: npts = 0 nrows = len(fluxpixels) for i in range(nrows): if qual[i] == 0 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]): npts += 1 time = empty((npts)) timecorr = empty((npts)) cadenceno = empty((npts)) quality = empty((npts)) pixseries = empty((ydim*xdim,npts)) errseries = empty((ydim*xdim,npts)) # construct output light curves if status == 0: np = 0 for i in range(ydim*xdim): npts = 0 for k in range(nrows): if qual[k] == 0 and \ numpy.isfinite(barytime[k]) and \ numpy.isfinite(fluxpixels[k,ydim*xdim/2]): time[npts] = barytime[k] timecorr[npts] = tcorr[k] cadenceno[npts] = cadno[k] quality[npts] = qual[k] pixseries[i,npts] = fluxpixels[k,np] errseries[i,npts] = errpixels[k,np] npts += 1 np += 1 # define data sampling if status == 0 and filter: tpf, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0 and filter: cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose) tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) # define convolution function if status == 0 and filter: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0,dx-1,dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) # pad time series at both ends with noise model if status == 0 and filter: for i in range(ydim*xdim): ave, sigma = kepstat.stdev(pixseries[i,:len(filtfunc)]) padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma), pixseries[i,:]) ave, sigma = kepstat.stdev(pixseries[i,-len(filtfunc):]) padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma)) # convolve data if status == 0: convolved = convolve(padded,filtfunc,'same') # remove padding from the output array if status == 0: outdata = convolved[len(filtfunc):-len(filtfunc)] # subtract low frequencies if status == 0: outmedian = median(outdata) pixseries[i,:] = pixseries[i,:] - outdata + outmedian # sum pixels over cadence if status == 0: np = 0 nrows = len(fluxpixels) pixsum = zeros((ydim*xdim)) errsum = zeros((ydim*xdim)) for i in range(npts): if quality[i] == 0: pixsum += pixseries[:,i] errsum += errseries[:,i]**2 np += 1 pixsum /= np errsum = sqrt(errsum) / np # calculate standard deviation pixels if status == 0: pixvar = zeros((ydim*xdim)) for i in range(npts): if quality[i] == 0: pixvar += (pixsum - pixseries[:,i] / errseries[:,i])**2 pixvar = numpy.sqrt(pixvar) # median pixel errors if status == 0: errmed = empty((ydim*xdim)) for i in range(ydim*xdim): errmed[i] = numpy.median(errseries[:,i]) # calculate chi distribution pixels if status == 0: pixdev = zeros((ydim*xdim)) for i in range(npts): if quality[i] == 0: pixdev += ((pixsum - pixseries[:,i]) / pixsum)**2 pixdev = numpy.sqrt(pixdev) # pixdev = numpy.sqrt(pixvar) / errsum #errmed # image scale and intensity limits if status == 0: pixsum_pl, zminsum, zmaxsum = kepplot.intScale1D(pixsum,imscale) pixvar_pl, zminvar, zmaxvar = kepplot.intScale1D(pixvar,imscale) pixdev_pl, zmindev, zmaxdev = kepplot.intScale1D(pixdev,imscale) # construct output summed image if status == 0: imgsum = empty((ydim,xdim)) imgvar = empty((ydim,xdim)) imgdev = empty((ydim,xdim)) imgsum_pl = empty((ydim,xdim)) imgvar_pl = 
empty((ydim,xdim)) imgdev_pl = empty((ydim,xdim)) n = 0 for i in range(ydim): for j in range(xdim): imgsum[i,j] = pixsum[n] imgvar[i,j] = pixvar[n] imgdev[i,j] = pixdev[n] imgsum_pl[i,j] = pixsum_pl[n] imgvar_pl[i,j] = pixvar_pl[n] imgdev_pl[i,j] = pixdev_pl[n] n += 1 # construct output file if status == 0: instruct, status = kepio.openfits(infile,'readonly',logfile,verbose) status = kepkey.history(call,instruct[0],outfile,logfile,verbose) hdulist = HDUList(instruct[0]) hdulist.writeto(outfile) status = kepkey.new('EXTNAME','FLUX','name of extension',instruct[2],outfile,logfile,verbose) pyfits.append(outfile,imgsum,instruct[2].header) status = kepkey.new('EXTNAME','CHI','name of extension',instruct[2],outfile,logfile,verbose) pyfits.append(outfile,imgvar,instruct[2].header) status = kepkey.new('EXTNAME','STDDEV','name of extension',instruct[2],outfile,logfile,verbose) pyfits.append(outfile,imgdev,instruct[2].header) status = kepkey.new('EXTNAME','APERTURE','name of extension',instruct[2],outfile,logfile,verbose) pyfits.append(outfile,instruct[2].data,instruct[2].header) status = kepio.closefits(instruct,logfile,verbose) # pixel limits of the subimage if status == 0: ymin = row ymax = ymin + ydim xmin = column xmax = xmin + xdim # plot limits for summed image ymin = float(ymin) - 0.5 ymax = float(ymax) - 0.5 xmin = float(xmin) - 0.5 xmax = float(xmax) - 0.5 # plot style try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10} pylab.rcParams.update(params) except: message = 'ERROR -- KEPDIFFIM: install latex for scientific plotting' status = kepmsg.err(logfile,message,verbose) if status == 0: plotimage(imgsum_pl,imgvar_pl,imgdev_pl,zminsum,zminvar,zmindev, zmaxsum,zmaxvar,zmaxdev,xmin,xmax,ymin,ymax,colmap,plotfile,cmdLine) # stop time kepmsg.clock('KEPDIFFIM ended at: ',logfile,verbose) return
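# Illustrative sketch of the kernel construction used by the filter option in
# kepdiffim/keppixseries/kephalophot: the cutoff frequency (1/days) is turned into a
# width in cadences and a boxcar, Gaussian, or sinc kernel of roughly that width is
# returned. Plain numpy stands in for kepfunc.gauss, all three shapes are normalized
# here, and the function name is an assumption rather than part of the original code.
def _filter_kernel_sketch(cadence_sec, cutoff_per_day, shape='sinc'):
    import numpy  # local import so the sketch stays self-contained
    cadences_per_day = 86400.0 / cadence_sec
    width = cadences_per_day / cutoff_per_day   # cutoff timescale in cadences
    if shape == 'boxcar':
        kernel = numpy.ones(int(numpy.ceil(width)))
    elif shape == 'gauss':
        sigma = width / 2.0
        x = numpy.arange(int(numpy.ceil(sigma * 10 + 1)), dtype='float64')
        kernel = numpy.exp(-0.5 * ((x - x.mean()) / sigma) ** 2)
    else:  # 'sinc'
        x = numpy.arange(int(numpy.ceil(width * 12 + 1)), dtype='float64')
        kernel = numpy.sinc((x - x.mean()) / width)
    return kernel / kernel.sum()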
def keppixseries(infile,outfile,plotfile,plottype,filter,function,cutoff,clobber,verbose,logfile,status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPIXSERIES -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'plotfile='+plotfile+' ' call += 'plottype='+plottype+' ' filt = 'n' if (filter): filt = 'y' call += 'filter='+filt+ ' ' call += 'function='+function+' ' call += 'cutoff='+str(cutoff)+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPPIXSERIES started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPIXSERIES: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) # print target data if status == 0: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # how many quality = 0 rows? 
if status == 0: npts = 0 nrows = len(fluxpixels) for i in range(nrows): if qual[i] == 0 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]): npts += 1 time = empty((npts)) timecorr = empty((npts)) cadenceno = empty((npts)) quality = empty((npts)) pixseries = empty((ydim,xdim,npts)) errseries = empty((ydim,xdim,npts)) # construct output light curves if status == 0: np = 0 for i in range(ydim): for j in range(xdim): npts = 0 for k in range(nrows): if qual[k] == 0 and \ numpy.isfinite(barytime[k]) and \ numpy.isfinite(fluxpixels[k,ydim*xdim/2]): time[npts] = barytime[k] timecorr[npts] = tcorr[k] cadenceno[npts] = cadno[k] quality[npts] = qual[k] pixseries[i,j,npts] = fluxpixels[k,np] errseries[i,j,npts] = errpixels[k,np] npts += 1 np += 1 # define data sampling if status == 0 and filter: tpf, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0 and filter: cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose) tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) # define convolution function if status == 0 and filter: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0,dx-1,dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) # pad time series at both ends with noise model if status == 0 and filter: for i in range(ydim): for j in range(xdim): ave, sigma = kepstat.stdev(pixseries[i,j,:len(filtfunc)]) padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:]) ave, sigma = kepstat.stdev(pixseries[i,j,-len(filtfunc):]) padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma)) # convolve data if status == 0: convolved = convolve(padded,filtfunc,'same') # remove padding from the output array if status == 0: outdata = convolved[len(filtfunc):-len(filtfunc)] # subtract low frequencies if status == 0: outmedian = median(outdata) pixseries[i,j,:] = pixseries[i,j,:] - outdata + outmedian # construct output file if status == 0 and ydim*xdim < 1000: instruct, status = kepio.openfits(infile,'readonly',logfile,verbose) status = kepkey.history(call,instruct[0],outfile,logfile,verbose) hdulist = HDUList(instruct[0]) cols = [] cols.append(Column(name='TIME',format='D',unit='BJD - 2454833',disp='D12.7',array=time)) cols.append(Column(name='TIMECORR',format='E',unit='d',disp='E13.6',array=timecorr)) cols.append(Column(name='CADENCENO',format='J',disp='I10',array=cadenceno)) cols.append(Column(name='QUALITY',format='J',array=quality)) for i in range(ydim): for j in range(xdim): colname = 'COL%d_ROW%d' % (i+column,j+row) cols.append(Column(name=colname,format='E',disp='E13.6',array=pixseries[i,j,:])) hdu1 = new_table(ColDefs(cols)) try: hdu1.header.update('INHERIT',True,'inherit the primary header') except: status = 0 try: hdu1.header.update('EXTNAME','PIXELSERIES','name of extension') except: status = 0 try: hdu1.header.update('EXTVER',instruct[1].header['EXTVER'],'extension version number (not format version)') except: status = 0 try: hdu1.header.update('TELESCOP',instruct[1].header['TELESCOP'],'telescope') except: status = 0 try: hdu1.header.update('INSTRUME',instruct[1].header['INSTRUME'],'detector type') 
except: status = 0 try: hdu1.header.update('OBJECT',instruct[1].header['OBJECT'],'string version of KEPLERID') except: status = 0 try: hdu1.header.update('KEPLERID',instruct[1].header['KEPLERID'],'unique Kepler target identifier') except: status = 0 try: hdu1.header.update('RADESYS',instruct[1].header['RADESYS'],'reference frame of celestial coordinates') except: status = 0 try: hdu1.header.update('RA_OBJ',instruct[1].header['RA_OBJ'],'[deg] right ascension from KIC') except: status = 0 try: hdu1.header.update('DEC_OBJ',instruct[1].header['DEC_OBJ'],'[deg] declination from KIC') except: status = 0 try: hdu1.header.update('EQUINOX',instruct[1].header['EQUINOX'],'equinox of celestial coordinate system') except: status = 0 try: hdu1.header.update('TIMEREF',instruct[1].header['TIMEREF'],'barycentric correction applied to times') except: status = 0 try: hdu1.header.update('TASSIGN',instruct[1].header['TASSIGN'],'where time is assigned') except: status = 0 try: hdu1.header.update('TIMESYS',instruct[1].header['TIMESYS'],'time system is barycentric JD') except: status = 0 try: hdu1.header.update('BJDREFI',instruct[1].header['BJDREFI'],'integer part of BJD reference date') except: status = 0 try: hdu1.header.update('BJDREFF',instruct[1].header['BJDREFF'],'fraction of the day in BJD reference date') except: status = 0 try: hdu1.header.update('TIMEUNIT',instruct[1].header['TIMEUNIT'],'time unit for TIME, TSTART and TSTOP') except: status = 0 try: hdu1.header.update('TSTART',instruct[1].header['TSTART'],'observation start time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('TSTOP',instruct[1].header['TSTOP'],'observation stop time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('LC_START',instruct[1].header['LC_START'],'mid point of first cadence in MJD') except: status = 0 try: hdu1.header.update('LC_END',instruct[1].header['LC_END'],'mid point of last cadence in MJD') except: status = 0 try: hdu1.header.update('TELAPSE',instruct[1].header['TELAPSE'],'[d] TSTOP - TSTART') except: status = 0 try: hdu1.header.update('LIVETIME',instruct[1].header['LIVETIME'],'[d] TELAPSE multiplied by DEADC') except: status = 0 try: hdu1.header.update('EXPOSURE',instruct[1].header['EXPOSURE'],'[d] time on source') except: status = 0 try: hdu1.header.update('DEADC',instruct[1].header['DEADC'],'deadtime correction') except: status = 0 try: hdu1.header.update('TIMEPIXR',instruct[1].header['TIMEPIXR'],'bin time beginning=0 middle=0.5 end=1') except: status = 0 try: hdu1.header.update('TIERRELA',instruct[1].header['TIERRELA'],'[d] relative time error') except: status = 0 try: hdu1.header.update('TIERABSO',instruct[1].header['TIERABSO'],'[d] absolute time error') except: status = 0 try: hdu1.header.update('INT_TIME',instruct[1].header['INT_TIME'],'[s] photon accumulation time per frame') except: status = 0 try: hdu1.header.update('READTIME',instruct[1].header['READTIME'],'[s] readout time per frame') except: status = 0 try: hdu1.header.update('FRAMETIM',instruct[1].header['FRAMETIM'],'[s] frame time (INT_TIME + READTIME)') except: status = 0 try: hdu1.header.update('NUM_FRM',instruct[1].header['NUM_FRM'],'number of frames per time stamp') except: status = 0 try: hdu1.header.update('TIMEDEL',instruct[1].header['TIMEDEL'],'[d] time resolution of data') except: status = 0 try: hdu1.header.update('DATE-OBS',instruct[1].header['DATE-OBS'],'TSTART as UTC calendar date') except: status = 0 try: hdu1.header.update('DATE-END',instruct[1].header['DATE-END'],'TSTOP as UTC calendar date') except: status = 0 try: 
hdu1.header.update('BACKAPP',instruct[1].header['BACKAPP'],'background is subtracted') except: status = 0 try: hdu1.header.update('DEADAPP',instruct[1].header['DEADAPP'],'deadtime applied') except: status = 0 try: hdu1.header.update('VIGNAPP',instruct[1].header['VIGNAPP'],'vignetting or collimator correction applied') except: status = 0 try: hdu1.header.update('GAIN',instruct[1].header['GAIN'],'[electrons/count] channel gain') except: status = 0 try: hdu1.header.update('READNOIS',instruct[1].header['READNOIS'],'[electrons] read noise') except: status = 0 try: hdu1.header.update('NREADOUT',instruct[1].header['NREADOUT'],'number of read per cadence') except: status = 0 try: hdu1.header.update('TIMSLICE',instruct[1].header['TIMSLICE'],'time-slice readout sequence section') except: status = 0 try: hdu1.header.update('MEANBLCK',instruct[1].header['MEANBLCK'],'[count] FSW mean black level') except: status = 0 hdulist.append(hdu1) hdulist.writeto(outfile) status = kepkey.new('EXTNAME','APERTURE','name of extension',instruct[2],outfile,logfile,verbose) pyfits.append(outfile,instruct[2].data,instruct[2].header) status = kepio.closefits(instruct,logfile,verbose) else: message = 'WARNING -- KEPPIXSERIES: output FITS file requires > 999 columns. Non-compliant with FITS convention.' kepmsg.warn(logfile,message) # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 32, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 12, 'ytick.labelsize': 12} pylab.rcParams.update(params) except: pass # plot pixel array fmin = 1.0e33 fmax = -1.033 if status == 0: pylab.figure(num=None,figsize=[12,12]) pylab.clf() dx = 0.93 / xdim dy = 0.94 / ydim ax = pylab.axes([0.06,0.05,0.93,0.94]) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True)) pylab.gca().yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.xlim(numpy.min(pixcoord1) - 0.5,numpy.max(pixcoord1) + 0.5) pylab.ylim(numpy.min(pixcoord2) - 0.5,numpy.max(pixcoord2) + 0.5) pylab.xlabel('time', {'color' : 'k'}) pylab.ylabel('arbitrary flux', {'color' : 'k'}) for i in range(ydim): for j in range(xdim): tmin = amin(time) tmax = amax(time) try: numpy.isfinite(amin(pixseries[i,j,:])) numpy.isfinite(amin(pixseries[i,j,:])) fmin = amin(pixseries[i,j,:]) fmax = amax(pixseries[i,j,:]) except: ugh = 1 xmin = tmin - (tmax - tmin) / 40 xmax = tmax + (tmax - tmin) / 40 ymin = fmin - (fmax - fmin) / 20 ymax = fmax + (fmax - fmin) / 20 if kepstat.bitInBitmap(maskimg[i,j],2): pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='lightslategray') elif maskimg[i,j] == 0: pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='black') else: pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy]) if j == int(xdim / 2) and i == 0: pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[]) elif j == 0 and i == int(ydim / 2): pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[]) else: pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[]) ptime = time * 1.0 ptime = numpy.insert(ptime,[0],ptime[0]) ptime = numpy.append(ptime,ptime[-1]) pflux = pixseries[i,j,:] * 1.0 pflux = numpy.insert(pflux,[0],-1000.0) pflux = numpy.append(pflux,-1000.0) pylab.plot(time,pixseries[i,j,:],color='#0000ff',linestyle='-',linewidth=0.5) if 
not kepstat.bitInBitmap(maskimg[i,j],2): pylab.fill(ptime,pflux,fc='lightslategray',linewidth=0.0,alpha=1.0) pylab.fill(ptime,pflux,fc='#FFF380',linewidth=0.0,alpha=1.0) if 'loc' in plottype: pylab.xlim(xmin,xmax) pylab.ylim(ymin,ymax) if 'glob' in plottype: pylab.xlim(xmin,xmax) pylab.ylim(1.0e-10,numpy.nanmax(pixseries) * 1.05) if 'full' in plottype: pylab.xlim(xmin,xmax) pylab.ylim(1.0e-10,ymax * 1.05) # render plot if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() if plotfile.lower() != 'none': pylab.savefig(plotfile) # stop time if status == 0: kepmsg.clock('KEPPIXSERIES ended at',logfile,verbose) return
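# Illustrative call of the task above; the file names and parameter values below are
# hypothetical and not taken from the original text:
# keppixseries('kplr_target_pixel_file.fits', 'pixseries.fits', 'pixseries.png',
#              'local', True, 'boxcar', 1.0, True, True, 'keppixseries.log', 0,
#              cmdLine=False)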
def kephalophot(infile, outfile, plotfile, plottype, filter, function, cutoff, clobber, verbose, logfile, status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPHALOPHOT -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'plotfile=' + plotfile + ' ' call += 'plottype=' + plottype + ' ' filt = 'n' if (filter): filt = 'y' call += 'filter=' + filt + ' ' call += 'function=' + function + ' ' call += 'cutoff=' + str(cutoff) + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPHALOPHOT started at', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPHALOPHOT: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile, message, verbose) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, verbose) # print target data if status == 0: print('') print(' KepID: %s' % kepid) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # how many quality = 0 rows? how many pixels? 
if status == 0: np = ydim * xdim nrows = len(fluxpixels) npts = 0 for i in range(nrows): if qual[i] < 1e4 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]): npts += 1 time = empty((npts)) timecorr = empty((npts)) cadenceno = empty((npts)) quality = empty((npts)) pixseries = zeros((npts, np)) errseries = zeros((npts, np)) # pixseries = empty((ydim,xdim,npts)) # errseries = empty((ydim,xdim,npts)) # construct output light curves if status == 0: for i in range(np): npts = 0 for j in range(nrows): if qual[j] < 1e4 and \ numpy.isfinite(barytime[j]) and \ numpy.isfinite(fluxpixels[j,i]): time[npts] = barytime[j] timecorr[npts] = tcorr[j] cadenceno[npts] = cadno[j] quality[npts] = qual[j] pixseries[npts, i] = fluxpixels[j, i] errseries[npts, i] = errpixels[j, i] npts += 1 # define data sampling if status == 0 and filter: tpf, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0 and filter: cadence, status = kepkey.cadence(tpf[1], infile, logfile, verbose) tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) # define convolution function if status == 0 and filter: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0, dx / 2 - 1.0, timescale], linspace(0, dx - 1, dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0, dx - 1, dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) # pad time series at both ends with noise model if status == 0 and filter: for i in range(ydim): for j in range(xdim): ave, sigma = kepstat.stdev(pixseries[i, j, :len(filtfunc)]) padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:]) ave, sigma = kepstat.stdev(pixseries[i, j, -len(filtfunc):]) padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma)) # convolve data if status == 0: convolved = convolve(padded, filtfunc, 'same') # remove padding from the output array if status == 0: outdata = convolved[len(filtfunc):-len(filtfunc)] # subtract low frequencies if status == 0: outmedian = median(outdata) pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian # construct weighted time series if status == 0: wgt = numpy.ones((np, 3)) twgt = numpy.ones((np, 3)) wgt /= sum(wgt, axis=0) satlvl = 0.8 * numpy.max(numpy.max(pixseries, axis=1)) brk1 = 9.7257203 brk2 = 45. 
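        # The weighting scheme below splits the light curve into three contiguous time
        # segments at brk1 and brk2 days after the first cadence (the break values above
        # look hard-wired for a particular data set), assigns each segment its own
        # pixel-weight column in wgt, and zeroes the weight of pixels that are near
        # saturation (above satlvl) or nearly empty before the random-perturbation
        # search further down refines the weights.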
ind1 = where(time - time[0] < brk1) ind2 = where((time - time[0] >= brk1) & (time - time[0] < brk2)) ind3 = where(time - time[0] >= brk2) z = numpy.array([0.0, 0.0, 0.0]) for i in range(np): if max(pixseries[ind1, i].flatten()) > satlvl or max( pixseries[ind1, i].flatten()) <= 100: wgt[i, 0] = 0 z[0] += 1 if max(pixseries[ind2, i].flatten()) > satlvl or max( pixseries[ind2, i].flatten()) <= 100: wgt[i, 1] = 0 z[1] += 1 if max(pixseries[ind3, i].flatten()) > satlvl or max( pixseries[ind3, i].flatten()) <= 100: wgt[i, 2] = 0 z[2] += 1 print(z) print(np - z) sf1 = numpy.dot(pixseries[ind1, :], wgt[:, 0]).flatten() sf2 = numpy.dot(pixseries[ind2, :], wgt[:, 1]).flatten() sf3 = numpy.dot(pixseries[ind3, :], wgt[:, 2]).flatten() sf1 /= numpy.median(sf1) sf2 /= numpy.median(sf2) sf3 /= numpy.median(sf3) originalflux = numpy.concatenate([sf1, sf2, sf3]) # a=numpy.array([0.0,0.0,0.0]) # t=0 # ca = numpy.array([0.0,0.0,0.0]) # ct = 0 # sig1 = numpy.std(sf1) # sig2 = numpy.std(sf2) # sig3 = numpy.std(sf3) # while 1: # j = int(numpy.floor(numpy.random.random()*np)) # if sum(wgt[j,:]) == 0: continue # if ct == 1000: # print(ca) # if ca[0] < 333 and ca[1] < 333 and ca[2] < 333: break # ca = numpy.array([0.0,0.0,0.0]) # ct = 0 # t += 1 # ct += 1 # wgt /= sum(wgt,axis=0) # twgt=copy(wgt) # twgt[j,:]*=numpy.random.normal(1.0,0.05,3) # twgt /= sum(twgt,axis=0) # tsf1 = numpy.dot(pixseries[ind1,:],twgt[:,0]).flatten() # tsf2 = numpy.dot(pixseries[ind2,:],twgt[:,1]).flatten() # tsf3 = numpy.dot(pixseries[ind3,:],twgt[:,2]).flatten() # tsf1 /= numpy.median(tsf1) # tsf2 /= numpy.median(tsf2) # tsf3 /= numpy.median(tsf3) # tsig1 = numpy.std(tsf1) # tsig2 = numpy.std(tsf2) # tsig3 = numpy.std(tsf3) # if tsig1 < sig1: # wgt[:,0] = twgt[:,0] # sig1 = tsig1 # a[0] += 1 # ca[0] += 1 # if tsig2 < sig2: # wgt[:,1] = twgt[:,1] # sig2 = tsig2 # a[1] += 1 # ca[1] += 1 # if tsig3 < sig3: # wgt[:,2] = twgt[:,2] # sig3 = tsig3 # a[2] += 1 # ca[2] += 1 # print(100*a/t) # sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten() # sf2 = numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten() # sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten() # sf1 /= numpy.median(sf1) # sf2 /= numpy.median(sf2) # sf3 /= numpy.median(sf3) # # a=numpy.array([0.0,0.0,0.0]) # t=0 # ca = numpy.array([0.0,0.0,0.0]) # ct = 0 # sig1 = sum(numpy.fabs(sf1[1:]-sf1[:-1])) # sig2 = sum(numpy.fabs(sf2[1:]-sf2[:-1])) # sig3 = sum(numpy.fabs(sf3[1:]-sf3[:-1])) # while 1: # j = int(numpy.floor(numpy.random.random()*np)) # if sum(wgt[j,:]) == 0: continue # if ct == 1000: # print(ca) # if ca[0] < 167 and ca[1] < 167 and ca[2] < 167: break# # ca = numpy.array([0.0,0.0,0.0]) # ct = 0 # t += 1 # ct += 1 # wgt /= sum(wgt,axis=0) # twgt=copy(wgt) # twgt[j,:]*=numpy.random.normal(1.0,0.05,3) # twgt /= sum(twgt,axis=0) # tsf1 = numpy.dot(pixseries[ind1,:],twgt[:,0]).flatten() # tsf2 = numpy.dot(pixseries[ind2,:],twgt[:,1]).flatten() # tsf3 = numpy.dot(pixseries[ind3,:],twgt[:,2]).flatten() # tsf1 /= numpy.median(tsf1) # tsf2 /= numpy.median(tsf2) # tsf3 /= numpy.median(tsf3) # tsig1 = sum(numpy.fabs(tsf1[1:]-tsf1[:-1])) # tsig2 = sum(numpy.fabs(tsf2[1:]-tsf2[:-1])) # tsig3 = sum(numpy.fabs(tsf3[1:]-tsf3[:-1])) # if tsig1 < sig1: # wgt[:,0] = twgt[:,0] # sig1 = tsig1 # a[0] += 1 # ca[0] += 1 # if tsig2 < sig2: # wgt[:,1] = twgt[:,1] # sig2 = tsig2 # a[1] += 1 # ca[1] += 1 # if tsig3 < sig3: # wgt[:,2] = twgt[:,2] # sig3 = tsig3 # a[2] += 1 # ca[2] += 1 # print(100*a/t) # sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten() # sf2 = 
numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten() # sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten() # sf1 /= numpy.median(sf1) # sf2 /= numpy.median(sf2) # sf3 /= numpy.median(sf3) a = numpy.array([0.0, 0.0, 0.0]) t = 0 ca = numpy.array([0.0, 0.0, 0.0]) ct = 0 sig1 = sum(numpy.fabs(sf1[2:] - 2 * sf1[1:-1] + sf1[:-2])) sig2 = sum(numpy.fabs(sf2[2:] - 2 * sf2[1:-1] + sf2[:-2])) sig3 = sum(numpy.fabs(sf3[2:] - 2 * sf3[1:-1] + sf3[:-2])) while 1: j = int(numpy.floor(numpy.random.random() * np)) if sum(wgt[j, :]) == 0: continue if ct == 1000: print(ca) if ca[0] < 20 and ca[1] < 20 and ca[2] < 20: break if t > 1000000: break ca = numpy.array([0.0, 0.0, 0.0]) ct = 0 t += 1 ct += 1 wgt /= sum(wgt, axis=0) twgt = copy(wgt) twgt[j, :] *= numpy.random.normal(1.0, 0.05, 3) twgt /= sum(twgt, axis=0) tsf1 = numpy.dot(pixseries[ind1, :], twgt[:, 0]).flatten() tsf2 = numpy.dot(pixseries[ind2, :], twgt[:, 1]).flatten() tsf3 = numpy.dot(pixseries[ind3, :], twgt[:, 2]).flatten() tsf1 /= numpy.median(tsf1) tsf2 /= numpy.median(tsf2) tsf3 /= numpy.median(tsf3) tsig1 = sum(numpy.fabs(tsf1[2:] - 2 * tsf1[1:-1] + tsf1[:-2])) tsig2 = sum(numpy.fabs(tsf2[2:] - 2 * tsf2[1:-1] + tsf2[:-2])) tsig3 = sum(numpy.fabs(tsf3[2:] - 2 * tsf3[1:-1] + tsf3[:-2])) if tsig1 < sig1: wgt[:, 0] = twgt[:, 0] sig1 = tsig1 a[0] += 1 ca[0] += 1 if tsig2 < sig2: wgt[:, 1] = twgt[:, 1] sig2 = tsig2 a[1] += 1 ca[1] += 1 if tsig3 < sig3: wgt[:, 2] = twgt[:, 2] sig3 = tsig3 a[2] += 1 ca[2] += 1 print(100 * a / t) sf1 = numpy.dot(pixseries[ind1, :], wgt[:, 0]).flatten() sf2 = numpy.dot(pixseries[ind2, :], wgt[:, 1]).flatten() sf3 = numpy.dot(pixseries[ind3, :], wgt[:, 2]).flatten() sf1 /= numpy.median(sf1) sf2 /= numpy.median(sf2) sf3 /= numpy.median(sf3) finalflux = numpy.concatenate([sf1, sf2, sf3]) # construct output file if status == 0: instruct, status = kepio.openfits(infile, 'readonly', logfile, verbose) status = kepkey.history(call, instruct[0], outfile, logfile, verbose) hdulist = HDUList(instruct[0]) cols = [] cols.append( Column(name='TIME', format='D', unit='BJD - 2454833', disp='D12.7', array=time)) cols.append( Column(name='TIMECORR', format='E', unit='d', disp='E13.6', array=timecorr)) cols.append( Column(name='CADENCENO', format='J', disp='I10', array=cadenceno)) cols.append(Column(name='QUALITY', format='J', array=quality)) cols.append( Column(name='ORGFLUX', format='E', disp='E13.6', array=originalflux)) cols.append( Column(name='FLUX', format='E', disp='E13.6', array=finalflux)) # for i in range(ydim): # for j in range(xdim): # colname = 'COL%d_ROW%d' % (i+column,j+row) # cols.append(Column(name=colname,format='E',disp='E13.6',array=pixseries[i,j,:])) hdu1 = new_table(ColDefs(cols)) try: hdu1.header.update('INHERIT', True, 'inherit the primary header') except: status = 0 try: hdu1.header.update('EXTNAME', 'PIXELSERIES', 'name of extension') except: status = 0 try: hdu1.header.update( 'EXTVER', instruct[1].header['EXTVER'], 'extension version number (not format version)') except: status = 0 try: hdu1.header.update('TELESCOP', instruct[1].header['TELESCOP'], 'telescope') except: status = 0 try: hdu1.header.update('INSTRUME', instruct[1].header['INSTRUME'], 'detector type') except: status = 0 try: hdu1.header.update('OBJECT', instruct[1].header['OBJECT'], 'string version of KEPLERID') except: status = 0 try: hdu1.header.update('KEPLERID', instruct[1].header['KEPLERID'], 'unique Kepler target identifier') except: status = 0 try: hdu1.header.update('RADESYS', instruct[1].header['RADESYS'], 'reference frame of celestial 
coordinates') except: status = 0 try: hdu1.header.update('RA_OBJ', instruct[1].header['RA_OBJ'], '[deg] right ascension from KIC') except: status = 0 try: hdu1.header.update('DEC_OBJ', instruct[1].header['DEC_OBJ'], '[deg] declination from KIC') except: status = 0 try: hdu1.header.update('EQUINOX', instruct[1].header['EQUINOX'], 'equinox of celestial coordinate system') except: status = 0 try: hdu1.header.update('TIMEREF', instruct[1].header['TIMEREF'], 'barycentric correction applied to times') except: status = 0 try: hdu1.header.update('TASSIGN', instruct[1].header['TASSIGN'], 'where time is assigned') except: status = 0 try: hdu1.header.update('TIMESYS', instruct[1].header['TIMESYS'], 'time system is barycentric JD') except: status = 0 try: hdu1.header.update('BJDREFI', instruct[1].header['BJDREFI'], 'integer part of BJD reference date') except: status = 0 try: hdu1.header.update('BJDREFF', instruct[1].header['BJDREFF'], 'fraction of the day in BJD reference date') except: status = 0 try: hdu1.header.update('TIMEUNIT', instruct[1].header['TIMEUNIT'], 'time unit for TIME, TSTART and TSTOP') except: status = 0 try: hdu1.header.update('TSTART', instruct[1].header['TSTART'], 'observation start time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('TSTOP', instruct[1].header['TSTOP'], 'observation stop time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('LC_START', instruct[1].header['LC_START'], 'mid point of first cadence in MJD') except: status = 0 try: hdu1.header.update('LC_END', instruct[1].header['LC_END'], 'mid point of last cadence in MJD') except: status = 0 try: hdu1.header.update('TELAPSE', instruct[1].header['TELAPSE'], '[d] TSTOP - TSTART') except: status = 0 try: hdu1.header.update('LIVETIME', instruct[1].header['LIVETIME'], '[d] TELAPSE multiplied by DEADC') except: status = 0 try: hdu1.header.update('EXPOSURE', instruct[1].header['EXPOSURE'], '[d] time on source') except: status = 0 try: hdu1.header.update('DEADC', instruct[1].header['DEADC'], 'deadtime correction') except: status = 0 try: hdu1.header.update('TIMEPIXR', instruct[1].header['TIMEPIXR'], 'bin time beginning=0 middle=0.5 end=1') except: status = 0 try: hdu1.header.update('TIERRELA', instruct[1].header['TIERRELA'], '[d] relative time error') except: status = 0 try: hdu1.header.update('TIERABSO', instruct[1].header['TIERABSO'], '[d] absolute time error') except: status = 0 try: hdu1.header.update('INT_TIME', instruct[1].header['INT_TIME'], '[s] photon accumulation time per frame') except: status = 0 try: hdu1.header.update('READTIME', instruct[1].header['READTIME'], '[s] readout time per frame') except: status = 0 try: hdu1.header.update('FRAMETIM', instruct[1].header['FRAMETIM'], '[s] frame time (INT_TIME + READTIME)') except: status = 0 try: hdu1.header.update('NUM_FRM', instruct[1].header['NUM_FRM'], 'number of frames per time stamp') except: status = 0 try: hdu1.header.update('TIMEDEL', instruct[1].header['TIMEDEL'], '[d] time resolution of data') except: status = 0 try: hdu1.header.update('DATE-OBS', instruct[1].header['DATE-OBS'], 'TSTART as UTC calendar date') except: status = 0 try: hdu1.header.update('DATE-END', instruct[1].header['DATE-END'], 'TSTOP as UTC calendar date') except: status = 0 try: hdu1.header.update('BACKAPP', instruct[1].header['BACKAPP'], 'background is subtracted') except: status = 0 try: hdu1.header.update('DEADAPP', instruct[1].header['DEADAPP'], 'deadtime applied') except: status = 0 try: hdu1.header.update('VIGNAPP', instruct[1].header['VIGNAPP'], 'vignetting 
or collimator correction applied') except: status = 0 try: hdu1.header.update('GAIN', instruct[1].header['GAIN'], '[electrons/count] channel gain') except: status = 0 try: hdu1.header.update('READNOIS', instruct[1].header['READNOIS'], '[electrons] read noise') except: status = 0 try: hdu1.header.update('NREADOUT', instruct[1].header['NREADOUT'], 'number of read per cadence') except: status = 0 try: hdu1.header.update('TIMSLICE', instruct[1].header['TIMSLICE'], 'time-slice readout sequence section') except: status = 0 try: hdu1.header.update('MEANBLCK', instruct[1].header['MEANBLCK'], '[count] FSW mean black level') except: status = 0 hdulist.append(hdu1) hdulist.writeto(outfile) status = kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, instruct[2].data, instruct[2].header) wgt1 = numpy.reshape(wgt[:, 0], (ydim, xdim)) wgt2 = numpy.reshape(wgt[:, 1], (ydim, xdim)) wgt3 = numpy.reshape(wgt[:, 2], (ydim, xdim)) hdu3 = ImageHDU(data=wgt1, header=instruct[2].header, name='WEIGHTS1') hdu4 = ImageHDU(data=wgt2, header=instruct[2].header, name='WEIGHTS2') hdu5 = ImageHDU(data=wgt3, header=instruct[2].header, name='WEIGHTS3') pyfits.append(outfile, hdu3.data, hdu3.header) pyfits.append(outfile, hdu4.data, hdu4.header) pyfits.append(outfile, hdu5.data, hdu5.header) status = kepio.closefits(instruct, logfile, verbose) else: message = 'WARNING -- KEPHALOPHOT: output FITS file requires > 999 columns. Non-compliant with FITS convention.' kepmsg.warn(logfile, message) # plot style if status == 0: try: params = { 'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 32, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 12, 'ytick.labelsize': 12 } pylab.rcParams.update(params) except: pass # plot pixel array fmin = 1.0e33 fmax = -1.033 if status == 0: pylab.figure(num=None, figsize=[12, 12]) pylab.clf() dx = 0.93 #/ xdim dy = 0.94 #/ ydim ax = pylab.axes([0.06, 0.05, 0.93, 0.94]) pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) pylab.gca().yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.xlim(numpy.min(pixcoord1) - 0.5, numpy.max(pixcoord1) + 0.5) pylab.ylim(numpy.min(pixcoord2) - 0.5, numpy.max(pixcoord2) + 0.5) pylab.xlabel('time', {'color': 'k'}) pylab.ylabel('arbitrary flux', {'color': 'k'}) tmin = amin(time) tmax = amax(time) try: numpy.isfinite(amin(finalflux)) numpy.isfinite(amin(finalflux)) fmin = amin(finalflux) fmax = amax(finalflux) except: ugh = 1 xmin = tmin - (tmax - tmin) / 40 xmax = tmax + (tmax - tmin) / 40 ymin = fmin - (fmax - fmin) / 20 ymax = fmax + (fmax - fmin) / 20 pylab.axes([0.06, 0.05, dx, dy]) pylab.setp(pylab.gca(), xticklabels=[], yticklabels=[]) ptime = time * 1.0 ptime = numpy.insert(ptime, [0], ptime[0]) ptime = numpy.append(ptime, ptime[-1]) pflux = finalflux * 1.0 pflux = numpy.insert(pflux, [0], -1000.0) pflux = numpy.append(pflux, -1000.0) pylab.plot(time, finalflux, color='#0000ff', linestyle='-', linewidth=0.5) pylab.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0) if 'loc' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(ymin, ymax) if 'glob' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(1.0e-10, numpy.nanmax(pixseries) * 1.05) if 'full' 
in plottype: pylab.xlim(xmin, xmax) pylab.ylim(1.0e-10, ymax * 1.05) # render plot if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() if plotfile.lower() != 'none': pylab.savefig(plotfile) # stop time if status == 0: kepmsg.clock('KEPHALOPHOT ended at', logfile, verbose) return
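# Illustrative sketch of the weight search used by kephalophot above: perturb one
# pixel's weight at random, renormalize, and keep the trial whenever the summed light
# curve's total absolute second difference decreases. The segment handling, saturation
# masking, and stopping rule of the real task are omitted, and the function name is an
# assumption.
def _halo_weights_sketch(pixseries, niter=20000):
    import numpy  # local import so the sketch stays self-contained
    # pixseries: 2-D numpy array with shape (cadences, pixels)
    ncad, npix = pixseries.shape
    wgt = numpy.ones(npix) / npix
    def roughness(w):
        lc = pixseries.dot(w)
        lc = lc / numpy.median(lc)
        return numpy.sum(numpy.fabs(lc[2:] - 2.0 * lc[1:-1] + lc[:-2]))
    best = roughness(wgt)
    for _ in range(niter):
        trial = wgt.copy()
        trial[numpy.random.randint(npix)] *= numpy.random.normal(1.0, 0.05)
        trial /= trial.sum()
        score = roughness(trial)
        if score < best:
            wgt, best = trial, score
    return wgt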
def kepdiffim(infile, outfile, plotfile, imscale, colmap, filter, function, cutoff, clobber, verbose, logfile, status, cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPDIFFIM -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'plotfile=' + plotfile + ' ' call += 'imscale=' + imscale + ' ' call += 'colmap=' + colmap + ' ' filt = 'n' if (filter): filt = 'y' call += 'filter=' + filt + ' ' call += 'function=' + function + ' ' call += 'cutoff=' + str(cutoff) + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPDIFFIM started at: ', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPDIFFIM: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile, message, verbose) # reference color map if colmap == 'browse': status = cmap_plot() # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask defintion data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, verbose) # print target data if status == 0: print('') print(' KepID: %s' % kepid) print(' RA (J2000): %s' % ra) print('Dec (J2000): %s' % dec) print(' KepMag: %s' % kepmag) print(' SkyGroup: %2s' % skygroup) print(' Season: %2s' % str(season)) print(' Channel: %2s' % channel) print(' Module: %2s' % module) print(' Output: %1s' % output) print('') # how many quality = 0 rows? 
if status == 0: npts = 0 nrows = len(fluxpixels) for i in range(nrows): if qual[i] == 0 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,ydim*xdim/2]): npts += 1 time = empty((npts)) timecorr = empty((npts)) cadenceno = empty((npts)) quality = empty((npts)) pixseries = empty((ydim * xdim, npts)) errseries = empty((ydim * xdim, npts)) # construct output light curves if status == 0: np = 0 for i in range(ydim * xdim): npts = 0 for k in range(nrows): if qual[k] == 0 and \ numpy.isfinite(barytime[k]) and \ numpy.isfinite(fluxpixels[k,ydim*xdim/2]): time[npts] = barytime[k] timecorr[npts] = tcorr[k] cadenceno[npts] = cadno[k] quality[npts] = qual[k] pixseries[i, npts] = fluxpixels[k, np] errseries[i, npts] = errpixels[k, np] npts += 1 np += 1 # define data sampling if status == 0 and filter: tpf, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0 and filter: cadence, status = kepkey.cadence(tpf[1], infile, logfile, verbose) tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) # define convolution function if status == 0 and filter: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0, dx / 2 - 1.0, timescale], linspace(0, dx - 1, dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0, dx - 1, dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) # pad time series at both ends with noise model if status == 0 and filter: for i in range(ydim * xdim): ave, sigma = kepstat.stdev(pixseries[i, :len(filtfunc)]) padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma), pixseries[i,:]) ave, sigma = kepstat.stdev(pixseries[i, -len(filtfunc):]) padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \ numpy.ones(len(filtfunc)) * sigma)) # convolve data if status == 0: convolved = convolve(padded, filtfunc, 'same') # remove padding from the output array if status == 0: outdata = convolved[len(filtfunc):-len(filtfunc)] # subtract low frequencies if status == 0: outmedian = median(outdata) pixseries[i, :] = pixseries[i, :] - outdata + outmedian # sum pixels over cadence if status == 0: np = 0 nrows = len(fluxpixels) pixsum = zeros((ydim * xdim)) errsum = zeros((ydim * xdim)) for i in range(npts): if quality[i] == 0: pixsum += pixseries[:, i] errsum += errseries[:, i]**2 np += 1 pixsum /= np errsum = sqrt(errsum) / np # calculate standard deviation pixels if status == 0: pixvar = zeros((ydim * xdim)) for i in range(npts): if quality[i] == 0: pixvar += (pixsum - pixseries[:, i] / errseries[:, i])**2 pixvar = numpy.sqrt(pixvar) # median pixel errors if status == 0: errmed = empty((ydim * xdim)) for i in range(ydim * xdim): errmed[i] = numpy.median(errseries[:, i]) # calculate chi distribution pixels if status == 0: pixdev = zeros((ydim * xdim)) for i in range(npts): if quality[i] == 0: pixdev += ((pixsum - pixseries[:, i]) / pixsum)**2 pixdev = numpy.sqrt(pixdev) # pixdev = numpy.sqrt(pixvar) / errsum #errmed # image scale and intensity limits if status == 0: pixsum_pl, zminsum, zmaxsum = kepplot.intScale1D(pixsum, imscale) pixvar_pl, zminvar, zmaxvar = kepplot.intScale1D(pixvar, imscale) pixdev_pl, zmindev, zmaxdev = kepplot.intScale1D(pixdev, imscale) # construct output summed image if status == 0: imgsum = empty((ydim, xdim)) imgvar = empty((ydim, xdim)) imgdev = 
empty((ydim, xdim)) imgsum_pl = empty((ydim, xdim)) imgvar_pl = empty((ydim, xdim)) imgdev_pl = empty((ydim, xdim)) n = 0 for i in range(ydim): for j in range(xdim): imgsum[i, j] = pixsum[n] imgvar[i, j] = pixvar[n] imgdev[i, j] = pixdev[n] imgsum_pl[i, j] = pixsum_pl[n] imgvar_pl[i, j] = pixvar_pl[n] imgdev_pl[i, j] = pixdev_pl[n] n += 1 # construct output file if status == 0: instruct, status = kepio.openfits(infile, 'readonly', logfile, verbose) status = kepkey.history(call, instruct[0], outfile, logfile, verbose) hdulist = HDUList(instruct[0]) hdulist.writeto(outfile) status = kepkey.new('EXTNAME', 'FLUX', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, imgsum, instruct[2].header) status = kepkey.new('EXTNAME', 'CHI', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, imgvar, instruct[2].header) status = kepkey.new('EXTNAME', 'STDDEV', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, imgdev, instruct[2].header) status = kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, instruct[2].data, instruct[2].header) status = kepio.closefits(instruct, logfile, verbose) # pixel limits of the subimage if status == 0: ymin = row ymax = ymin + ydim xmin = column xmax = xmin + xdim # plot limits for summed image ymin = float(ymin) - 0.5 ymax = float(ymax) - 0.5 xmin = float(xmin) - 0.5 xmax = float(xmax) - 0.5 # plot style try: params = { 'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10 } pylab.rcParams.update(params) except: message = 'ERROR -- KEPDIFFIM: install latex for scientific plotting' status = kepmsg.err(logfile, message, verbose) if status == 0: plotimage(imgsum_pl, imgvar_pl, imgdev_pl, zminsum, zminvar, zmindev, zmaxsum, zmaxvar, zmaxdev, xmin, xmax, ymin, ymax, colmap, plotfile, cmdLine) # stop time kepmsg.clock('KEPDIFFIM ended at: ', logfile, verbose) return
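# Illustrative sketch of the three diagnostic images assembled by kepdiffim above from a
# (pixels, cadences) array: the cadence-averaged flux, a pixel scatter image, and the
# fractional-deviation image, each reshaped to (ydim, xdim). Quality masking and error
# weighting from the real task are omitted and the function name is an assumption.
def _diffim_images_sketch(pixseries, ydim, xdim):
    import numpy  # local import so the sketch stays self-contained
    mean_flux = pixseries.mean(axis=1)
    resid = pixseries - mean_flux[:, None]
    scatter = numpy.sqrt(numpy.sum(resid ** 2, axis=1))
    fracdev = numpy.sqrt(numpy.sum((resid / mean_flux[:, None]) ** 2, axis=1))
    return (mean_flux.reshape(ydim, xdim),
            scatter.reshape(ydim, xdim),
            fracdev.reshape(ydim, xdim))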
def kepextract(infile,maskfile,outfile,subback,clobber,verbose,logfile,status): # startup parameters status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPEXTRACT -- ' call += 'infile='+infile+' ' call += 'maskfile='+maskfile+' ' call += 'outfile='+outfile+' ' backgr = 'n' if (subback): backgr = 'y' call += 'background='+backgr+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPEXTRACT started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPEXTRACT: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open input file status = 0 instr = pyfits.open(infile,mode='readonly',memmap=True) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # input file data if status == 0: cards0 = instr[0].header.cards cards1 = instr[1].header.cards cards2 = instr[2].header.cards table = instr[1].data[:] maskmap = copy(instr[2].data) # input table data if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, time, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) time = numpy.array(time,dtype='float64') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, timecorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) timecorr = numpy.array(timecorr,dtype='float32') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadenceno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) cadenceno = numpy.array(cadenceno,dtype='int') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, raw_cnts, status = \ kepio.readTPF(infile,'RAW_CNTS',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_err, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \ kepio.readTPF(infile,'FLUX_BKG',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \ kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cosmic_rays, status = \ kepio.readTPF(infile,'COSMIC_RAYS',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, quality, status = \ 
kepio.readTPF(infile,'QUALITY',logfile,verbose) quality = numpy.array(quality,dtype='int') if status == 0: try: pos_corr1 = numpy.array(table.field('POS_CORR1'),dtype='float64') # ---for FITS wave #2 except: pos_corr1 = empty(len(time)); pos_corr1[:] = numpy.nan # ---temporary before FITS wave #2 try: pos_corr2 = numpy.array(table.field('POS_CORR2'),dtype='float64') # ---for FITS wave #2 except: pos_corr2 = empty(len(time)); pos_corr2[:] = numpy.nan # ---temporary before FITS wave #2 # dummy columns for output file psf_centr1 = empty(len(time)); psf_centr1[:] = numpy.nan psf_centr1_err = empty(len(time)); psf_centr1_err[:] = numpy.nan psf_centr2 = empty(len(time)); psf_centr2[:] = numpy.nan psf_centr2_err = empty(len(time)); psf_centr2_err[:] = numpy.nan # mom_centr1 = empty(len(time)); mom_centr1[:] = numpy.nan mom_centr1_err = empty(len(time)); mom_centr1_err[:] = numpy.nan # mom_centr2 = empty(len(time)); mom_centr2[:] = numpy.nan mom_centr2_err = empty(len(time)); mom_centr2_err[:] = numpy.nan # read mask definition file if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': maskx = array([],'int') masky = array([],'int') lines, status = kepio.openascii(maskfile,'r',logfile,verbose) for line in lines: line = line.strip().split('|') if len(line) == 6: y0 = int(line[3]) x0 = int(line[4]) line = line[5].split(';') for items in line: try: masky = append(masky,y0 + int(items.split(',')[0])) maskx = append(maskx,x0 + int(items.split(',')[1])) except: continue status = kepio.closeascii(lines,logfile,verbose) if len(maskx) == 0 or len(masky) == 0: message = 'ERROR -- KEPEXTRACT: ' + maskfile + ' contains no pixels.' status = kepmsg.err(logfile,message,verbose) # subimage physical WCS data if status == 0: crpix1p = cards2['CRPIX1P'].value crpix2p = cards2['CRPIX2P'].value crval1p = cards2['CRVAL1P'].value crval2p = cards2['CRVAL2P'].value cdelt1p = cards2['CDELT1P'].value cdelt2p = cards2['CDELT2P'].value # define new subimage bitmap... 
if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': aperx = array([],'int') apery = array([],'int') aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) if maskmap[i,j] == 0: aperb = append(aperb,0) else: aperb = append(aperb,1) maskmap[i,j] = 1 for k in range(len(maskx)): if aperx[-1] == maskx[k] and apery[-1] == masky[k]: aperb[-1] = 3 maskmap[i,j] = 3 # trap case where no aperture needs to be defined but pixel positions are still required for centroiding if status == 0 and maskfile.lower() == 'all': aperx = array([],'int') apery = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) # ...or use old subimage bitmap if status == 0 and 'aper' in maskfile.lower(): aperx = array([],'int') apery = array([],'int') aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperb = append(aperb,maskmap[i,j]) aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) # ...or use all pixels if status == 0 and maskfile.lower() == 'all': aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): if maskmap[i,j] == 0: aperb = append(aperb,0) else: aperb = append(aperb,3) maskmap[i,j] = 3 # subtract median pixel value for background? if status == 0: sky = array([],'float32') for i in range(len(time)): sky = append(sky,median(flux[i,:])) if not subback: sky[:] = 0.0 # legal mask defined? if status == 0: if len(aperb) == 0: message = 'ERROR -- KEPEXTRACT: no legal pixels within the subimage are defined.' 
status = kepmsg.err(logfile,message,verbose) # construct new table flux data if status == 0: naper = (aperb == 3).sum() ntime = len(time) sap_flux = array([],'float32') sap_flux_err = array([],'float32') sap_bkg = array([],'float32') sap_bkg_err = array([],'float32') raw_flux = array([],'float32') for i in range(len(time)): work1 = array([],'float64') work2 = array([],'float64') work3 = array([],'float64') work4 = array([],'float64') work5 = array([],'float64') for j in range(len(aperb)): if (aperb[j] == 3): work1 = append(work1,flux[i,j]-sky[i]) work2 = append(work2,flux_err[i,j]) work3 = append(work3,flux_bkg[i,j]) work4 = append(work4,flux_bkg_err[i,j]) work5 = append(work5,raw_cnts[i,j]) sap_flux = append(sap_flux,kepstat.sum(work1)) sap_flux_err = append(sap_flux_err,kepstat.sumerr(work2)) sap_bkg = append(sap_bkg,kepstat.sum(work3)) sap_bkg_err = append(sap_bkg_err,kepstat.sumerr(work4)) raw_flux = append(raw_flux,kepstat.sum(work5)) # construct new table moment data if status == 0: mom_centr1 = zeros(shape=(ntime)) mom_centr2 = zeros(shape=(ntime)) mom_centr1_err = zeros(shape=(ntime)) mom_centr2_err = zeros(shape=(ntime)) for i in range(ntime): xf = zeros(shape=(naper)) yf = zeros(shape=(naper)) f = zeros(shape=(naper)) xfe = zeros(shape=(naper)) yfe = zeros(shape=(naper)) fe = zeros(shape=(naper)) k = -1 for j in range(len(aperb)): if (aperb[j] == 3): k += 1 xf[k] = aperx[j] * flux[i,j] xfe[k] = aperx[j] * flux_err[i,j] yf[k] = apery[j] * flux[i,j] yfe[k] = apery[j] * flux_err[i,j] f[k] = flux[i,j] fe[k] = flux_err[i,j] xfsum = kepstat.sum(xf) yfsum = kepstat.sum(yf) fsum = kepstat.sum(f) xfsume = sqrt(kepstat.sum(square(xfe)) / naper) yfsume = sqrt(kepstat.sum(square(yfe)) / naper) fsume = sqrt(kepstat.sum(square(fe)) / naper) mom_centr1[i] = xfsum / fsum mom_centr2[i] = yfsum / fsum mom_centr1_err[i] = sqrt((xfsume / xfsum)**2 + ((fsume / fsum)**2)) mom_centr2_err[i] = sqrt((yfsume / yfsum)**2 + ((fsume / fsum)**2)) mom_centr1_err = mom_centr1_err * mom_centr1 mom_centr2_err = mom_centr2_err * mom_centr2 # construct new table PSF data if status == 0: psf_centr1 = zeros(shape=(ntime)) psf_centr2 = zeros(shape=(ntime)) psf_centr1_err = zeros(shape=(ntime)) psf_centr2_err = zeros(shape=(ntime)) modx = zeros(shape=(naper)) mody = zeros(shape=(naper)) k = -1 for j in range(len(aperb)): if (aperb[j] == 3): k += 1 modx[k] = aperx[j] mody[k] = apery[j] for i in range(ntime): modf = zeros(shape=(naper)) k = -1 guess = [mom_centr1[i], mom_centr2[i], nanmax(flux[i:]), 1.0, 1.0, 0.0, 0.0] for j in range(len(aperb)): if (aperb[j] == 3): k += 1 modf[k] = flux[i,j] args = (modx, mody, modf) try: ans = leastsq(kepfunc.PRFgauss2d,guess,args=args,xtol=1.0e-8,ftol=1.0e-4,full_output=True) s_sq = (ans[2]['fvec']**2).sum() / (ntime-len(guess)) psf_centr1[i] = ans[0][0] psf_centr2[i] = ans[0][1] except: pass try: psf_centr1_err[i] = sqrt(diag(ans[1] * s_sq))[0] except: psf_centr1_err[i] = numpy.nan try: psf_centr2_err[i] = sqrt(diag(ans[1] * s_sq))[1] except: psf_centr2_err[i] = numpy.nan # construct output primary extension if status == 0: hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].key not in hdu0.header.keys(): hdu0.header.update(cards0[i].key, cards0[i].value, cards0[i].comment) else: hdu0.header.cards[cards0[i].key].comment = cards0[i].comment status = kepkey.history(call,hdu0,outfile,logfile,verbose) outstr = HDUList(hdu0) # construct output light curve extension if status == 0: col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=time) col2 = 
Column(name='TIMECORR',format='E',unit='d',array=timecorr) col3 = Column(name='CADENCENO',format='J',array=cadenceno) col4 = Column(name='SAP_FLUX',format='E',array=sap_flux) col5 = Column(name='SAP_FLUX_ERR',format='E',array=sap_flux_err) col6 = Column(name='SAP_BKG',format='E',array=sap_bkg) col7 = Column(name='SAP_BKG_ERR',format='E',array=sap_bkg_err) col8 = Column(name='PDCSAP_FLUX',format='E',array=sap_flux) col9 = Column(name='PDCSAP_FLUX_ERR',format='E',array=sap_flux_err) col10 = Column(name='SAP_QUALITY',format='J',array=quality) col11 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=psf_centr1) col12 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=psf_centr1_err) col13 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=psf_centr2) col14 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=psf_centr2_err) col15 = Column(name='MOM_CENTR1',format='E',unit='pixel',array=mom_centr1) col16 = Column(name='MOM_CENTR1_ERR',format='E',unit='pixel',array=mom_centr1_err) col17 = Column(name='MOM_CENTR2',format='E',unit='pixel',array=mom_centr2) col18 = Column(name='MOM_CENTR2_ERR',format='E',unit='pixel',array=mom_centr2_err) col19 = Column(name='POS_CORR1',format='E',unit='pixel',array=pos_corr1) col20 = Column(name='POS_CORR2',format='E',unit='pixel',array=pos_corr2) col21 = Column(name='RAW_FLUX',format='E',array=raw_flux) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \ col12,col13,col14,col15,col16,col17,col18,col19,col20,col21]) hdu1 = new_table(cols) hdu1.header.update('TTYPE1','TIME','column title: data time stamps') hdu1.header.update('TFORM1','D','data type: float64') hdu1.header.update('TUNIT1','BJD - 2454833','column units: barycenter corrected JD') hdu1.header.update('TDISP1','D12.7','column display format') hdu1.header.update('TTYPE2','TIMECORR','column title: barycentric-timeslice correction') hdu1.header.update('TFORM2','E','data type: float32') hdu1.header.update('TUNIT2','d','column units: days') hdu1.header.update('TTYPE3','CADENCENO','column title: unique cadence number') hdu1.header.update('TFORM3','J','column format: signed integer32') hdu1.header.update('TTYPE4','SAP_FLUX','column title: aperture photometry flux') hdu1.header.update('TFORM4','E','column format: float32') hdu1.header.update('TUNIT4','e-/s','column units: electrons per second') hdu1.header.update('TTYPE5','SAP_FLUX_ERR','column title: aperture phot. flux error') hdu1.header.update('TFORM5','E','column format: float32') hdu1.header.update('TUNIT5','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE6','SAP_BKG','column title: aperture phot. background flux') hdu1.header.update('TFORM6','E','column format: float32') hdu1.header.update('TUNIT6','e-/s','column units: electrons per second') hdu1.header.update('TTYPE7','SAP_BKG_ERR','column title: ap. phot. 
background flux error') hdu1.header.update('TFORM7','E','column format: float32') hdu1.header.update('TUNIT7','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE8','PDCSAP_FLUX','column title: PDC photometry flux') hdu1.header.update('TFORM8','E','column format: float32') hdu1.header.update('TUNIT8','e-/s','column units: electrons per second') hdu1.header.update('TTYPE9','PDCSAP_FLUX_ERR','column title: PDC flux error') hdu1.header.update('TFORM9','E','column format: float32') hdu1.header.update('TUNIT9','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE10','SAP_QUALITY','column title: aperture photometry quality flag') hdu1.header.update('TFORM10','J','column format: signed integer32') hdu1.header.update('TTYPE11','PSF_CENTR1','column title: PSF fitted column centroid') hdu1.header.update('TFORM11','E','column format: float32') hdu1.header.update('TUNIT11','pixel','column units: pixel') hdu1.header.update('TTYPE12','PSF_CENTR1_ERR','column title: PSF fitted column error') hdu1.header.update('TFORM12','E','column format: float32') hdu1.header.update('TUNIT12','pixel','column units: pixel') hdu1.header.update('TTYPE13','PSF_CENTR2','column title: PSF fitted row centroid') hdu1.header.update('TFORM13','E','column format: float32') hdu1.header.update('TUNIT13','pixel','column units: pixel') hdu1.header.update('TTYPE14','PSF_CENTR2_ERR','column title: PSF fitted row error') hdu1.header.update('TFORM14','E','column format: float32') hdu1.header.update('TUNIT14','pixel','column units: pixel') hdu1.header.update('TTYPE15','MOM_CENTR1','column title: moment-derived column centroid') hdu1.header.update('TFORM15','E','column format: float32') hdu1.header.update('TUNIT15','pixel','column units: pixel') hdu1.header.update('TTYPE16','MOM_CENTR1_ERR','column title: moment-derived column error') hdu1.header.update('TFORM16','E','column format: float32') hdu1.header.update('TUNIT16','pixel','column units: pixel') hdu1.header.update('TTYPE17','MOM_CENTR2','column title: moment-derived row centroid') hdu1.header.update('TFORM17','E','column format: float32') hdu1.header.update('TUNIT17','pixel','column units: pixel') hdu1.header.update('TTYPE18','MOM_CENTR2_ERR','column title: moment-derived row error') hdu1.header.update('TFORM18','E','column format: float32') hdu1.header.update('TUNIT18','pixel','column units: pixel') hdu1.header.update('TTYPE19','POS_CORR1','column title: col correction for vel. abbern') hdu1.header.update('TFORM19','E','column format: float32') hdu1.header.update('TUNIT19','pixel','column units: pixel') hdu1.header.update('TTYPE20','POS_CORR2','column title: row correction for vel. 
abbern') hdu1.header.update('TFORM20','E','column format: float32') hdu1.header.update('TUNIT20','pixel','column units: pixel') hdu1.header.update('TTYPE21','RAW_FLUX','column title: raw aperture photometry flux') hdu1.header.update('TFORM21','E','column format: float32') hdu1.header.update('TUNIT21','e-/s','column units: electrons per second') hdu1.header.update('EXTNAME','LIGHTCURVE','name of extension') for i in range(len(cards1)): if (cards1[i].key not in hdu1.header.keys() and cards1[i].key[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY', '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN', '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC', '12PC','21PC','22PC']): hdu1.header.update(cards1[i].key, cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension if status == 0: hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].key not in hdu2.header.keys(): hdu2.header.update(cards2[i].key, cards2[i].value, cards2[i].comment) else: hdu2.header.cards[cards2[i].key].comment = cards2[i].comment outstr.append(hdu2) # write output file if status == 0: outstr.writeto(outfile,checksum=True) # close input structure if status == 0: status = kepio.closefits(instr,logfile,verbose) # end time kepmsg.clock('KEPEXTRACT finished at',logfile,verbose)
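# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline above): the per-cadence loop
# above computes flux-weighted "moment" centroids over the pixels flagged 3 in
# the aperture bitmap. The hypothetical helper below is a vectorized numpy
# version of the same idea, shown only to clarify the calculation; it assumes
# flux has shape (ntime, npix) and aperx, apery, aperb are the 1-D aperture
# arrays built above.

def _moment_centroid_sketch(flux, aperx, apery, aperb):
    import numpy
    sel = numpy.asarray(aperb) == 3                                       # pixels inside the photometric aperture
    f = numpy.asarray(flux)[:, sel]                                       # (ntime, naper) aperture fluxes
    fsum = numpy.nansum(f, axis=1)                                        # summed flux per cadence
    centr1 = numpy.nansum(f * numpy.asarray(aperx)[sel], axis=1) / fsum   # flux-weighted column centroid
    centr2 = numpy.nansum(f * numpy.asarray(apery)[sel], axis=1) / fsum   # flux-weighted row centroid
    return centr1, centr2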
def kepprf(infile, plotfile, rownum, columns, rows, fluxes, border, background, focus, prfdir, xtol, ftol, imscale, colmap, plt, verbose, logfile, status, cmdLine=False): # input arguments print "... input arguments" status = 0 seterr(all="ignore") # log the call print "... logging the call" hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPPRF -- ' call += 'infile=' + infile + ' ' call += 'plotfile=' + plotfile + ' ' call += 'rownum=' + str(rownum) + ' ' call += 'columns=' + columns + ' ' call += 'rows=' + rows + ' ' call += 'fluxes=' + fluxes + ' ' call += 'border=' + str(border) + ' ' bground = 'n' if (background): bground = 'y' call += 'background=' + bground + ' ' focs = 'n' if (focus): focs = 'y' call += 'focus=' + focs + ' ' call += 'prfdir=' + prfdir + ' ' call += 'xtol=' + str(xtol) + ' ' call += 'ftol=' + str(xtol) + ' ' call += 'imscale=' + imscale + ' ' call += 'colmap=' + colmap + ' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot=' + plotit + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # test log file logfile = kepmsg.test(logfile) # start time print "... starting kepler time" kepmsg.clock('KEPPRF started at', logfile, verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # construct inital guess vector for fit print " status = " + str(status) print "... initial guess" if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in xrange(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in xrange(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile, message, verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile, message, verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border + 1) * 2): guess.append(0.0) if status == 0 and focus: guess.append(1.0) guess.append(1.0) guess.append(0.0) # open TPF FITS file print "... open tpf file" if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile, message, verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask definition data from TPF file print "... read mask definition" if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition( infile, logfile, verbose) npix = numpy.size(numpy.nonzero(maskimg)[0]) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # is this a good row with finite timestamp and pixels? if status == 0: if not numpy.isfinite(barytime[rownum - 1]) or numpy.isnan(numpy.nansum(fluxpixels[rownum - 1, :])): message = 'ERROR -- KEPPRF: Row ' + str( rownum) + ' has a bad quality timestamp or undefined pixels' status = kepmsg.err(logfile, message, verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum - 1, :] ferr = errpixels[rownum - 1, :] DATx = arange(column, column + xdim) DATy = arange(row, row + ydim) # image scale and intensity limits of pixel data if status == 0: n = 0 DATimg = empty((ydim, xdim)) ERRimg = empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): DATimg[i, j] = flux[n] ERRimg[i, j] = ferr[n] n += 1 # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.'
+ str( output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir status = kepmsg.err(logfile, message, verbose) # read PRF images if status == 0: prfn = [0, 0, 0, 0, 0] crpix1p = numpy.zeros((5), dtype='float32') crpix2p = numpy.zeros((5), dtype='float32') crval1p = numpy.zeros((5), dtype='float32') crval2p = numpy.zeros((5), dtype='float32') cdelt1p = numpy.zeros((5), dtype='float32') cdelt2p = numpy.zeros((5), dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) PRFx = arange(0.5, shape(prfn[0])[1] + 0.5) PRFy = arange(0.5, shape(prfn[0])[0] + 0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]), dtype='float32') prfWeight = zeros((5), dtype='float32') for i in xrange(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) prf = prf / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline( PRFx, PRFy, prf) # construct mesh for background model if status == 0 and background: bx = numpy.arange(1., float(xdim + 1)) by = numpy.arange(1., float(ydim + 1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data if status == 0: start = time.time() if focus and background: args = (DATx, DATy, DATimg, nsrc, border, xx, yy, PRFx, PRFy, splineInterpolation) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif focus and not background: args = (DATx, DATy, DATimg, nsrc, PRFx, PRFy, splineInterpolation) ans = fmin_powell(kepfunc.PRFwithFocus, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif background and not focus: args = (DATx, DATy, DATimg, nsrc, border, xx, yy, splineInterpolation) ans = fmin_powell(kepfunc.PRFwithBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) else: args = (DATx, DATy, DATimg, splineInterpolation) ans = fmin_powell(kepfunc.PRF, guess, args=args, xtol=xtol, ftol=ftol, disp=False) print 'Convergence time = %.2fs\n' % (time.time() - start) # pad the PRF data if the PRF array is smaller than the data array if status == 0: flux = [] OBJx = [] OBJy = [] PRFmod = numpy.zeros((prfDimY, prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = numpy.zeros((prfDimY, prfDimX)) superPRF = zeros((prfDimY + 1, prfDimX + 1)) superPRF[abs(PRFy0):abs(PRFy0) + shape(prf)[0], abs(PRFx0):abs(PRFx0) + shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf, -angle, reshape=False, mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc + i]) OBJy.append(ans[nsrc * 2 + i]) # calculate best-fit model y = (OBJy[i] - mean(DATy)) / cdelt1p[0] x = (OBJx[i] - mean(DATx)) / cdelt2p[0] prfTmp = shift(prf, [y, x], order=1, 
mode='constant') prfTmp = prfTmp[PRFy0:PRFy0 + prfDimY, PRFx0:PRFx0 + prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters if verbose: txt = 'Flux = %10.2f e-/s ' % flux[i] txt += 'X = %9.4f pix ' % OBJx[i] txt += 'Y = %9.4f pix ' % OBJy[i] kepmsg.log(logfile, txt, True) if verbose and background: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3] else: bcoeff = array([ ans[nsrc * 3:nsrc * 3 + bterms], ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2] ]) bkg = kepfunc.polyval2d(xx, yy, bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = '\n Mean background = %.2f e-/s' % b kepmsg.log(logfile, txt, True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if verbose and focus: if not background: kepmsg.log(logfile, '', True) kepmsg.log(logfile, ' X/Y focus factors = %.3f/%.3f' % (wx, wy), True) kepmsg.log(logfile, 'PRF rotation angle = %.2f deg' % angle, True) # constuct model PRF in detector coordinates if status == 0: PRFfit = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle, splineInterpolation) if background and bterms == 1: PRFfit = PRFfit + b if background and bterms > 1: PRFfit = PRFfit + bkg # calculate residual of DATA - FIT if status == 0: PRFres = DATimg - PRFfit FLUXres = numpy.nansum(PRFres) # calculate the sum squared difference between data and model if status == 0: Pearson = abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit)) Chi2 = numpy.nansum( numpy.square(DATimg - PRFfit) / numpy.square(ERRimg)) DegOfFreedom = npix - len(guess) try: kepmsg.log(logfile, '\nResidual flux = %.6f e-/s' % FLUXres, True) kepmsg.log( logfile, 'Pearson\'s chi^2 test = %d for %d dof' % (Pearson, DegOfFreedom), True) except: pass # kepmsg.log(logfile,'Chi^2 test = %d for %d dof' % (Chi2,DegOfFreedom),True) # image scale and intensity limits for plotting images if status == 0: imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, imscale) if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': print zminpr, zmaxpr, numpy.max(zmaxpr) zmaxpr = numpy.max(zmaxpr) zminpr = zmaxpr / 2 # plot style if status == 0: try: params = { 'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 24, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10 } pylab.rcParams.update(params) except: pass pylab.figure(figsize=[10, 10]) pylab.clf() plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.06, 0.52, 'flux', colmap) plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, ydim, 0.52, 0.52, 'model', colmap) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, 'b', '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, 'b', '-', 3.0) plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.06, 0.06, 'fit', colmap) plotimage(imgres_pl, zminfl, zmaxfl, 4, row, column, xdim, ydim, 0.52, 0.06, 'residual', colmap) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0 and plt: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPPRF ended at', logfile, verbose) return
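# ----------------------------------------------------------------------------
# Illustrative sketch (not part of kepprf above): the calibrated PRF used for
# fitting is built by combining the five per-channel PRF images with weights
# that fall off with distance from the target position. The hypothetical
# helper below mirrors that interpolation step; prfn is the list of five PRF
# images and crval1p/crval2p their reference column/row positions, as read by
# kepio.readPRFimage above. In this sketch the zero-distance guard uses a
# small epsilon so that a co-located reference PRF dominates the average.

def _interpolate_prf_sketch(prfn, crval1p, crval2p, column, row, cdelt1p, cdelt2p):
    import numpy
    prf = numpy.zeros(numpy.shape(prfn[0]), dtype='float32')
    for i in range(5):
        dist = numpy.sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2)
        prf = prf + prfn[i] / max(dist, 1.0e-6)        # inverse-distance weighting
    prf = prf / numpy.nansum(prf)                      # normalize to unit volume
    return prf / cdelt1p[0] / cdelt2p[0]               # per-pixel area normalization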
def keppca(infile,maskfile,outfile,components,plotpca,nreps,clobber,verbose,logfile,status,cmdLine=False): # startup parameters status = 0 labelsize = 32 ticksize = 18 xsize = 16 ysize = 10 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # check for the optional MDP dependency before doing any work try: import mdp except: msg = 'ERROR -- KEPPCA: this task has an external python dependency on MDP, a Modular toolkit for Data Processing (http://mdp-toolkit.sourceforge.net). In order to take advantage of this PCA task, the user must first install MDP with their current python distribution. Note carefully that you may have more than one python installation on your machine, and ensure that MDP is installed with the same version of python that the PyKE tools employ. Installation instructions for MDP can be found at the URL provided above.' status = kepmsg.err(None,msg,True) # log the call if status == 0: hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPCA -- ' call += 'infile='+infile+' ' call += 'maskfile='+maskfile+' ' call += 'outfile='+outfile+' ' call += 'components='+components+' ' ppca = 'n' if (plotpca): ppca = 'y' call += 'plotpca='+ppca+ ' ' call += 'nmaps='+str(nreps)+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time if status == 0: kepmsg.clock('KEPPCA started at',logfile,verbose) # test log file if status == 0: logfile = kepmsg.test(logfile) # clobber output file if status == 0: if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPPCA: ' + outfile + ' exists. 
Use clobber=yes' status = kepmsg.err(logfile,message,verbose) # Set output file names - text file with data and plot if status == 0: dataout = copy(outfile) repname = re.sub('.fits','.png',outfile) # open input file if status == 0: instr = pyfits.open(infile,mode='readonly',memmap=True) tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) # open TPF FITS file if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \ kepio.readTPF(infile,'FLUX_BKG',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \ kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr1, status = \ kepio.readTPF(infile,'POS_CORR1',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pcorr2, status = \ kepio.readTPF(infile,'POS_CORR2',logfile,verbose) # Save original data dimensions, in case of using maskfile if status == 0: xdimorig = xdim ydimorig = ydim # read mask definition file if it has been supplied if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': maskx = array([],'int') masky = array([],'int') lines, status = kepio.openascii(maskfile,'r',logfile,verbose) for line in lines: line = line.strip().split('|') if len(line) == 6: y0 = int(line[3]) x0 = int(line[4]) line = line[5].split(';') for items in line: try: masky = numpy.append(masky,y0 + int(items.split(',')[0])) maskx = numpy.append(maskx,x0 + int(items.split(',')[1])) except: continue status = kepio.closeascii(lines,logfile,verbose) if len(maskx) == 0 or len(masky) == 0: message = 'ERROR -- KEPPCA: ' + maskfile + ' contains no pixels.' status = kepmsg.err(logfile,message,verbose) xdim = max(maskx) - min(maskx) + 1 # Find largest x dimension of mask ydim = max(masky) - min(masky) + 1 # Find largest y dimension of mask # pad mask to ensure it is rectangular workx = array([],'int') worky = array([],'int') for ip in arange(min(maskx),max(maskx) + 1): for jp in arange(min(masky),max(masky) + 1): workx = append(workx,ip) worky = append(worky,jp) maskx = workx masky = worky # define new subimage bitmap... 
if status == 0 and maskfile.lower() != 'all': aperx = numpy.array([],'int') apery = numpy.array([],'int') aperb = maskx - x0 + xdimorig * (masky - y0) # aperb is an array that contains the pixel numbers in the mask npix = len(aperb) # ...or use all pixels if status == 0 and maskfile.lower() == 'all': npix = xdimorig*ydimorig aperb = array([],'int') aperb = numpy.r_[0:npix] # legal mask defined? if status == 0: if len(aperb) == 0: message = 'ERROR -- KEPPCA: no legal pixels within the subimage are defined.' status = kepmsg.err(logfile,message,verbose) # Identify principal components desired if status == 0: pcaout = [] txt = components.strip().split(',') for work1 in txt: try: pcaout.append(int(work1.strip())) except: work2 = work1.strip().split('-') try: for work3 in range(int(work2[0]),int(work2[1]) + 1): pcaout.append(work3) except: message = 'ERROR -- KEPPCA: cannot understand principal component list requested' status = kepmsg.err(logfile,message,verbose) if status == 0: pcaout = set(sort(pcaout)) pcarem = array(list(pcaout))-1 # The list of pca component numbers to be removed # Initialize arrays and variables, and apply pixel mask to the data if status == 0: ntim = 0 time = numpy.array([],dtype='float64') timecorr = numpy.array([],dtype='float32') cadenceno = numpy.array([],dtype='int') pixseries = numpy.array([],dtype='float32') errseries = numpy.array([],dtype='float32') bkgseries = numpy.array([],dtype='float32') berseries = numpy.array([],dtype='float32') quality = numpy.array([],dtype='float32') pos_corr1 = numpy.array([],dtype='float32') pos_corr2 = numpy.array([],dtype='float32') nrows = numpy.size(fluxpixels,0) # Apply the pixel mask so we are left with only the desired pixels if status == 0: pixseriesb = fluxpixels[:,aperb] errseriesb = errpixels[:,aperb] bkgseriesb = flux_bkg[:,aperb] berseriesb = flux_bkg_err[:,aperb] # Read in the data to various arrays if status == 0: for i in range(nrows): if qual[i] < 10000 and \ numpy.isfinite(barytime[i]) and \ numpy.isfinite(fluxpixels[i,int(ydim*xdim/2+0.5)]) and \ numpy.isfinite(fluxpixels[i,1+int(ydim*xdim/2+0.5)]): ntim += 1 time = numpy.append(time,barytime[i]) timecorr = numpy.append(timecorr,tcorr[i]) cadenceno = numpy.append(cadenceno,cadno[i]) pixseries = numpy.append(pixseries,pixseriesb[i]) errseries = numpy.append(errseries,errseriesb[i]) bkgseries = numpy.append(bkgseries,bkgseriesb[i]) berseries = numpy.append(berseries,berseriesb[i]) quality = numpy.append(quality,qual[i]) pos_corr1 = numpy.append(pos_corr1,pcorr1[i]) pos_corr2 = numpy.append(pos_corr2,pcorr2[i]) pixseries = numpy.reshape(pixseries,(ntim,npix)) errseries = numpy.reshape(errseries,(ntim,npix)) bkgseries = numpy.reshape(bkgseries,(ntim,npix)) berseries = numpy.reshape(berseries,(ntim,npix)) tmp = numpy.median(pixseries,axis=1) for i in range(len(tmp)): pixseries[i] = pixseries[i] - tmp[i] # Figure out which pixels are undefined/nan and remove them. 
Keep track for adding back in later if status == 0: nanpixels = numpy.array([],dtype='int') i = 0 while (i < npix): if numpy.isnan(pixseries[0,i]): nanpixels = numpy.append(nanpixels,i) npix = npix - 1 i = i + 1 pixseries = numpy.delete(pixseries,nanpixels,1) errseries = numpy.delete(errseries,nanpixels,1) pixseries[numpy.isnan(pixseries)] = random.gauss(100,10) errseries[numpy.isnan(errseries)] = 10 # Compute statistical weights, means, standard deviations if status == 0: weightseries = (pixseries/errseries)**2 pixMean = numpy.average(pixseries,axis=0,weights=weightseries) pixStd = numpy.std(pixseries,axis=0) # Normalize the input by subtracting the mean and divising by the standard deviation. # This makes it a correlation-based PCA, which is what we want. if status == 0: pixseriesnorm = (pixseries - pixMean)/pixStd # Number of principal components to compute. Setting it equal to the number of pixels if status == 0: nvecin = npix # Run PCA using the MDP Whitening PCA, which produces normalized PCA components (zero mean and unit variance) if status == 0: pcan = mdp.nodes.WhiteningNode(svd=True) pcar = pcan.execute(pixseriesnorm) eigvec = pcan.get_recmatrix() model = pcar # Re-insert nan columns as zeros if status == 0: for i in range(0,len(nanpixels)): nanpixels[i] = nanpixels[i]-i eigvec = numpy.insert(eigvec,nanpixels,0,1) pixMean = numpy.insert(pixMean,nanpixels,0,0) # Make output eigenvectors (correlation images) into xpix by ypix images if status == 0: eigvec = eigvec.reshape(nvecin,ydim,xdim) # Calculate sum of all pixels to display as raw lightcurve and other quantities if status == 0: pixseriessum = sum(pixseries,axis=1) nrem=len(pcarem) # Number of components to remove nplot = npix # Number of pcas to plot - currently set to plot all components, but could set # nplot = nrem to just plot as many components as is being removed # Subtract components by fitting them to the summed light curve if status == 0: x0 = numpy.tile(-1.0,1) for k in range(0,nrem): def f(x): fluxcor = pixseriessum for k in range(0,len(x)): fluxcor = fluxcor - x[k]*model[:,pcarem[k]] return mad(fluxcor) if k==0: x0 = array([-1.0]) else: x0 = numpy.append(x0,1.0) myfit = scipy.optimize.fmin(f,x0,maxiter=50000,maxfun=50000,disp=False) x0 = myfit # Now that coefficients for all components have been found, subtract them to produce a calibrated time-series, # and then divide by the robust mean to produce a normalized time series as well if status == 0: c = myfit fluxcor = pixseriessum for k in range(0,nrem): fluxcor = fluxcor - c[k]*model[:,pcarem[k]] normfluxcor = fluxcor/mean(reject_outliers(fluxcor,2)) # input file data if status == 0: cards0 = instr[0].header.cards cards1 = instr[1].header.cards cards2 = instr[2].header.cards table = instr[1].data[:] maskmap = copy(instr[2].data) # subimage physical WCS data if status == 0: crpix1p = cards2['CRPIX1P'].value crpix2p = cards2['CRPIX2P'].value crval1p = cards2['CRVAL1P'].value crval2p = cards2['CRVAL2P'].value cdelt1p = cards2['CDELT1P'].value cdelt2p = cards2['CDELT2P'].value # dummy columns for output file if status == 0: sap_flux_err = numpy.empty(len(time)); sap_flux_err[:] = numpy.nan sap_bkg = numpy.empty(len(time)); sap_bkg[:] = numpy.nan sap_bkg_err = numpy.empty(len(time)); sap_bkg_err[:] = numpy.nan pdc_flux = numpy.empty(len(time)); pdc_flux[:] = numpy.nan pdc_flux_err = numpy.empty(len(time)); pdc_flux_err[:] = numpy.nan psf_centr1 = numpy.empty(len(time)); psf_centr1[:] = numpy.nan psf_centr1_err = numpy.empty(len(time)); psf_centr1_err[:] = numpy.nan 
psf_centr2 = numpy.empty(len(time)); psf_centr2[:] = numpy.nan psf_centr2_err = numpy.empty(len(time)); psf_centr2_err[:] = numpy.nan mom_centr1 = numpy.empty(len(time)); mom_centr1[:] = numpy.nan mom_centr1_err = numpy.empty(len(time)); mom_centr1_err[:] = numpy.nan mom_centr2 = numpy.empty(len(time)); mom_centr2[:] = numpy.nan mom_centr2_err = numpy.empty(len(time)); mom_centr2_err[:] = numpy.nan # mask bitmap if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) if maskmap[i,j] == 0: pass else: maskmap[i,j] = 1 for k in range(len(maskx)): if aperx[-1] == maskx[k] and apery[-1] == masky[k]: maskmap[i,j] = 3 # construct output primary extension if status == 0: hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].keyword not in hdu0.header.keys(): hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment) else: hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment status = kepkey.history(call,hdu0,outfile,logfile,verbose) outstr = HDUList(hdu0) # construct output light curve extension if status == 0: col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=time) col2 = Column(name='TIMECORR',format='E',unit='d',array=timecorr) col3 = Column(name='CADENCENO',format='J',array=cadenceno) col4 = Column(name='SAP_FLUX',format='E',unit='e-/s',array=pixseriessum) col5 = Column(name='SAP_FLUX_ERR',format='E',unit='e-/s',array=sap_flux_err) col6 = Column(name='SAP_BKG',format='E',unit='e-/s',array=sap_bkg) col7 = Column(name='SAP_BKG_ERR',format='E',unit='e-/s',array=sap_bkg_err) col8 = Column(name='PDCSAP_FLUX',format='E',unit='e-/s',array=pdc_flux) col9 = Column(name='PDCSAP_FLUX_ERR',format='E',unit='e-/s',array=pdc_flux_err) col10 = Column(name='SAP_QUALITY',format='J',array=quality) col11 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=psf_centr1) col12 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=psf_centr1_err) col13 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=psf_centr2) col14 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=psf_centr2_err) col15 = Column(name='MOM_CENTR1',format='E',unit='pixel',array=mom_centr1) col16 = Column(name='MOM_CENTR1_ERR',format='E',unit='pixel',array=mom_centr1_err) col17 = Column(name='MOM_CENTR2',format='E',unit='pixel',array=mom_centr2) col18 = Column(name='MOM_CENTR2_ERR',format='E',unit='pixel',array=mom_centr2_err) col19 = Column(name='POS_CORR1',format='E',unit='pixel',array=pos_corr1) col20 = Column(name='POS_CORR2',format='E',unit='pixel',array=pos_corr2) col21 = Column(name='PCA_FLUX',format='E',unit='e-/s',array=fluxcor) col22 = Column(name='PCA_FLUX_NRM',format='E',array=normfluxcor) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \ col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22]) hdu1 = new_table(cols) hdu1.header['TTYPE1'] = ('TIME','column title: data time stamps') hdu1.header['TFORM1'] = ('D','data type: float64') hdu1.header['TUNIT1'] = ('BJD - 2454833','column units: barycenter corrected JD') hdu1.header['TDISP1'] = ('D12.7','column display format') hdu1.header['TTYPE2'] = ('TIMECORR','column title: barycentric-timeslice correction') hdu1.header['TFORM2'] = ('E','data type: float32') hdu1.header['TUNIT2'] = ('d','column units: days') hdu1.header['TTYPE3'] = ('CADENCENO','column title: unique 
cadence number') hdu1.header['TFORM3'] = ('J','column format: signed integer32') hdu1.header['TTYPE4'] = ('SAP_FLUX','column title: aperture photometry flux') hdu1.header['TFORM4'] = ('E','column format: float32') hdu1.header['TUNIT4'] = ('e-/s','column units: electrons per second') hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR','column title: aperture phot. flux error') hdu1.header['TFORM5'] = ('E','column format: float32') hdu1.header['TUNIT5'] = ('e-/s','column units: electrons per second (1-sigma)') hdu1.header['TTYPE6'] = ('SAP_BKG','column title: aperture phot. background flux') hdu1.header['TFORM6'] = ('E','column format: float32') hdu1.header['TUNIT6'] = ('e-/s','column units: electrons per second') hdu1.header['TTYPE7'] = ('SAP_BKG_ERR','column title: ap. phot. background flux error') hdu1.header['TFORM7'] = ('E','column format: float32') hdu1.header['TUNIT7'] = ('e-/s','column units: electrons per second (1-sigma)') hdu1.header['TTYPE8'] = ('PDCSAP_FLUX','column title: PDC photometry flux') hdu1.header['TFORM8'] = ('E','column format: float32') hdu1.header['TUNIT8'] = ('e-/s','column units: electrons per second') hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR','column title: PDC flux error') hdu1.header['TFORM9'] = ('E','column format: float32') hdu1.header['TUNIT9'] = ('e-/s','column units: electrons per second (1-sigma)') hdu1.header['TTYPE10'] = ('SAP_QUALITY','column title: aperture photometry quality flag') hdu1.header['TFORM10'] = ('J','column format: signed integer32') hdu1.header['TTYPE11'] = ('PSF_CENTR1','column title: PSF fitted column centroid') hdu1.header['TFORM11'] = ('E','column format: float32') hdu1.header['TUNIT11'] = ('pixel','column units: pixel') hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR','column title: PSF fitted column error') hdu1.header['TFORM12'] = ('E','column format: float32') hdu1.header['TUNIT12'] = ('pixel','column units: pixel') hdu1.header['TTYPE13'] = ('PSF_CENTR2','column title: PSF fitted row centroid') hdu1.header['TFORM13'] = ('E','column format: float32') hdu1.header['TUNIT13'] = ('pixel','column units: pixel') hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR','column title: PSF fitted row error') hdu1.header['TFORM14'] = ('E','column format: float32') hdu1.header['TUNIT14'] = ('pixel','column units: pixel') hdu1.header['TTYPE15'] = ('MOM_CENTR1','column title: moment-derived column centroid') hdu1.header['TFORM15'] = ('E','column format: float32') hdu1.header['TUNIT15'] = ('pixel','column units: pixel') hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR','column title: moment-derived column error') hdu1.header['TFORM16'] = ('E','column format: float32') hdu1.header['TUNIT16'] = ('pixel','column units: pixel') hdu1.header['TTYPE17'] = ('MOM_CENTR2','column title: moment-derived row centroid') hdu1.header['TFORM17'] = ('E','column format: float32') hdu1.header['TUNIT17'] = ('pixel','column units: pixel') hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR','column title: moment-derived row error') hdu1.header['TFORM18'] = ('E','column format: float32') hdu1.header['TUNIT18'] = ('pixel','column units: pixel') hdu1.header['TTYPE19'] = ('POS_CORR1','column title: col correction for vel. abbern') hdu1.header['TFORM19'] = ('E','column format: float32') hdu1.header['TUNIT19'] = ('pixel','column units: pixel') hdu1.header['TTYPE20'] = ('POS_CORR2','column title: row correction for vel. 
abbern') hdu1.header['TFORM20'] = ('E','column format: float32') hdu1.header['TUNIT20'] = ('pixel','column units: pixel') hdu1.header['TTYPE21'] = ('PCA_FLUX','column title: PCA-corrected flux') hdu1.header['TFORM21'] = ('E','column format: float32') hdu1.header['TUNIT21'] = ('pixel','column units: e-/s') hdu1.header['TTYPE22'] = ('PCA_FLUX_NRM','column title: normalized PCA-corrected flux') hdu1.header['TFORM22'] = ('E','column format: float32') hdu1.header['EXTNAME'] = ('LIGHTCURVE','name of extension') for i in range(len(cards1)): if (cards1[i].keyword not in hdu1.header.keys() and cards1[i].keyword[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY', '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN', '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC', '12PC','21PC','22PC']): hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension if status == 0: hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].keyword not in hdu2.header.keys(): hdu2.header[cards2[i].keyword] = (cards2[i].value, cards2[i].comment) else: hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment outstr.append(hdu2) # construct principal component table if status == 0: cols = [Column(name='TIME',format='E',unit='BJD - 2454833',array=time)] for i in range(len(pcar[0,:])): colname = 'PC' + str(i + 1) col = Column(name=colname,format='E',array=pcar[:,i]) cols.append(col) hdu3 = new_table(ColDefs(cols)) hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS','name of extension') hdu3.header['TTYPE1'] = ('TIME','column title: data time stamps') hdu3.header['TFORM1'] = ('D','data type: float64') hdu3.header['TUNIT1'] = ('BJD - 2454833','column units: barycenter corrected JD') hdu3.header['TDISP1'] = ('D12.7','column display format') for i in range(len(pcar[0,:])): hdu3.header['TTYPE' + str(i + 2)] = \ ('PC' + str(i + 1), 'column title: principal component number' + str(i + 1)) hdu3.header['TFORM' + str(i + 2)] = ('E','column format: float32') outstr.append(hdu3) # write output file if status == 0: outstr.writeto(outfile) # close input structure if status == 0: status = kepio.closefits(instr,logfile,verbose) # Create PCA report if status == 0 and plotpca: npp = 7 # Number of plots per page l = 1 repcnt = 1 for k in range(nreps): # First plot of every pagewith flux image, flux and calibrated time series status = kepplot.define(16,12,logfile,verbose) if (k % (npp - 1) == 0): pylab.figure(figsize=[10,16]) subplot2grid((npp,6),(0,0), colspan=2) # imshow(log10(pixMean.reshape(xdim,ydim).T-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu') imshow(log10(flipud(pixMean.reshape(ydim,xdim))-min(pixMean)+1),interpolation="nearest",cmap='RdYlBu') xticks([]) yticks([]) ax1 = subplot2grid((npp,6),(0,2), colspan=4) px = copy(time) + bjdref py = copy(pixseriessum) px, xlab, status = kepplot.cleanx(px,logfile,verbose) py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) kepplot.RangeOfPlot(px,py,0.01,False) kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True) py = copy(fluxcor) py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) plot(px,py,marker='.',color='r',linestyle='',markersize=1.0) kepplot.labels('',re.sub('\)','',re.sub('Flux \(','',ylab)),'k',18) grid() setp(ax1.get_xticklabels(), visible=False) # plot principal components subplot2grid((npp,6),(l,0), colspan=2) imshow(eigvec[k],interpolation="nearest",cmap='RdYlBu') xlim(-0.5,xdim-0.5) ylim(-0.5,ydim-0.5) xticks([]) yticks([]) # The last plot on the page that should 
have the xlabel if ( k% (npp - 1) == npp - 2 or k == nvecin - 1): subplot2grid((npp,6),(l,2), colspan=4) py = copy(model[:,k]) kepplot.RangeOfPlot(px,py,0.01,False) kepplot.plot1d(px,py,cadence,'r',lwidth,'g',falpha,True) kepplot.labels(xlab,'PC ' + str(k+1),'k',18) pylab.grid() pylab.tight_layout() l = 1 pylab.savefig(re.sub('.png','_%d.png' % repcnt,repname)) if not cmdLine: kepplot.render(cmdLine) repcnt += 1 # The other plots on the page that should have no xlabel else: ax2 = subplot2grid((npp,6),(l,2), colspan=4) py = copy(model[:,k]) kepplot.RangeOfPlot(px,py,0.01,False) kepplot.plot1d(px,py,cadence,'r',lwidth,'g',falpha,True) kepplot.labels('','PC ' + str(k+1),'k',18) grid() setp(ax2.get_xticklabels(), visible=False) pylab.tight_layout() l=l+1 pylab.savefig(re.sub('.png','_%d.png' % repcnt,repname)) if not cmdLine: kepplot.render(cmdLine) # plot style and size if status == 0 and plotpca: status = kepplot.define(labelsize,ticksize,logfile,verbose) pylab.figure(figsize=[xsize,ysize]) pylab.clf() # plot aperture photometry and PCA corrected data if status == 0 and plotpca: ax = kepplot.location([0.06,0.54,0.93,0.43]) px = copy(time) + bjdref py = copy(pixseriessum) px, xlab, status = kepplot.cleanx(px,logfile,verbose) py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) kepplot.RangeOfPlot(px,py,0.01,False) kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True) py = copy(fluxcor) py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) kepplot.plot1d(px,py,cadence,'r',2,fcolor,0.0,True) pylab.setp(pylab.gca(),xticklabels=[]) kepplot.labels('',ylab,'k',24) pylab.grid() # plot aperture photometry and PCA corrected data if status == 0 and plotpca: ax = kepplot.location([0.06,0.09,0.93,0.43]) yr = array([],'float32') npc = min([6,nrem]) for i in range(npc-1,-1,-1): py = pcar[:,i] * c[i] py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) cl = float(i) / (float(npc)) kepplot.plot1d(px,py,cadence,[1.0-cl,0.0,cl],2,fcolor,0.0,True) yr = append(yr,py) y1 = max(yr) y2 = -min(yr) kepplot.RangeOfPlot(px,array([-y1,y1,-y2,y2]),0.01,False) kepplot.labels(xlab,'Principal Components','k',24) pylab.grid() # save plot to file if status == 0 and plotpca: pylab.savefig(repname) # render plot if status == 0 and plotpca: kepplot.render(cmdLine) # stop time if status == 0: kepmsg.clock('KEPPCA ended at',logfile,verbose) return
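# ----------------------------------------------------------------------------
# Illustrative sketch (not part of keppca above): the principal components are
# obtained with the optional MDP package (mdp.nodes.WhiteningNode). If MDP is
# unavailable, essentially the same whitened component time series can be
# computed with a plain numpy SVD, up to sign and normalization conventions.
# The hypothetical helper below assumes pixseriesnorm has shape (ntim, npix)
# with each pixel column already mean-subtracted and divided by its standard
# deviation, as constructed above.

def _whitened_pca_sketch(pixseriesnorm):
    import numpy
    X = pixseriesnorm - numpy.mean(pixseriesnorm, axis=0)      # re-center columns, defensively
    U, s, Vt = numpy.linalg.svd(X, full_matrices=False)
    pcar = U * numpy.sqrt(X.shape[0] - 1)                      # whitened PC time series (unit variance)
    eigvec = Vt                                                # component maps, one row per PC
    return pcar, eigvec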
def kepmask(infile, mfile, pfile, tabrow, imin, imax, iscale, cmap, verbose, logfile, status, cLine=False): global pimg, zscale, zmin, zmax, xmin, xmax, ymin, ymax, quarter global pxdim, pydim, kepmag, skygroup, season, channel global module, output, row, column, maskfile, plotfile global pkepid, pkepmag, pra, pdec, colmap, cmdLine # input arguments status = 0 numpy.seterr(all="ignore") zmin = imin zmax = imax zscale = iscale colmap = cmap maskfile = mfile plotfile = pfile cmdLine = cLine # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPMASK -- ' call += 'infile=' + infile + ' ' call += 'maskfile=' + mfile + ' ' call += 'plotfile=' + pfile + ' ' call += 'tabrow=' + str(tabrow) + ' ' call += 'imin=' + str(imin) + ' ' call += 'imax=' + str(imax) + ' ' call += 'iscale=' + str(iscale) + ' ' call += 'cmap=' + str(cmap) + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPMASK started at', logfile, verbose) # reference color map if cmap == 'browse': status = cmap_plot() # open TPF FITS file and check tabrow exists if status == 0: tpf, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0: try: naxis2 = tpf['TARGETTABLES'].header['NAXIS2'] except: txt = 'ERROR -- KEPMASK: No NAXIS2 keyword in ' + infile + '[TARGETTABLES]' status = kepmsg.err(logfile, txt, True) if status == 0 and tabrow > naxis2: txt = 'ERROR -- KEPMASK: tabrow is too large. There are ' + str( naxis2) + ' rows in the table.' status = kepmsg.err(logfile, txt, True) if status == 0: status = kepio.closefits(tpf, logfile, verbose) # read TPF data pixel image if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, pixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) img = pixels[tabrow] pkepid = copy(kepid) pra = copy(ra) pdec = copy(dec) pkepmag = copy(kepmag) pxdim = copy(xdim) pydim = copy(ydim) pimg = copy(img) # print target data if status == 0: print '' print ' KepID: %s' % kepid print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # subimage of channel for plot if status == 0: ymin = copy(row) ymax = ymin + ydim xmin = copy(column) xmax = xmin + xdim # intensity scale if status == 0: pimg, imin, imax = kepplot.intScale1D(pimg, zscale) if zmin and zmax and 'log' in zscale: zmin = log10(zmin) zmax = log10(zmax) elif zmin and zmax and 'sq' in zscale: zmin = sqrt(zmin) zmax = sqrt(zmax) elif zmin and zmax and 'li' in zscale: zmin *= 1.0 zmax *= 1.0 else: zmin = copy(imin) zmax = copy(imax) # nstat = 2; pixels = [] # work = array(sort(img),dtype=float32) # for i in range(len(work)): # if 'nan' not in str(work[i]): # pixels.append(work[i]) # pixels = array(pixels,dtype=float32) # if int(float(len(pixels)) / 10 + 0.5) > nstat: # nstat = int(float(len(pixels)) / 10 + 0.5) # if not zmin: # zmin = median(pixels[:nstat]) # if not zmax: # zmax = median(pixels[-nstat:]) # if 'log' in zscale: # pimg = log10(pimg) # if 'sq' in zscale: # pimg = sqrt(pimg) # plot limits ymin = float(ymin) - 0.5 ymax = float(ymax) - 0.5 xmin = float(xmin) - 0.5 xmax = float(xmax) - 0.5 # plot style try: params = { 'backend': 'png', 
'axes.linewidth': 2.5, 'axes.labelsize': 24, 'font.family': 'sans-serif', 'font.weight': 'bold', 'font.size': 12, 'legend.fontsize': 12, 'xtick.labelsize': 14, 'ytick.labelsize': 14 } pylab.rcParams.update(params) except: pass if status == 0: pylab.figure(figsize=[10, 7]) plotimage(cmdLine) return
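# ----------------------------------------------------------------------------
# Illustrative sketch (not part of kepmask above): the display stretch chosen
# through the iscale argument is applied by kepplot.intScale1D, whose
# implementation is not shown in this file. The hypothetical helper below is a
# minimal stand-in that captures the linear / sqrt / log10 behaviour assumed
# by the zmin/zmax handling above; it is not the actual kepplot routine.

def _intensity_stretch_sketch(img, zscale):
    import numpy
    work = numpy.array(img, dtype='float64')
    if 'log' in zscale:
        work = numpy.log10(numpy.clip(work, 1.0e-10, None))    # logarithmic stretch
    elif 'sq' in zscale:
        work = numpy.sqrt(numpy.clip(work, 0.0, None))         # square-root stretch
    return work, numpy.nanmin(work), numpy.nanmax(work)        # stretched image and display limits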
def kepextract(infile,maskfile,outfile,subback,clobber,verbose,logfile,status): # startup parameters status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPEXTRACT -- ' call += 'infile='+infile+' ' call += 'maskfile='+maskfile+' ' call += 'outfile='+outfile+' ' backgr = 'n' if (subback): backgr = 'y' call += 'background='+backgr+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPEXTRACT started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPEXTRACT: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open input file status = 0 instr = pyfits.open(infile,mode='readonly',memmap=True) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # input file data if status == 0: cards0 = instr[0].header.cards cards1 = instr[1].header.cards cards2 = instr[2].header.cards table = instr[1].data[:] maskmap = copy(instr[2].data) # input table data if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, time, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) time = numpy.array(time,dtype='float64') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, timecorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) timecorr = numpy.array(timecorr,dtype='float32') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadenceno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) cadenceno = numpy.array(cadenceno,dtype='int') if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, raw_cnts, status = \ kepio.readTPF(infile,'RAW_CNTS',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_err, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg, status = \ kepio.readTPF(infile,'FLUX_BKG',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err, status = \ kepio.readTPF(infile,'FLUX_BKG_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cosmic_rays, status = \ kepio.readTPF(infile,'COSMIC_RAYS',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, quality, status = \ 
kepio.readTPF(infile,'QUALITY',logfile,verbose) quality = numpy.array(quality,dtype='int') if status == 0: try: pos_corr1 = numpy.array(table.field('POS_CORR1'),dtype='float64') # ---for FITS wave #2 except: pos_corr1 = empty(len(time)); pos_corr1[:] = numpy.nan # ---temporary before FITS wave #2 try: pos_corr2 = numpy.array(table.field('POS_CORR2'),dtype='float64') # ---for FITS wave #2 except: pos_corr2 = empty(len(time)); pos_corr2[:] = numpy.nan # ---temporary before FITS wave #2 # dummy columns for output file psf_centr1 = empty(len(time)); psf_centr1[:] = numpy.nan psf_centr1_err = empty(len(time)); psf_centr1_err[:] = numpy.nan psf_centr2 = empty(len(time)); psf_centr2[:] = numpy.nan psf_centr2_err = empty(len(time)); psf_centr2_err[:] = numpy.nan # mom_centr1 = empty(len(time)); mom_centr1[:] = numpy.nan mom_centr1_err = empty(len(time)); mom_centr1_err[:] = numpy.nan # mom_centr2 = empty(len(time)); mom_centr2[:] = numpy.nan mom_centr2_err = empty(len(time)); mom_centr2_err[:] = numpy.nan # read mask definition file if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': maskx = array([],'int') masky = array([],'int') lines, status = kepio.openascii(maskfile,'r',logfile,verbose) for line in lines: line = line.strip().split('|') if len(line) == 6: y0 = int(line[3]) x0 = int(line[4]) line = line[5].split(';') for items in line: try: masky = append(masky,y0 + int(items.split(',')[0])) maskx = append(maskx,x0 + int(items.split(',')[1])) except: continue status = kepio.closeascii(lines,logfile,verbose) if len(maskx) == 0 or len(masky) == 0: message = 'ERROR -- KEPEXTRACT: ' + maskfile + ' contains no pixels.' status = kepmsg.err(logfile,message,verbose) # subimage physical WCS data if status == 0: crpix1p = cards2['CRPIX1P'].value crpix2p = cards2['CRPIX2P'].value crval1p = cards2['CRVAL1P'].value crval2p = cards2['CRVAL2P'].value cdelt1p = cards2['CDELT1P'].value cdelt2p = cards2['CDELT2P'].value # define new subimage bitmap... if status == 0 and 'aper' not in maskfile.lower() and maskfile.lower() != 'all': aperx = array([],'int') apery = array([],'int') aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) if maskmap[i,j] == 0: aperb = append(aperb,0) else: aperb = append(aperb,1) maskmap[i,j] = 1 for k in range(len(maskx)): if aperx[-1] == maskx[k] and apery[-1] == masky[k]: aperb[-1] = 3 maskmap[i,j] = 3 # trap case where no aperture needs to be defined but pixel positions are still required for centroiding if status == 0 and maskfile.lower() == 'all': aperx = array([],'int') apery = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperx = append(aperx,crval1p + (j + 1 - crpix1p) * cdelt1p) apery = append(apery,crval2p + (i + 1 - crpix2p) * cdelt2p) # ...or use old subimage bitmap if status == 0 and 'aper' in maskfile.lower(): aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): aperb = append(aperb,maskmap[i,j]) # ...or use all pixels if status == 0 and maskfile.lower() == 'all': aperb = array([],'int') for i in range(maskmap.shape[0]): for j in range(maskmap.shape[1]): if maskmap[i,j] == 0: aperb = append(aperb,0) else: aperb = append(aperb,3) maskmap[i,j] = 3 # subtract median pixel value for background? 
if status == 0: sky = array([],'float32') for i in range(len(time)): sky = append(sky,median(flux[i,:])) if not subback: sky[:] = 0.0 # legal mask defined? if status == 0: if len(aperb) == 0: message = 'ERROR -- KEPEXTRACT: no legal pixels within the subimage are defined.' status = kepmsg.err(logfile,message,verbose) # construct new table flux data if status == 0: naper = (aperb == 3).sum() ntime = len(time) sap_flux = array([],'float32') sap_flux_err = array([],'float32') sap_bkg = array([],'float32') sap_bkg_err = array([],'float32') raw_flux = array([],'float32') for i in range(len(time)): work1 = array([],'float64') work2 = array([],'float64') work3 = array([],'float64') work4 = array([],'float64') work5 = array([],'float64') for j in range(len(aperb)): if (aperb[j] == 3): work1 = append(work1,flux[i,j]-sky[i]) work2 = append(work2,flux_err[i,j]) work3 = append(work3,flux_bkg[i,j]) work4 = append(work4,flux_bkg_err[i,j]) work5 = append(work5,raw_cnts[i,j]) sap_flux = append(sap_flux,kepstat.sum(work1)) sap_flux_err = append(sap_flux_err,kepstat.sumerr(work2)) sap_bkg = append(sap_bkg,kepstat.sum(work3)) sap_bkg_err = append(sap_bkg_err,kepstat.sumerr(work4)) raw_flux = append(raw_flux,kepstat.sum(work5)) # construct new table moment data if status == 0: mom_centr1 = zeros(shape=(ntime)) mom_centr2 = zeros(shape=(ntime)) mom_centr1_err = zeros(shape=(ntime)) mom_centr2_err = zeros(shape=(ntime)) for i in range(ntime): xf = zeros(shape=(naper)) yf = zeros(shape=(naper)) f = zeros(shape=(naper)) xfe = zeros(shape=(naper)) yfe = zeros(shape=(naper)) fe = zeros(shape=(naper)) k = -1 for j in range(len(aperb)): if (aperb[j] == 3): k += 1 xf[k] = aperx[j] * flux[i,j] xfe[k] = aperx[j] * flux_err[i,j] yf[k] = apery[j] * flux[i,j] yfe[k] = apery[j] * flux_err[i,j] f[k] = flux[i,j] fe[k] = flux_err[i,j] xfsum = kepstat.sum(xf) yfsum = kepstat.sum(yf) fsum = kepstat.sum(f) xfsume = sqrt(kepstat.sum(square(xfe)) / naper) yfsume = sqrt(kepstat.sum(square(yfe)) / naper) fsume = sqrt(kepstat.sum(square(fe)) / naper) mom_centr1[i] = xfsum / fsum mom_centr2[i] = yfsum / fsum mom_centr1_err[i] = sqrt((xfsume / xfsum)**2 + ((fsume / fsum)**2)) mom_centr2_err[i] = sqrt((yfsume / yfsum)**2 + ((fsume / fsum)**2)) mom_centr1_err = mom_centr1_err * mom_centr1 mom_centr2_err = mom_centr2_err * mom_centr2 # construct new table PSF data if status == 0: psf_centr1 = zeros(shape=(ntime)) psf_centr2 = zeros(shape=(ntime)) psf_centr1_err = zeros(shape=(ntime)) psf_centr2_err = zeros(shape=(ntime)) modx = zeros(shape=(naper)) mody = zeros(shape=(naper)) k = -1 for j in range(len(aperb)): if (aperb[j] == 3): k += 1 modx[k] = aperx[j] mody[k] = apery[j] for i in range(ntime): modf = zeros(shape=(naper)) k = -1 guess = [mom_centr1[i], mom_centr2[i], nanmax(flux[i:]), 1.0, 1.0, 0.0, 0.0] for j in range(len(aperb)): if (aperb[j] == 3): k += 1 modf[k] = flux[i,j] args = (modx, mody, modf) ans = leastsq(kepfunc.PRFgauss2d,guess,args=args,xtol=1.0e-8,ftol=1.0e-4,full_output=True) s_sq = (ans[2]['fvec']**2).sum() / (ntime-len(guess)) psf_centr1[i] = ans[0][0] psf_centr2[i] = ans[0][1] try: psf_centr1_err[i] = sqrt(diag(ans[1] * s_sq))[0] except: psf_centr1_err[i] = numpy.nan try: psf_centr2_err[i] = sqrt(diag(ans[1] * s_sq))[1] except: psf_centr2_err[i] = numpy.nan # construct output primary extension if status == 0: hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].key not in hdu0.header.keys(): hdu0.header.update(cards0[i].key, cards0[i].value, cards0[i].comment) else: 
hdu0.header.cards[cards0[i].key].comment = cards0[i].comment status = kepkey.history(call,hdu0,outfile,logfile,verbose) outstr = HDUList(hdu0) # construct output light curve extension if status == 0: col1 = Column(name='TIME',format='D',unit='BJD - 2454833',array=time) col2 = Column(name='TIMECORR',format='E',unit='d',array=timecorr) col3 = Column(name='CADENCENO',format='J',array=cadenceno) col4 = Column(name='SAP_FLUX',format='E',array=sap_flux) col5 = Column(name='SAP_FLUX_ERR',format='E',array=sap_flux_err) col6 = Column(name='SAP_BKG',format='E',array=sap_bkg) col7 = Column(name='SAP_BKG_ERR',format='E',array=sap_bkg_err) col8 = Column(name='PDCSAP_FLUX',format='E',array=sap_flux) col9 = Column(name='PDCSAP_FLUX_ERR',format='E',array=sap_flux_err) col10 = Column(name='SAP_QUALITY',format='J',array=quality) col11 = Column(name='PSF_CENTR1',format='E',unit='pixel',array=psf_centr1) col12 = Column(name='PSF_CENTR1_ERR',format='E',unit='pixel',array=psf_centr1_err) col13 = Column(name='PSF_CENTR2',format='E',unit='pixel',array=psf_centr2) col14 = Column(name='PSF_CENTR2_ERR',format='E',unit='pixel',array=psf_centr2_err) col15 = Column(name='MOM_CENTR1',format='E',unit='pixel',array=mom_centr1) col16 = Column(name='MOM_CENTR1_ERR',format='E',unit='pixel',array=mom_centr1_err) col17 = Column(name='MOM_CENTR2',format='E',unit='pixel',array=mom_centr2) col18 = Column(name='MOM_CENTR2_ERR',format='E',unit='pixel',array=mom_centr2_err) col19 = Column(name='POS_CORR1',format='E',unit='pixel',array=pos_corr1) col20 = Column(name='POS_CORR2',format='E',unit='pixel',array=pos_corr2) col21 = Column(name='RAW_FLUX',format='E',array=raw_flux) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11, \ col12,col13,col14,col15,col16,col17,col18,col19,col20,col21]) hdu1 = new_table(cols) hdu1.header.update('TTYPE1','TIME','column title: data time stamps') hdu1.header.update('TFORM1','D','data type: float64') hdu1.header.update('TUNIT1','BJD - 2454833','column units: barycenter corrected JD') hdu1.header.update('TDISP1','D12.7','column display format') hdu1.header.update('TTYPE2','TIMECORR','column title: barycentric-timeslice correction') hdu1.header.update('TFORM2','E','data type: float32') hdu1.header.update('TUNIT2','d','column units: days') hdu1.header.update('TTYPE3','CADENCENO','column title: unique cadence number') hdu1.header.update('TFORM3','J','column format: signed integer32') hdu1.header.update('TTYPE4','SAP_FLUX','column title: aperture photometry flux') hdu1.header.update('TFORM4','E','column format: float32') hdu1.header.update('TUNIT4','e-/s','column units: electrons per second') hdu1.header.update('TTYPE5','SAP_FLUX_ERR','column title: aperture phot. flux error') hdu1.header.update('TFORM5','E','column format: float32') hdu1.header.update('TUNIT5','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE6','SAP_BKG','column title: aperture phot. background flux') hdu1.header.update('TFORM6','E','column format: float32') hdu1.header.update('TUNIT6','e-/s','column units: electrons per second') hdu1.header.update('TTYPE7','SAP_BKG_ERR','column title: ap. phot. 
background flux error') hdu1.header.update('TFORM7','E','column format: float32') hdu1.header.update('TUNIT7','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE8','PDCSAP_FLUX','column title: PDC photometry flux') hdu1.header.update('TFORM8','E','column format: float32') hdu1.header.update('TUNIT8','e-/s','column units: electrons per second') hdu1.header.update('TTYPE9','PDCSAP_FLUX_ERR','column title: PDC flux error') hdu1.header.update('TFORM9','E','column format: float32') hdu1.header.update('TUNIT9','e-/s','column units: electrons per second (1-sigma)') hdu1.header.update('TTYPE10','SAP_QUALITY','column title: aperture photometry quality flag') hdu1.header.update('TFORM10','J','column format: signed integer32') hdu1.header.update('TTYPE11','PSF_CENTR1','column title: PSF fitted column centroid') hdu1.header.update('TFORM11','E','column format: float32') hdu1.header.update('TUNIT11','pixel','column units: pixel') hdu1.header.update('TTYPE12','PSF_CENTR1_ERR','column title: PSF fitted column error') hdu1.header.update('TFORM12','E','column format: float32') hdu1.header.update('TUNIT12','pixel','column units: pixel') hdu1.header.update('TTYPE13','PSF_CENTR2','column title: PSF fitted row centroid') hdu1.header.update('TFORM13','E','column format: float32') hdu1.header.update('TUNIT13','pixel','column units: pixel') hdu1.header.update('TTYPE14','PSF_CENTR2_ERR','column title: PSF fitted row error') hdu1.header.update('TFORM14','E','column format: float32') hdu1.header.update('TUNIT14','pixel','column units: pixel') hdu1.header.update('TTYPE15','MOM_CENTR1','column title: moment-derived column centroid') hdu1.header.update('TFORM15','E','column format: float32') hdu1.header.update('TUNIT15','pixel','column units: pixel') hdu1.header.update('TTYPE16','MOM_CENTR1_ERR','column title: moment-derived column error') hdu1.header.update('TFORM16','E','column format: float32') hdu1.header.update('TUNIT16','pixel','column units: pixel') hdu1.header.update('TTYPE17','MOM_CENTR2','column title: moment-derived row centroid') hdu1.header.update('TFORM17','E','column format: float32') hdu1.header.update('TUNIT17','pixel','column units: pixel') hdu1.header.update('TTYPE18','MOM_CENTR2_ERR','column title: moment-derived row error') hdu1.header.update('TFORM18','E','column format: float32') hdu1.header.update('TUNIT18','pixel','column units: pixel') hdu1.header.update('TTYPE19','POS_CORR1','column title: col correction for vel. abbern') hdu1.header.update('TFORM19','E','column format: float32') hdu1.header.update('TUNIT19','pixel','column units: pixel') hdu1.header.update('TTYPE20','POS_CORR2','column title: row correction for vel. 
abbern') hdu1.header.update('TFORM20','E','column format: float32') hdu1.header.update('TUNIT20','pixel','column units: pixel') hdu1.header.update('TTYPE21','RAW_FLUX','column title: raw aperture photometry flux') hdu1.header.update('TFORM21','E','column format: float32') hdu1.header.update('TUNIT21','e-/s','column units: electrons per second') hdu1.header.update('EXTNAME','LIGHTCURVE','name of extension') for i in range(len(cards1)): if (cards1[i].key not in hdu1.header.keys() and cards1[i].key[:4] not in ['TTYP','TFOR','TUNI','TDIS','TDIM','WCAX','1CTY', '2CTY','1CRP','2CRP','1CRV','2CRV','1CUN','2CUN', '1CDE','2CDE','1CTY','2CTY','1CDL','2CDL','11PC', '12PC','21PC','22PC']): hdu1.header.update(cards1[i].key, cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension if status == 0: hdu2 = ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].key not in hdu2.header.keys(): hdu2.header.update(cards2[i].key, cards2[i].value, cards2[i].comment) else: hdu2.header.cards[cards2[i].key].comment = cards2[i].comment outstr.append(hdu2) # write output file if status == 0: outstr.writeto(outfile,checksum=True) # close input structure if status == 0: status = kepio.closefits(instr,logfile,verbose) # end time kepmsg.clock('KEPEXTRACT finished at',logfile,verbose)
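# Illustrative usage sketch (not part of the original module): a minimal call to
# kepextract, assuming a hypothetical target pixel file name. Per the logic above,
# maskfile accepts an ASCII mask definition file, the string 'ALL' to sum every
# pixel in the subimage, or any name containing 'aper' to re-use the pipeline
# aperture bitmap.
#
#     kepextract('kplr012345678-2011073133259_lpd-targ.fits','ALL',
#                'kepextract-lc.fits',subback=False,clobber=True,
#                verbose=True,logfile='kepextract.log',status=0)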
def kepprf(infile,plotfile,rownum,columns,rows,fluxes,border,background,focus,prfdir,xtol,ftol, imscale,colmap,labcol,apercol,plt,verbose,logfile,status,cmdLine=False): # input arguments status = 0 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPPRF -- ' call += 'infile='+infile+' ' call += 'plotfile='+plotfile+' ' call += 'rownum='+str(rownum)+' ' call += 'columns='+columns+' ' call += 'rows='+rows+' ' call += 'fluxes='+fluxes+' ' call += 'border='+str(border)+' ' bground = 'n' if (background): bground = 'y' call += 'background='+bground+' ' focs = 'n' if (focus): focs = 'y' call += 'focus='+focs+' ' call += 'prfdir='+prfdir+' ' call += 'xtol='+str(xtol)+' ' call += 'ftol='+str(ftol)+' ' call += 'imscale='+imscale+' ' call += 'colmap='+colmap+' ' call += 'labcol='+labcol+' ' call += 'apercol='+apercol+' ' plotit = 'n' if (plt): plotit = 'y' call += 'plot='+plotit+' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # test log file logfile = kepmsg.test(logfile) # start time kepmsg.clock('KEPPRF started at',logfile,verbose) # reference color map if colmap == 'browse': status = cmap_plot(cmdLine) # construct initial guess vector for fit if status == 0: guess = [] try: f = fluxes.strip().split(',') x = columns.strip().split(',') y = rows.strip().split(',') for i in xrange(len(f)): f[i] = float(f[i]) except: f = fluxes x = columns y = rows nsrc = len(f) for i in xrange(nsrc): try: guess.append(float(f[i])) except: message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: if len(x) != nsrc or len(y) != nsrc: message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and ' message += 'fluxes must have the same number of sources' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(x[i])) except: message = 'ERROR -- KEPPRF: Columns must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0: for i in xrange(nsrc): try: guess.append(float(y[i])) except: message = 'ERROR -- KEPPRF: Rows must be floating point numbers' status = kepmsg.err(logfile,message,verbose) if status == 0 and background: if border == 0: guess.append(0.0) else: for i in range((border+1)*2): guess.append(0.0) if status == 0 and focus: guess.append(1.0); guess.append(1.0); guess.append(0.0) # open TPF FITS file if status == 0: try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \ kepio.readTPF(infile,'TIME',logfile,verbose) except: message = 'ERROR -- KEPPRF: is %s a Target Pixel File? 
' % infile status = kepmsg.err(logfile,message,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \ kepio.readTPF(infile,'TIMECORR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \ kepio.readTPF(infile,'CADENCENO',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \ kepio.readTPF(infile,'FLUX',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \ kepio.readTPF(infile,'FLUX_ERR',logfile,verbose) if status == 0: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual, status = \ kepio.readTPF(infile,'QUALITY',logfile,verbose) # read mask definition data from TPF file if status == 0: maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose) npix = numpy.size(numpy.nonzero(maskimg)[0]) # print target data if status == 0 and verbose: print '' print ' KepID: %s' % kepid print ' BJD: %.2f' % (barytime[rownum-1] + 2454833.0) print ' RA (J2000): %s' % ra print 'Dec (J2000): %s' % dec print ' KepMag: %s' % kepmag print ' SkyGroup: %2s' % skygroup print ' Season: %2s' % str(season) print ' Channel: %2s' % channel print ' Module: %2s' % module print ' Output: %1s' % output print '' # is this a good row with finite timestamp and pixels? if status == 0: if not numpy.isfinite(barytime[rownum-1]) or numpy.isnan(numpy.nansum(fluxpixels[rownum-1,:])): message = 'ERROR -- KEPPRF: Row ' + str(rownum) + ' is a bad quality timestamp' status = kepmsg.err(logfile,message,verbose) # construct input pixel image if status == 0: flux = fluxpixels[rownum-1,:] ferr = errpixels[rownum-1,:] DATx = arange(column,column+xdim) DATy = arange(row,row+ydim) # if numpy.nanmin > 420000.0: flux -= 420000.0 # image scale and intensity limits of pixel data if status == 0: n = 0 DATimg = empty((ydim,xdim)) ERRimg = empty((ydim,xdim)) for i in range(ydim): for j in range(xdim): DATimg[i,j] = flux[n] ERRimg[i,j] = ferr[n] n += 1 # determine suitable PRF calibration file if status == 0: if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str(output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir status = kepmsg.err(logfile,message,verbose) # read PRF images if status == 0: prfn = [0,0,0,0,0] crpix1p = numpy.zeros((5),dtype='float32') crpix2p = numpy.zeros((5),dtype='float32') crval1p = numpy.zeros((5),dtype='float32') crval2p = numpy.zeros((5),dtype='float32') cdelt1p = numpy.zeros((5),dtype='float32') cdelt2p = numpy.zeros((5),dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \ = kepio.readPRFimage(prffile,i+1,logfile,verbose) prfn = array(prfn) PRFx = arange(0.5,shape(prfn[0])[1]+0.5) PRFy = arange(0.5,shape(prfn[0])[0]+0.5) PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position if status == 0: prf = zeros(shape(prfn[0]),dtype='float32') prfWeight = zeros((5),dtype='float32') for i in xrange(5): prfWeight[i] = sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e-6 prf = prf + prfn[i] / prfWeight[i] prf = prf / nansum(prf) / cdelt1p[0] / cdelt2p[0] # interpolate the calibrated PRF shape to the target position # if status == 0: # prf = zeros(shape(prfn[0,:,:]),dtype='float32') # px = crval1p + len(PRFx) / 2 * cdelt1p[0] # py = crval2p + len(PRFy) / 2 * cdelt2p[0] # pp = [[px[0],py[0]], # [px[1],py[1]], # [px[2],py[2]], # [px[3],py[3]], # [px[4],py[4]]] # for index,value in ndenumerate(prf): # pz = prfn[:,index[0],index[1]] # prf[index] = griddata(pp, pz, ([column], [row]), method='linear') # print shape(prf) # location of the data image centered on the PRF image (in PRF pixel units) if status == 0: prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = (shape(prf)[0] - prfDimY) / 2 PRFx0 = (shape(prf)[1] - prfDimX) / 2 # interpolation function over the PRF if status == 0: splineInterpolation = scipy.interpolate.RectBivariateSpline(PRFx,PRFy,prf) # construct mesh for background model if status == 0 and background: bx = numpy.arange(1.,float(xdim+1)) by = numpy.arange(1.,float(ydim+1)) xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim), numpy.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data if status == 0: start = time.time() if focus and background: args = (DATx,DATy,DATimg,ERRimg,nsrc,border,xx,yy,splineInterpolation,float(x[0]),float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground,guess,args=args,xtol=xtol, ftol=ftol,disp=False) elif focus and not background: args = (DATx,DATy,DATimg,ERRimg,nsrc,splineInterpolation,float(x[0]),float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocus,guess,args=args,xtol=xtol, ftol=ftol,disp=False) elif background and not focus: args = (DATx,DATy,DATimg,ERRimg,nsrc,border,xx,yy,splineInterpolation,float(x[0]),float(y[0])) ans = fmin_powell(kepfunc.PRFwithBackground,guess,args=args,xtol=xtol, ftol=ftol,disp=False) else: args = (DATx,DATy,DATimg,ERRimg,nsrc,splineInterpolation,float(x[0]),float(y[0])) ans = fmin_powell(kepfunc.PRF,guess,args=args,xtol=xtol, ftol=ftol,disp=False) print 'Convergence time = %.2fs\n' % (time.time() - start) # pad the PRF data if the PRF array is smaller than the data array if status == 0: flux = []; OBJx = []; OBJy = [] PRFmod = numpy.zeros((prfDimY,prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = numpy.zeros((prfDimY,prfDimX)) superPRF = zeros((prfDimY+1,prfDimX+1)) 
superPRF[abs(PRFy0):abs(PRFy0)+shape(prf)[0],abs(PRFx0):abs(PRFx0)+shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = rotate(prf,-angle,reshape=False,mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc+i]) OBJy.append(ans[nsrc*2+i]) # calculate best-fit model y = (OBJy[i]-mean(DATy)) / cdelt1p[0] x = (OBJx[i]-mean(DATx)) / cdelt2p[0] prfTmp = shift(prf,[y,x],order=3,mode='constant') prfTmp = prfTmp[PRFy0:PRFy0+prfDimY,PRFx0:PRFx0+prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters if verbose: txt = 'Flux = %10.2f e-/s ' % flux[i] txt += 'X = %9.4f pix ' % OBJx[i] txt += 'Y = %9.4f pix ' % OBJy[i] kepmsg.log(logfile,txt,True) # # params = {'backend': 'png', # 'axes.linewidth': 2.5, # 'axes.labelsize': 24, # 'axes.font': 'sans-serif', # 'axes.fontweight' : 'bold', # 'text.fontsize': 12, # 'legend.fontsize': 12, # 'xtick.labelsize': 24, # 'ytick.labelsize': 24} # pylab.rcParams.update(params) # # pylab.figure(figsize=[20,10]) # ax = pylab.axes([0.05,0.08,0.46,0.9]) # xxx = numpy.arange(397.5,402.5,0.02) # yyy = numpy.sum(PRFmod,axis=0) / numpy.max(numpy.sum(PRFmod,axis=0)) # pylab.plot(xxx,yyy,color='b',linewidth=3.0) # xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1]) # yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1]) # pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3) # pylab.xlabel('Pixel Column Number') # pylab.xlim(397.5,402.5) # pylab.ylim(1.0e-30,1.02) # for xmaj in numpy.arange(397.5,402.5,1.0): # pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':') # for xmaj in numpy.arange(0.2,1.2,0.2): # pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':') # # # ax = pylab.axes([0.51,0.08,0.46,0.9]) # xxx = numpy.arange(32.5,37.5,0.02) # yyy = numpy.sum(PRFmod,axis=1) / numpy.max(numpy.sum(PRFmod,axis=1)) # pylab.plot(xxx,yyy,color='b',linewidth=3.0) # xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1]) # yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1]) # pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3) # pylab.setp(pylab.gca(),yticklabels=[]) # pylab.xlabel('Pixel Row Number') # pylab.xlim(32.5,37.5) # pylab.ylim(1.0e-30,1.02) # for xmaj in numpy.arange(32.5,37.5,1.0): # pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':') # for xmaj in numpy.arange(0.2,1.2,0.2): # pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':') # pylab.ion() # pylab.plot([]) # pylab.ioff() if verbose and background: bterms = border + 1 if bterms == 1: b = ans[nsrc*3] else: bcoeff = array([ans[nsrc*3:nsrc*3+bterms],ans[nsrc*3+bterms:nsrc*3+bterms*2]]) bkg = kepfunc.polyval2d(xx,yy,bcoeff) b = nanmean(bkg.reshape(bkg.size)) txt = '\n Mean background = %.2f e-/s' % b kepmsg.log(logfile,txt,True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if verbose and focus: if not background: kepmsg.log(logfile,'',True) kepmsg.log(logfile,' X/Y focus factors = %.3f/%.3f' % (wx,wy),True) kepmsg.log(logfile,'PRF rotation angle = %.2f deg' % angle,True) # measure flux fraction and contamination if status == 0: PRFall = kepfunc.PRF2DET(flux,OBJx,OBJy,DATx,DATy,wx,wy,angle,splineInterpolation) PRFone = kepfunc.PRF2DET([flux[0]],[OBJx[0]],[OBJy[0]],DATx,DATy,wx,wy,angle,splineInterpolation) FluxInMaskAll = numpy.nansum(PRFall) FluxInMaskOne = numpy.nansum(PRFone) FluxInAperAll = 0.0 FluxInAperOne = 0.0 
for i in range(1,ydim): for j in range(1,xdim): if kepstat.bitInBitmap(maskimg[i,j],2): FluxInAperAll += PRFall[i,j] FluxInAperOne += PRFone[i,j] FluxFraction = FluxInAperOne / flux[0] try: Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll except: Contamination = 0.0 kepmsg.log(logfile,'\n Total flux in mask = %.2f e-/s' % FluxInMaskAll,True) kepmsg.log(logfile,' Target flux in mask = %.2f e-/s' % FluxInMaskOne,True) kepmsg.log(logfile,' Total flux in aperture = %.2f e-/s' % FluxInAperAll,True) kepmsg.log(logfile,' Target flux in aperture = %.2f e-/s' % FluxInAperOne,True) kepmsg.log(logfile,' Target flux fraction in aperture = %.2f%%' % (FluxFraction * 100.0),True) kepmsg.log(logfile,'Contamination fraction in aperture = %.2f%%' % (Contamination * 100.0),True) # constuct model PRF in detector coordinates if status == 0: PRFfit = PRFall + 0.0 if background and bterms == 1: PRFfit = PRFall + b if background and bterms > 1: PRFfit = PRFall + bkg # calculate residual of DATA - FIT if status == 0: PRFres = DATimg - PRFfit FLUXres = numpy.nansum(PRFres) / npix # calculate the sum squared difference between data and model if status == 0: Pearson = abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit)) Chi2 = numpy.nansum(numpy.square(DATimg - PRFfit) / numpy.square(ERRimg)) DegOfFreedom = npix - len(guess) - 1 try: kepmsg.log(logfile,'\n Residual flux = %.2f e-/s' % FLUXres,True) kepmsg.log(logfile,'Pearson\'s chi^2 test = %d for %d dof' % (Pearson,DegOfFreedom),True) except: pass kepmsg.log(logfile,' Chi^2 test = %d for %d dof' % (Chi2,DegOfFreedom),True) # image scale and intensity limits for plotting images if status == 0: imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg,imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod,imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit,imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres,'linear') if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': zmaxpr = numpy.max(zmaxpr) zminpr = zmaxpr / 2 # plot style if status == 0: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': 28, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 20, 'ytick.labelsize': 20, 'xtick.major.pad': 6, 'ytick.major.pad': 6} pylab.rcParams.update(params) except: pass pylab.figure(figsize=[12,10]) pylab.clf() plotimage(imgdat_pl,zminfl,zmaxfl,1,row,column,xdim,ydim,0.07,0.53,'observation',colmap,labcol) # pylab.text(830.0,242.1,'A',horizontalalignment='center',verticalalignment='center', # fontsize=28,fontweight=500,color='white') # pylab.text(831.1,240.62,'B',horizontalalignment='center',verticalalignment='center', # fontsize=28,fontweight=500,color='white') # plotimage(imgprf_pl,0.0,zmaxpr/0.5,2,row,column,xdim,ydim,0.52,0.52,'model',colmap) plotimage(imgprf_pl,zminpr,zmaxpr,2,row,column,xdim,ydim,0.44,0.53,'model',colmap,labcol) kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,1,apercol,'--',0.5) kepplot.borders(maskimg,xdim,ydim,pixcoord1,pixcoord2,2,apercol,'-',3.0) plotimage(imgfit_pl,zminfl,zmaxfl,3,row,column,xdim,ydim,0.07,0.08,'fit',colmap,labcol) # plotimage(imgres_pl,-zmaxre,zmaxre,4,row,column,xdim,ydim,0.44,0.08,'residual',colmap,'k') plotimage(imgres_pl,zminfl,zmaxfl,4,row,column,xdim,ydim,0.44,0.08,'residual',colmap,labcol) # plot data color bar # barwin = pylab.axes([0.84,0.53,0.06,0.45]) barwin = pylab.axes([0.84,0.08,0.06,0.9]) if imscale == 'linear': brange = 
numpy.arange(zminfl,zmaxfl,(zmaxfl-zminfl)/1000) elif imscale == 'logarithmic': brange = numpy.arange(10.0**zminfl,10.0**zmaxfl,(10.0**zmaxfl-10.0**zminfl)/1000) elif imscale == 'squareroot': brange = numpy.arange(zminfl**2,zmaxfl**2,(zmaxfl**2-zminfl**2)/1000) if imscale == 'linear': barimg = numpy.resize(brange,(1000,1)) elif imscale == 'logarithmic': barimg = numpy.log10(numpy.resize(brange,(1000,1))) elif imscale == 'squareroot': barimg = numpy.sqrt(numpy.resize(brange,(1000,1))) try: nrm = len(str(int(numpy.nanmax(brange))))-1 except: nrm = 0 brange = brange / 10**nrm pylab.imshow(barimg,aspect='auto',interpolation='nearest',origin='lower', vmin=numpy.nanmin(barimg),vmax=numpy.nanmax(barimg), extent=(0.0,1.0,brange[0],brange[-1]),cmap=colmap) barwin.yaxis.tick_right() barwin.yaxis.set_label_position('right') barwin.yaxis.set_major_locator(MaxNLocator(7)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().set_autoscale_on(False) pylab.setp(pylab.gca(),xticklabels=[],xticks=[]) pylab.ylabel('Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm) setp(barwin.get_yticklabels(), 'rotation', 90) barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f')) # plot residual color bar # barwin = pylab.axes([0.84,0.08,0.06,0.45]) # Brange = numpy.arange(-zmaxre,zmaxre,(zmaxre+zmaxre)/1000) # try: # nrm = len(str(int(numpy.nanmax(brange))))-1 # except: # nrm = 0 # brange = brange / 10**nrm # barimg = numpy.resize(brange,(1000,1)) # pylab.imshow(barimg,aspect='auto',interpolation='nearest',origin='lower', # vmin=brange[0],vmax=brange[-1],extent=(0.0,1.0,brange[0],brange[-1]),cmap=colmap) # barwin.yaxis.tick_right() # barwin.yaxis.set_label_position('right') # barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f')) # barwin.yaxis.set_major_locator(MaxNLocator(7)) # pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # pylab.gca().set_autoscale_on(False) # pylab.setp(pylab.gca(),xticklabels=[],xticks=[]) # pylab.ylabel('Residual (10$^%d$ e$^-$ s$^{-1}$)' % nrm) # setp(barwin.get_yticklabels(), 'rotation', 90) # render plot if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none': pylab.savefig(plotfile) if status == 0 and plt: if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() # stop time kepmsg.clock('\nKEPPRF ended at',logfile,verbose) return
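# Illustrative usage sketch (not part of the original module; the file paths and
# starting guesses below are hypothetical). As parsed above, columns, rows and
# fluxes are comma-separated strings with one entry per source, the first entry
# being the target.
#
#     kepprf('kplr012345678-2011073133259_lpd-targ.fits','kepprf.png',rownum=100,
#            columns='830.5',rows='242.1',fluxes='1.0e5',border=0,
#            background=False,focus=False,prfdir='/path/to/kepler/prf',
#            xtol=1.0e-4,ftol=1.0,imscale='linear',colmap='YlOrBr',
#            labcol='k',apercol='b',plt=True,verbose=True,
#            logfile='kepprf.log',status=0)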
def __init__( self, infile, rownum=0, imscale="linear", cmap="YlOrBr", lcolor="k", acolor="b", query=True, logfile="kepcrowd.log", **kwargs ): self.colrow = [] self.fluxes = [] self._text = [] # hide warnings np.seterr(all="ignore") # test log file logfile = kepmsg.test(logfile) # info hashline = "----------------------------------------------------------------------------" kepmsg.log(logfile, hashline, False) call = "KEPFIELD -- " call += "infile=" + infile + " " call += "rownum=" + str(rownum) kepmsg.log(logfile, call + "\n", False) try: kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, barytime, status = kepio.readTPF( infile, "TIME", logfile, False ) except: message = "ERROR -- KEPFIELD: is %s a Target Pixel File? " % infile kepmsg.err(logfile, message, False) return "", "", "", None kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = kepio.readTPF( infile, "TIMECORR", logfile, False ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, cadno, status = kepio.readTPF( infile, "CADENCENO", logfile, False ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = kepio.readTPF( infile, "FLUX", logfile, False ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = kepio.readTPF( infile, "FLUX_ERR", logfile, False ) kepid, channel, skygroup, module, output, quarter, season, ra, dec, column, row, kepmag, xdim, ydim, qual, status = kepio.readTPF( infile, "QUALITY", logfile, False ) # read mask definition data from TPF file maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile, logfile, False) # observed or simulated data? coa = False instr = pyfits.open(infile, mode="readonly", memmap=True) filever, status = kepkey.get(infile, instr[0], "FILEVER", logfile, False) if filever == "COA": coa = True # is this a good row with finite timestamp and pixels? 
if not np.isfinite(barytime[rownum - 1]) or not np.nansum(fluxpixels[rownum - 1, :]): message = "ERROR -- KEPFIELD: Row " + str(rownum) + " is a bad quality timestamp" kepmsg.err(logfile, message, True) return "", "", "", None # construct input pixel image flux = fluxpixels[rownum - 1, :] # image scale and intensity limits of pixel data flux_pl, zminfl, zmaxfl = kepplot.intScale1D(flux, imscale) n = 0 imgflux_pl = np.empty((ydim + 2, xdim + 2)) for i in range(ydim + 2): for j in range(xdim + 2): imgflux_pl[i, j] = np.nan for i in range(ydim): for j in range(xdim): imgflux_pl[i + 1, j + 1] = flux_pl[n] n += 1 # cone search around target coordinates using the MAST target search form dr = max([ydim + 2, xdim + 2]) * 4.0 kepid, ra, dec, kepmag = MASTRADec(float(ra), float(dec), dr, query, logfile) # convert celestial coordinates to detector coordinates sx = np.array([]) sy = np.array([]) inf, status = kepio.openfits(infile, "readonly", logfile, False) try: crpix1, crpix2, crval1, crval2, cdelt1, cdelt2, pc, status = kepkey.getWCSs( infile, inf["APERTURE"], logfile, False ) crpix1p, crpix2p, crval1p, crval2p, cdelt1p, cdelt2p, status = kepkey.getWCSp( infile, inf["APERTURE"], logfile, False ) for i in range(len(kepid)): dra = (ra[i] - crval1) * np.cos(np.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 if coa: sx = np.append(sx, -(pc[0, 0] * dra + pc[0, 1] * ddec) + crpix1 + crval1p - 1.0) else: sx = np.append(sx, pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0) sy = np.append(sy, pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0) except: message = "ERROR -- KEPFIELD: Non-compliant WCS information within file %s" % infile kepmsg.err(logfile, message, True) return "", "", "", None # plot self.fig = pl.figure(figsize=[10, 10]) pl.clf() # pixel limits of the subimage ymin = np.copy(float(row)) ymax = ymin + ydim xmin = np.copy(float(column)) xmax = xmin + xdim # plot limits for flux image ymin = float(ymin) - 1.5 ymax = float(ymax) + 0.5 xmin = float(xmin) - 1.5 xmax = float(xmax) + 0.5 # plot the image window ax = pl.axes([0.1, 0.11, 0.88, 0.82]) pl.title("Select sources for fitting (KOI first)", fontsize=24) pl.imshow( imgflux_pl, aspect="auto", interpolation="nearest", origin="lower", vmin=zminfl, vmax=zmaxfl, extent=(xmin, xmax, ymin, ymax), cmap=cmap, ) pl.gca().set_autoscale_on(False) labels = ax.get_yticklabels() pl.setp(labels, "rotation", 90) pl.gca().xaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.gca().yaxis.set_major_formatter(pl.ScalarFormatter(useOffset=False)) pl.xlabel("Pixel Column Number", {"color": "k"}, fontsize=24) pl.ylabel("Pixel Row Number", {"color": "k"}, fontsize=24) # plot mask borders kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, lcolor, "--", 0.5) # plot aperture borders kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, lcolor, "-", 4.0) # list sources with open(logfile, "a") as lf: print("Column Row RA J2000 Dec J2000 Kp Kepler ID", file=lf) print("----------------------------------------------------", file=lf) for i in range(len(sx) - 1, -1, -1): if sx[i] >= xmin and sx[i] < xmax and sy[i] >= ymin and sy[i] < ymax: if kepid[i] != 0 and kepmag[i] != 0.0: print( "%6.1f %6.1f %9.5f %8.5f %5.2f KIC %d" % ( float(sx[i]), float(sy[i]), float(ra[i]), float(dec[i]), float(kepmag[i]), int(kepid[i]), ), file=lf, ) elif kepid[i] != 0 and kepmag[i] == 0.0: print( "%6.1f %6.1f %9.5f %8.5f KIC %d" % (float(sx[i]), float(sy[i]), float(ra[i]), float(dec[i]), int(kepid[i])), file=lf, ) else: print( "%6.1f 
%6.1f %9.5f %8.5f" % (float(sx[i]), float(sy[i]), float(ra[i]), float(dec[i])), file=lf, ) # plot sources for i in range(len(sx) - 1, -1, -1): if kepid[i] != 0 and kepmag[i] != 0.0: size = max(np.array([80.0, 80.0 + (2.5 ** (18.0 - max(12.0, float(kepmag[i])))) * 250.0])) pl.scatter(sx[i], sy[i], s=size, facecolors="g", edgecolors="k", alpha=0.4) else: pl.scatter(sx[i], sy[i], s=80, facecolors="r", edgecolors="k", alpha=0.4) # Sizes for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(16) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(16) # render plot and activate source selection self.srcinfo = [kepid, sx, sy, kepmag] pl.connect("button_release_event", self.on_mouse_release) pl.show(block=True) pl.close()
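# Illustrative usage sketch (not part of the original module): the __init__ above
# presumably belongs to an interactive source-selection class whose class
# statement is not visible in this excerpt (its default log file is
# kepcrowd.log); the class name used below is a placeholder.
#
#     selector = KepCrowdSelector('kplr012345678-2011073133259_lpd-targ.fits',
#                                 rownum=100, imscale='linear', cmap='YlOrBr')
#
# The constructor plots the target pixel image with nearby catalogue sources
# overlaid and binds button_release_event to self.on_mouse_release so that
# sources can be picked interactively for subsequent PRF fitting.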