def kepfold(infile,outfile,period,phasezero,bindata,binmethod,threshold,niter,nbins, rejqual,plottype,plotlab,clobber,verbose,logfile,status,cmdLine=False): # startup parameters status = 0 labelsize = 32; ticksize = 18; xsize = 18; ysize = 10 lcolor = '#0000ff'; lwidth = 2.0; fcolor = '#ffff00'; falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPFOLD -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'period='+str(period)+' ' call += 'phasezero='+str(phasezero)+' ' binit = 'n' if (bindata): binit = 'y' call += 'bindata='+binit+' ' call += 'binmethod='+binmethod+' ' call += 'threshold='+str(threshold)+' ' call += 'niter='+str(niter)+' ' call += 'nbins='+str(nbins)+' ' qflag = 'n' if (rejqual): qflag = 'y' call += 'rejqual='+qflag+ ' ' call += 'plottype='+plottype+ ' ' call += 'plotlab='+plotlab+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPFOLD started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPFOLD: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # input data if status == 0: table = instr[1].data incards = instr[1].header.cards try: sap = instr[1].data.field('SAP_FLUX') except: try: sap = instr[1].data.field('ap_raw_flux') except: sap = zeros(len(table.field(0))) try: saperr = instr[1].data.field('SAP_FLUX_ERR') except: try: saperr = instr[1].data.field('ap_raw_err') except: saperr = zeros(len(table.field(0))) try: pdc = instr[1].data.field('PDCSAP_FLUX') except: try: pdc = instr[1].data.field('ap_corr_flux') except: pdc = zeros(len(table.field(0))) try: pdcerr = instr[1].data.field('PDCSAP_FLUX_ERR') except: try: pdcerr = instr[1].data.field('ap_corr_err') except: pdcerr = zeros(len(table.field(0))) try: cbv = instr[1].data.field('CBVSAP_FLUX') except: cbv = zeros(len(table.field(0))) if 'cbv' in plottype: txt = 'ERROR -- KEPFOLD: CBVSAP_FLUX column is not populated. Use kepcotrend' status = kepmsg.err(logfile,txt,verbose) try: det = instr[1].data.field('DETSAP_FLUX') except: det = zeros(len(table.field(0))) if 'det' in plottype: txt = 'ERROR -- KEPFOLD: DETSAP_FLUX column is not populated. Use kepflatten' status = kepmsg.err(logfile,txt,verbose) try: deterr = instr[1].data.field('DETSAP_FLUX_ERR') except: deterr = zeros(len(table.field(0))) if 'det' in plottype: txt = 'ERROR -- KEPFOLD: DETSAP_FLUX_ERR column is not populated. 
Use kepflatten' status = kepmsg.err(logfile,txt,verbose) try: quality = instr[1].data.field('SAP_QUALITY') except: quality = zeros(len(table.field(0))) if qualflag: txt = 'WARNING -- KEPFOLD: Cannot find a QUALITY data column' kepmsg.warn(logfile,txt) if status == 0: barytime, status = kepio.readtimecol(infile,table,logfile,verbose) barytime1 = copy(barytime) # filter out NaNs and quality > 0 work1 = []; work2 = []; work3 = []; work4 = []; work5 = []; work6 = []; work8 = []; work9 = [] if status == 0: if 'sap' in plottype: datacol = copy(sap) errcol = copy(saperr) if 'pdc' in plottype: datacol = copy(pdc) errcol = copy(pdcerr) if 'cbv' in plottype: datacol = copy(cbv) errcol = copy(saperr) if 'det' in plottype: datacol = copy(det) errcol = copy(deterr) for i in range(len(barytime)): if (numpy.isfinite(barytime[i]) and numpy.isfinite(datacol[i]) and datacol[i] != 0.0 and numpy.isfinite(errcol[i]) and errcol[i] > 0.0): if rejqual and quality[i] == 0: work1.append(barytime[i]) work2.append(sap[i]) work3.append(saperr[i]) work4.append(pdc[i]) work5.append(pdcerr[i]) work6.append(cbv[i]) work8.append(det[i]) work9.append(deterr[i]) elif not rejqual: work1.append(barytime[i]) work2.append(sap[i]) work3.append(saperr[i]) work4.append(pdc[i]) work5.append(pdcerr[i]) work6.append(cbv[i]) work8.append(det[i]) work9.append(deterr[i]) barytime = array(work1,dtype='float64') sap = array(work2,dtype='float32') / cadenom saperr = array(work3,dtype='float32') / cadenom pdc = array(work4,dtype='float32') / cadenom pdcerr = array(work5,dtype='float32') / cadenom cbv = array(work6,dtype='float32') / cadenom det = array(work8,dtype='float32') / cadenom deterr = array(work9,dtype='float32') / cadenom # calculate phase if status == 0: if phasezero < bjdref: phasezero += bjdref date1 = (barytime1 + bjdref - phasezero) phase1 = (date1 / period) - floor(date1/period) date2 = (barytime + bjdref - phasezero) phase2 = (date2 / period) - floor(date2/period) phase2 = array(phase2,'float32') # sort phases if status == 0: ptuple = [] phase3 = []; sap3 = []; saperr3 = [] pdc3 = []; pdcerr3 = [] cbv3 = []; cbverr3 = [] det3 = []; deterr3 = [] for i in range(len(phase2)): ptuple.append([phase2[i], sap[i], saperr[i], pdc[i], pdcerr[i], cbv[i], saperr[i], det[i], deterr[i]]) phsort = sorted(ptuple,key=lambda ph: ph[0]) for i in range(len(phsort)): phase3.append(phsort[i][0]) sap3.append(phsort[i][1]) saperr3.append(phsort[i][2]) pdc3.append(phsort[i][3]) pdcerr3.append(phsort[i][4]) cbv3.append(phsort[i][5]) cbverr3.append(phsort[i][6]) det3.append(phsort[i][7]) deterr3.append(phsort[i][8]) phase3 = array(phase3,'float32') sap3 = array(sap3,'float32') saperr3 = array(saperr3,'float32') pdc3 = array(pdc3,'float32') pdcerr3 = array(pdcerr3,'float32') cbv3 = array(cbv3,'float32') cbverr3 = array(cbverr3,'float32') det3 = array(det3,'float32') deterr3 = array(deterr3,'float32') # bin phases if status == 0 and bindata: work1 = array([sap3[0]],'float32') work2 = array([saperr3[0]],'float32') work3 = array([pdc3[0]],'float32') work4 = array([pdcerr3[0]],'float32') work5 = array([cbv3[0]],'float32') work6 = array([cbverr3[0]],'float32') work7 = array([det3[0]],'float32') work8 = array([deterr3[0]],'float32') phase4 = array([],'float32') sap4 = array([],'float32') saperr4 = array([],'float32') pdc4 = array([],'float32') pdcerr4 = array([],'float32') cbv4 = array([],'float32') cbverr4 = array([],'float32') det4 = array([],'float32') deterr4 = array([],'float32') dt = 1.0 / nbins nb = 0.0 rng = numpy.append(phase3,phase3[0]+1.0) for i 
in range(len(rng)): if rng[i] < nb * dt or rng[i] >= (nb + 1.0) * dt: if len(work1) > 0: phase4 = append(phase4,(nb + 0.5) * dt) if (binmethod == 'mean'): sap4 = append(sap4,kepstat.mean(work1)) saperr4 = append(saperr4,kepstat.mean_err(work2)) pdc4 = append(pdc4,kepstat.mean(work3)) pdcerr4 = append(pdcerr4,kepstat.mean_err(work4)) cbv4 = append(cbv4,kepstat.mean(work5)) cbverr4 = append(cbverr4,kepstat.mean_err(work6)) det4 = append(det4,kepstat.mean(work7)) deterr4 = append(deterr4,kepstat.mean_err(work8)) elif (binmethod == 'median'): sap4 = append(sap4,kepstat.median(work1,logfile)) saperr4 = append(saperr4,kepstat.mean_err(work2)) pdc4 = append(pdc4,kepstat.median(work3,logfile)) pdcerr4 = append(pdcerr4,kepstat.mean_err(work4)) cbv4 = append(cbv4,kepstat.median(work5,logfile)) cbverr4 = append(cbverr4,kepstat.mean_err(work6)) det4 = append(det4,kepstat.median(work7,logfile)) deterr4 = append(deterr4,kepstat.mean_err(work8)) else: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work1)],arange(0.0,float(len(work1)),1.0),work1,work2, threshold,threshold,niter,logfile,False) sap4 = append(sap4,coeffs[0]) saperr4 = append(saperr4,kepstat.mean_err(work2)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work3)],arange(0.0,float(len(work3)),1.0),work3,work4, threshold,threshold,niter,logfile,False) pdc4 = append(pdc4,coeffs[0]) pdcerr4 = append(pdcerr4,kepstat.mean_err(work4)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work5)],arange(0.0,float(len(work5)),1.0),work5,work6, threshold,threshold,niter,logfile,False) cbv4 = append(cbv4,coeffs[0]) cbverr4 = append(cbverr4,kepstat.mean_err(work6)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work7)],arange(0.0,float(len(work7)),1.0),work7,work8, threshold,threshold,niter,logfile,False) det4 = append(det4,coeffs[0]) deterr4 = append(deterr4,kepstat.mean_err(work8)) work1 = array([],'float32') work2 = array([],'float32') work3 = array([],'float32') work4 = array([],'float32') work5 = array([],'float32') work6 = array([],'float32') work7 = array([],'float32') work8 = array([],'float32') nb += 1.0 else: work1 = append(work1,sap3[i]) work2 = append(work2,saperr3[i]) work3 = append(work3,pdc3[i]) work4 = append(work4,pdcerr3[i]) work5 = append(work5,cbv3[i]) work6 = append(work6,cbverr3[i]) work7 = append(work7,det3[i]) work8 = append(work8,deterr3[i]) # update HDU1 for output file if status == 0: cols = (instr[1].columns + ColDefs([Column(name='PHASE',format='E',array=phase1)])) instr[1] = pyfits.new_table(cols) instr[1].header.cards['TTYPE'+str(len(instr[1].columns))].comment = 'column title: phase' instr[1].header.cards['TFORM'+str(len(instr[1].columns))].comment = 'data type: float32' for i in range(len(incards)): if incards[i].key not in instr[1].header.keys(): instr[1].header.update(incards[i].key, incards[i].value, incards[i].comment) else: instr[1].header.cards[incards[i].key].comment = incards[i].comment instr[1].header.update('PERIOD',period,'period defining the phase [d]') instr[1].header.update('BJD0',phasezero,'time of phase zero [BJD]') # write new phased data extension for output file if status == 0 and bindata: col1 = Column(name='PHASE',format='E',array=phase4) col2 = Column(name='SAP_FLUX',format='E',unit='e/s',array=sap4/cadenom) col3 
= Column(name='SAP_FLUX_ERR',format='E',unit='e/s',array=saperr4/cadenom) col4 = Column(name='PDC_FLUX',format='E',unit='e/s',array=pdc4/cadenom) col5 = Column(name='PDC_FLUX_ERR',format='E',unit='e/s',array=pdcerr4/cadenom) col6 = Column(name='CBV_FLUX',format='E',unit='e/s',array=cbv4/cadenom) col7 = Column(name='DET_FLUX',format='E',array=det4/cadenom) col8 = Column(name='DET_FLUX_ERR',format='E',array=deterr4/cadenom) cols = ColDefs([col1,col2,col3,col4,col5,col6,col7,col8]) instr.append(new_table(cols)) instr[-1].header.cards['TTYPE1'].comment = 'column title: phase' instr[-1].header.cards['TTYPE2'].comment = 'column title: simple aperture photometry' instr[-1].header.cards['TTYPE3'].comment = 'column title: SAP 1-sigma error' instr[-1].header.cards['TTYPE4'].comment = 'column title: pipeline conditioned photometry' instr[-1].header.cards['TTYPE5'].comment = 'column title: PDC 1-sigma error' instr[-1].header.cards['TTYPE6'].comment = 'column title: cotrended basis vector photometry' instr[-1].header.cards['TTYPE7'].comment = 'column title: Detrended aperture photometry' instr[-1].header.cards['TTYPE8'].comment = 'column title: DET 1-sigma error' instr[-1].header.cards['TFORM1'].comment = 'column type: float32' instr[-1].header.cards['TFORM2'].comment = 'column type: float32' instr[-1].header.cards['TFORM3'].comment = 'column type: float32' instr[-1].header.cards['TFORM4'].comment = 'column type: float32' instr[-1].header.cards['TFORM5'].comment = 'column type: float32' instr[-1].header.cards['TFORM6'].comment = 'column type: float32' instr[-1].header.cards['TFORM7'].comment = 'column type: float32' instr[-1].header.cards['TFORM8'].comment = 'column type: float32' instr[-1].header.cards['TUNIT2'].comment = 'column units: electrons per second' instr[-1].header.cards['TUNIT3'].comment = 'column units: electrons per second' instr[-1].header.cards['TUNIT4'].comment = 'column units: electrons per second' instr[-1].header.cards['TUNIT5'].comment = 'column units: electrons per second' instr[-1].header.cards['TUNIT6'].comment = 'column units: electrons per second' instr[-1].header.update('EXTNAME','FOLDED','extension name') instr[-1].header.update('PERIOD',period,'period defining the phase [d]') instr[-1].header.update('BJD0',phasezero,'time of phase zero [BJD]') instr[-1].header.update('BINMETHD',binmethod,'phase binning method') if binmethod =='sigclip': instr[-1].header.update('THRSHOLD',threshold,'sigma-clipping threshold [sigma]') instr[-1].header.update('NITER',niter,'max number of sigma-clipping iterations') # history keyword in output file if status == 0: status = kepkey.history(call,instr[0],outfile,logfile,verbose) instr.writeto(outfile) # clean up x-axis unit if status == 0: ptime1 = array([],'float32') ptime2 = array([],'float32') pout1 = array([],'float32') pout2 = array([],'float32') if bindata: work = sap4 if plottype == 'pdc': work = pdc4 if plottype == 'cbv': work = cbv4 if plottype == 'det': work = det4 for i in range(len(phase4)): if (phase4[i] > 0.5): ptime2 = append(ptime2,phase4[i] - 1.0) pout2 = append(pout2,work[i]) ptime2 = append(ptime2,phase4) pout2 = append(pout2,work) for i in range(len(phase4)): if (phase4[i] <= 0.5): ptime2 = append(ptime2,phase4[i] + 1.0) pout2 = append(pout2,work[i]) work = sap3 if plottype == 'pdc': work = pdc3 if plottype == 'cbv': work = cbv3 if plottype == 'det': work = det3 for i in range(len(phase3)): if (phase3[i] > 0.5): ptime1 = append(ptime1,phase3[i] - 1.0) pout1 = append(pout1,work[i]) ptime1 = append(ptime1,phase3) pout1 = 
append(pout1,work) for i in range(len(phase3)): if (phase3[i] <= 0.5): ptime1 = append(ptime1,phase3[i] + 1.0) pout1 = append(pout1,work[i]) xlab = 'Orbital Phase ($\phi$)' # clean up y-axis units if status == 0: nrm = len(str(int(pout1[isfinite(pout1)].max())))-1 pout1 = pout1 / 10**nrm pout2 = pout2 / 10**nrm if nrm == 0: ylab = plotlab else: ylab = '10$^%d$ %s' % (nrm, plotlab) # data limits xmin = ptime1.min() xmax = ptime1.max() ymin = pout1[isfinite(pout1)].min() ymax = pout1[isfinite(pout1)].max() xr = xmax - xmin yr = ymax - ymin ptime1 = insert(ptime1,[0],[ptime1[0]]) ptime1 = append(ptime1,[ptime1[-1]]) pout1 = insert(pout1,[0],[0.0]) pout1 = append(pout1,0.0) if bindata: ptime2 = insert(ptime2,[0],ptime2[0] - 1.0 / nbins) ptime2 = insert(ptime2,[0],ptime2[0]) ptime2 = append(ptime2,[ptime2[-1] + 1.0 / nbins, ptime2[-1] + 1.0 / nbins]) pout2 = insert(pout2,[0],[pout2[-1]]) pout2 = insert(pout2,[0],[0.0]) pout2 = append(pout2,[pout2[2],0.0]) # plot new light curve if status == 0 and plottype != 'none': try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 18, 'legend.fontsize': 18, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} pylab.rcParams.update(params) except: print 'ERROR -- KEPFOLD: install latex for scientific plotting' status = 1 if status == 0 and plottype != 'none': pylab.figure(figsize=[17,7]) pylab.clf() ax = pylab.axes([0.06,0.11,0.93,0.86]) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90) if bindata: pylab.fill(ptime2,pout2,color=fcolor,linewidth=0.0,alpha=falpha) else: if 'det' in plottype: pylab.fill(ptime1,pout1,color=fcolor,linewidth=0.0,alpha=falpha) pylab.plot(ptime1,pout1,color=lcolor,linestyle='',linewidth=lwidth,marker='.') if bindata: pylab.plot(ptime2[1:-1],pout2[1:-1],color='r',linestyle='-',linewidth=lwidth,marker='') xlabel(xlab, {'color' : 'k'}) ylabel(ylab, {'color' : 'k'}) xlim(-0.49999,1.49999) if ymin >= 0.0: ylim(ymin-yr*0.01,ymax+yr*0.01) # ylim(0.96001,1.03999) else: ylim(1.0e-10,ymax+yr*0.01) grid() if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() # close input file if status == 0: status = kepio.closefits(instr,logfile,verbose) # stop time kepmsg.clock('KEPFOLD ended at: ',logfile,verbose)
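# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE code). The heart of
# kepfold above is the arithmetic phase = ((t - phasezero) / period) mod 1,
# followed by a sort on phase and, optionally, averaging inside equal-width
# phase bins. The helper below is a minimal numpy-only rendition of that
# idea, assuming mean binning only; its name and interface are invented for
# illustration and it is not a drop-in replacement for kepfold.
def _sketch_phase_fold(time, flux, period, phasezero, nbins=None):
    """Fold a light curve on `period` about `phasezero`; optionally bin it."""
    import numpy as np
    time = np.asarray(time, dtype='float64')
    flux = np.asarray(flux, dtype='float64')
    # phase in [0,1): same in spirit as the floor() arithmetic in kepfold
    phase = np.mod((time - phasezero) / period, 1.0)
    order = np.argsort(phase)
    phase, flux = phase[order], flux[order]
    if nbins is None:
        return phase, flux
    # simple mean binning; kepfold also offers median and sigma-clip methods
    edges = np.linspace(0.0, 1.0, nbins + 1)
    idx = np.digitize(phase, edges) - 1
    binned = np.array([flux[idx == k].mean() if np.any(idx == k) else np.nan
                       for k in range(nbins)])
    centers = 0.5 * (edges[:-1] + edges[1:])
    return centers, binned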
def kepdetrend(infile, outfile, datacol, errcol, ranges1, npoly1, nsig1, niter1, ranges2, npoly2, nsig2, niter2, popnans, plot, clobber, verbose, logfile, status, cmdLine=False): # startup parameters status = 0 labelsize = 24 ticksize = 16 xsize = 16 ysize = 9 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPDETREND -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'datacol=' + str(datacol) + ' ' call += 'errcol=' + str(errcol) + ' ' call += 'ranges1=' + str(ranges1) + ' ' call += 'npoly1=' + str(npoly1) + ' ' call += 'nsig1=' + str(nsig1) + ' ' call += 'niter1=' + str(niter1) + ' ' call += 'ranges2=' + str(ranges2) + ' ' call += 'npoly2=' + str(npoly2) + ' ' call += 'nsig2=' + str(nsig2) + ' ' call += 'niter2=' + str(niter2) + ' ' popn = 'n' if (popnans): popn = 'y' call += 'popnans=' + popn + ' ' plotit = 'n' if (plot): plotit = 'y' call += 'plot=' + plotit + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPDETREND started at', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPDETREND: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile, message, verbose) # open input file if status == 0: instr, status = kepio.openfits(infile, 'readonly', logfile, verbose) tstart, tstop, bjdref, cadence, status = kepio.timekeys( instr, infile, logfile, verbose, status) # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr, file, logfile, verbose) # read table structure if status == 0: table, status = kepio.readfitstab(infile, instr[1], logfile, verbose) # filter input data table if status == 0: work1 = numpy.array( [table.field('time'), table.field(datacol), table.field(errcol)]) work1 = numpy.rot90(work1, 3) work1 = work1[~numpy.isnan(work1).any(1)] # read table columns if status == 0: intime = work1[:, 2] + bjdref indata = work1[:, 1] inerr = work1[:, 0] print intime # time ranges for region 1 (region to be corrected) if status == 0: time1 = [] data1 = [] err1 = [] t1start, t1stop, status = kepio.timeranges(ranges1, logfile, verbose) if status == 0: cadencelis1, status = kepstat.filterOnRange(intime, t1start, t1stop) if status == 0: for i in range(len(cadencelis1)): time1.append(intime[cadencelis1[i]]) data1.append(indata[cadencelis1[i]]) if errcol.lower() != 'none': err1.append(inerr[cadencelis1[i]]) t0 = time1[0] time1 = array(time1, dtype='float64') - t0 data1 = array(data1, dtype='float32') if errcol.lower() != 'none': err1 = array(err1, dtype='float32') else: err1 = None # fit function to range 1 if status == 0: functype = 'poly' + str(npoly1) pinit = [data1.mean()] if npoly1 > 0: for i in range(npoly1): pinit.append(0) pinit = array(pinit, dtype='float32') coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx1, ploty1, status = \ kepfit.lsqclip(functype,pinit,time1,data1,err1,nsig1,nsig1,niter1, logfile,verbose) fit1 = indata * 0.0 for i in range(len(coeffs)): fit1 += coeffs[i] * (intime - t0)**i for i in range(len(intime)): if i not in cadencelis1: fit1[i] = 0.0 plotx1 += 
t0 print coeffs # time ranges for region 2 (region that is correct) if status == 0: time2 = [] data2 = [] err2 = [] t2start, t2stop, status = kepio.timeranges(ranges2, logfile, verbose) cadencelis2, status = kepstat.filterOnRange(intime, t2start, t2stop) for i in range(len(cadencelis2)): time2.append(intime[cadencelis2[i]]) data2.append(indata[cadencelis2[i]]) if errcol.lower() != 'none': err2.append(inerr[cadencelis2[i]]) t0 = time2[0] time2 = array(time2, dtype='float64') - t0 data2 = array(data2, dtype='float32') if errcol.lower() != 'none': err2 = array(err2, dtype='float32') else: err2 = None # fit function to range 2 if status == 0: functype = 'poly' + str(npoly2) pinit = [data2.mean()] if npoly2 > 0: for i in range(npoly2): pinit.append(0) pinit = array(pinit, dtype='float32') coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx2, ploty2, status = \ kepfit.lsqclip(functype,pinit,time2,data2,err2,nsig2,nsig2,niter2, logfile,verbose) fit2 = indata * 0.0 for i in range(len(coeffs)): fit2 += coeffs[i] * (intime - t0)**i for i in range(len(intime)): if i not in cadencelis1: fit2[i] = 0.0 plotx2 += t0 # normalize data if status == 0: outdata = indata - fit1 + fit2 if errcol.lower() != 'none': outerr = inerr * 1.0 # comment keyword in output file if status == 0: status = kepkey.history(call, instr[0], outfile, logfile, verbose) # clean up x-axis unit if status == 0: intime0 = float(int(tstart / 100) * 100.0) if intime0 < 2.4e6: intime0 += 2.4e6 ptime = intime - intime0 plotx1 = plotx1 - intime0 plotx2 = plotx2 - intime0 xlab = 'BJD $-$ %d' % intime0 # clean up y-axis units if status == 0: pout = outdata ploty1 ploty2 nrm = len(str(int(numpy.nanmax(indata)))) - 1 indata = indata / 10**nrm pout = pout / 10**nrm ploty1 = ploty1 / 10**nrm ploty2 = ploty2 / 10**nrm ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm # data limits xmin = ptime.min() xmax = ptime.max() ymin = indata.min() ymax = indata.max() omin = pout.min() omax = pout.max() xr = xmax - xmin yr = ymax - ymin oo = omax - omin ptime = insert(ptime, [0], [ptime[0]]) ptime = append(ptime, [ptime[-1]]) indata = insert(indata, [0], [0.0]) indata = append(indata, [0.0]) pout = insert(pout, [0], [0.0]) pout = append(pout, 0.0) # plot light curve if status == 0 and plot: try: params = { 'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize } rcParams.update(params) except: pass pylab.figure(figsize=[xsize, ysize]) pylab.clf() # plot original data ax = pylab.axes([0.06, 0.523, 0.93, 0.45]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() pylab.setp(labels, 'rotation', 90, fontsize=12) pylab.plot(ptime, indata, color=lcolor, linestyle='-', linewidth=lwidth) pylab.fill(ptime, indata, color=fcolor, linewidth=0.0, alpha=falpha) pylab.plot(plotx1, ploty1, color='r', linestyle='-', linewidth=2.0) pylab.plot(plotx2, ploty2, color='g', linestyle='-', linewidth=2.0) pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin > 0.0: pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01) else: pylab.ylim(1.0e-10, ymax + yr * 0.01) pylab.ylabel(ylab, {'color': 'k'}) pylab.grid() # plot detrended data ax = pylab.axes([0.06, 0.073, 0.93, 0.45]) # force tick labels to be absolute rather 
than relative pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() pylab.setp(labels, 'rotation', 90, fontsize=12) pylab.plot(ptime, pout, color=lcolor, linestyle='-', linewidth=lwidth) pylab.fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha) pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin > 0.0: pylab.ylim(omin - oo * 0.01, omax + oo * 0.01) else: pylab.ylim(1.0e-10, omax + oo * 0.01) pylab.xlabel(xlab, {'color': 'k'}) try: pylab.ylabel(ylab, {'color': 'k'}) except: ylab = '10**%d e-/s' % nrm pylab.ylabel(ylab, {'color': 'k'}) # render plot if status == 0: if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() # write output file if status == 0 and popnans: instr[1].data.field(datacol)[good_data] = outdata instr[1].data.field(errcol)[good_data] = outerr instr[1].data.field(datacol)[bad_data] = None instr[1].data.field(errcol)[bad_data] = None instr.writeto(outfile) elif status == 0 and not popnans: for i in range(len(outdata)): instr[1].data.field(datacol)[i] = outdata[i] if errcol.lower() != 'none': instr[1].data.field(errcol)[i] = outerr[i] instr.writeto(outfile) # close input file if status == 0: status = kepio.closefits(instr, logfile, verbose) ## end time if (status == 0): message = 'KEPDETREND completed at' else: message = '\nKEPDETREND aborted at' kepmsg.clock(message, logfile, verbose)
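# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE code). kepdetrend above
# fits a polynomial to a misbehaving time range (ranges1), fits a second
# polynomial to a well-behaved reference range (ranges2), and replaces the
# first trend with the second: outdata = indata - fit1 + fit2. The helper
# below captures that idea with plain numpy least squares; the real code
# uses kepfit.lsqclip (iterative sigma clipping), which this sketch omits.
# The function name and the (lo, hi) tuple ranges are assumptions made for
# illustration.
def _sketch_detrend_two_ranges(time, flux, bad_range, good_range, npoly=2):
    """Replace the trend inside `bad_range` with the trend of `good_range`."""
    import numpy as np
    time = np.asarray(time, dtype='float64')
    flux = np.asarray(flux, dtype='float64')
    def _fit(rng):
        lo, hi = rng
        mask = (time >= lo) & (time <= hi)   # assumes the range is non-empty
        t0 = time[mask][0]
        coeffs = np.polyfit(time[mask] - t0, flux[mask], npoly)
        return np.polyval(coeffs, time - t0), mask
    fit1, m1 = _fit(bad_range)    # trend of the region being corrected
    fit2, m2 = _fit(good_range)   # reference trend, extrapolated into region 1
    out = flux.copy()
    out[m1] = flux[m1] - fit1[m1] + fit2[m1]
    return out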
def kepoutlier(infile,outfile,datacol,nsig,stepsize,npoly,niter, operation,ranges,plot,plotfit,clobber,verbose,logfile,status, cmdLine=False): # startup parameters status = 0 labelsize = 24 ticksize = 16 xsize = 16 ysize = 6 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPOUTLIER -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'datacol='+str(datacol)+' ' call += 'nsig='+str(nsig)+' ' call += 'stepsize='+str(stepsize)+' ' call += 'npoly='+str(npoly)+' ' call += 'niter='+str(niter)+' ' call += 'operation='+str(operation)+' ' call += 'ranges='+str(ranges)+' ' plotit = 'n' if (plot): plotit = 'y' call += 'plot='+plotit+ ' ' plotf = 'n' if (plotfit): plotf = 'y' call += 'plotfit='+plotf+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPOUTLIER started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPOUTLIER: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile,message,verbose) # open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # read table structure if status == 0: table, status = kepio.readfitstab(infile,instr[1],logfile,verbose) # filter input data table if status == 0: try: nanclean = instr[1].header['NANCLEAN'] except: naxis2 = 0 try: for i in range(len(table.field(0))): if numpy.isfinite(table.field('barytime')[i]) and \ numpy.isfinite(table.field(datacol)[i]): table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] except: for i in range(len(table.field(0))): if numpy.isfinite(table.field('time')[i]) and \ numpy.isfinite(table.field(datacol)[i]): table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] comment = 'NaN cadences removed from data' status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose) # read table columns if status == 0: try: intime = instr[1].data.field('barytime') + 2.4e6 except: intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose) indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose) if status == 0: intime = intime + bjdref indata = indata / cadenom # time ranges for region to be corrected if status == 0: t1, t2, status = kepio.timeranges(ranges,logfile,verbose) cadencelis, status = kepstat.filterOnRange(intime,t1,t2) # find limits of each time step if status == 0: tstep1 = []; tstep2 = [] work = intime[0] while work < intime[-1]: tstep1.append(work) tstep2.append(array([work+stepsize,intime[-1]],dtype='float64').min()) work += stepsize # find cadence limits of each time step if status == 0: cstep1 = []; cstep2 = [] work1 = 0; work2 = 0 for i in range(len(intime)): if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize: work2 = i else: 
cstep1.append(work1) cstep2.append(work2) work1 = i; work2 = i cstep1.append(work1) cstep2.append(work2) outdata = indata * 1.0 # comment keyword in output file if status == 0: status = kepkey.history(call,instr[0],outfile,logfile,verbose) # clean up x-axis unit if status == 0: intime0 = float(int(tstart / 100) * 100.0) ptime = intime - intime0 xlab = 'BJD $-$ %d' % intime0 # clean up y-axis units if status == 0: pout = indata * 1.0 nrm = len(str(int(pout.max())))-1 pout = pout / 10**nrm ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm # data limits xmin = ptime.min() xmax = ptime.max() ymin = pout.min() ymax = pout.max() xr = xmax - xmin yr = ymax - ymin ptime = insert(ptime,[0],[ptime[0]]) ptime = append(ptime,[ptime[-1]]) pout = insert(pout,[0],[0.0]) pout = append(pout,0.0) # plot light curve if status == 0 and plot: plotLatex = True try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} rcParams.update(params) except: plotLatex = False if status == 0 and plot: pylab.figure(figsize=[xsize,ysize]) pylab.clf() # plot data ax = pylab.axes([0.06,0.1,0.93,0.87]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth) fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha) xlabel(xlab, {'color' : 'k'}) if not plotLatex: ylab = '10**%d electrons/sec' % nrm ylabel(ylab, {'color' : 'k'}) grid() # loop over each time step, fit data, determine rms if status == 0: masterfit = indata * 0.0 mastersigma = zeros(len(masterfit)) functype = 'poly' + str(npoly) for i in range(len(cstep1)): pinit = [indata[cstep1[i]:cstep2[i]+1].mean()] if npoly > 0: for j in range(npoly): pinit.append(0.0) pinit = array(pinit,dtype='float32') try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]], indata[cstep1[i]:cstep2[i]+1],None,nsig,nsig,niter,logfile, verbose) for j in range(len(coeffs)): masterfit[cstep1[i]:cstep2[i]+1] += coeffs[j] * \ (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]])**j for j in range(cstep1[i],cstep2[i]+1): mastersigma[j] = sigma if plotfit: pylab.plot(plotx+intime[cstep1[i]]-intime0,ploty / 10**nrm, 'g',lw='3') except: for j in range(cstep1[i],cstep2[i]+1): masterfit[j] = indata[j] mastersigma[j] = 1.0e10 message = 'WARNING -- KEPOUTLIER: could not fit range ' message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]) kepmsg.warn(None,message) # reject outliers if status == 0: rejtime = []; rejdata = []; naxis2 = 0 for i in range(len(masterfit)): if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis: rejtime.append(intime[i]) rejdata.append(indata[i]) if operation == 'replace': [rnd] = kepstat.randarray([masterfit[i]],[mastersigma[i]]) table[naxis2] = table[i] table.field(datacol)[naxis2] = rnd naxis2 += 1 else: table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] rejtime = array(rejtime,dtype='float64') rejdata = array(rejdata,dtype='float32') pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro') # plot ranges xlim(xmin-xr*0.01,xmax+xr*0.01) if ymin >= 0.0: 
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)

# render plot

        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file

    if status == 0:
        instr.writeto(outfile)

# close input file

    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time

    if (status == 0):
        message = 'KEPOUTLIER completed at'
    else:
        message = '\nKEPOUTLIER aborted at'
    kepmsg.clock(message,logfile,verbose)
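# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE code). kepoutlier above
# fits a low-order polynomial to each stepsize-long window with iterative
# sigma clipping and rejects (or replaces) points further than nsig * sigma
# from the fit. The helper below shows the clipping loop for a single window
# using plain numpy in place of kepfit.lsqclip; the name and single-window
# scope are simplifications for illustration.
def _sketch_sigma_clip_outliers(time, flux, npoly=3, nsig=3.0, niter=10):
    """Return a boolean mask that is True where a point is an outlier."""
    import numpy as np
    time = np.asarray(time, dtype='float64')
    flux = np.asarray(flux, dtype='float64')
    keep = np.ones(len(flux), dtype=bool)
    for _ in range(niter):
        coeffs = np.polyfit(time[keep] - time[0], flux[keep], npoly)
        model = np.polyval(coeffs, time - time[0])
        sigma = np.std(flux[keep] - model[keep])
        new_keep = np.abs(flux - model) < nsig * sigma
        if np.array_equal(new_keep, keep):   # converged
            break
        keep = new_keep
    return ~keep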
def martinsff(intime, indata, centr1, centr2, npoly_cxcy, sigma_cxcy, npoly_ardx, npoly_dsdt, sigma_dsdt, npoly_arfl, sigma_arfl, verbose, logfile, status): # startup parameters status = 0 labelsize = 16 ticksize = 14 xsize = 20 ysize = 8 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # fit centroid data with low-order polynomial cfit = zeros((len(centr2))) csig = zeros((len(centr2))) functype = 'poly' + str(npoly_cxcy) pinit = array([nanmean(centr2)]) if npoly_cxcy > 0: for j in range(npoly_cxcy): pinit = append(pinit, 0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose) for j in range(len(coeffs)): cfit += coeffs[j] * numpy.power(centr1, j) csig[:] = sigma except: message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % ( t1, t2) status = kepmsg.err(logfile, message, verbose) # sys.exit('') os._exit(1) # reject outliers time_good = array([], 'float64') centr1_good = array([], 'float32') centr2_good = array([], 'float32') flux_good = array([], 'float32') cad_good = array([], 'int') for i in range(len(cfit)): if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]: time_good = append(time_good, intime[i]) centr1_good = append(centr1_good, centr1[i]) centr2_good = append(centr2_good, centr2[i]) flux_good = append(flux_good, indata[i]) # covariance matrix for centroid time series centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)]) covar = cov(centr) # eigenvector eigenvalues of covariance matrix [eval, evec] = numpy.linalg.eigh(covar) ex = arange(-10.0, 10.0, 0.1) epar = evec[1, 1] / evec[0, 1] * ex enor = evec[1, 0] / evec[0, 0] * ex ex = ex + mean(centr1) epar = epar + mean(centr2_good) enor = enor + mean(centr2_good) # rotate centroid data centr_rot = dot(evec.T, centr) # fit polynomial to rotated centroids rfit = zeros((len(centr2))) rsig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(centr_rot[0, :])]) pinit = array([1.0]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit, 0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) rx = linspace(nanmin(centr_rot[1, :]), nanmax(centr_rot[1, :]), 100) ry = zeros((len(rx))) for i in range(len(coeffs)): ry = ry + coeffs[i] * numpy.power(rx, i) # calculate arclength of centroids s = zeros((len(rx))) for i in range(1, len(s)): work3 = ((ry[i] - ry[i - 1]) / (rx[i] - rx[i - 1]))**2 s[i] = s[i - 1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i - 1]) # fit arclength as a function of strongest eigenvector sfit = zeros((len(centr2))) ssig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(s)]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit, 0.0) try: acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose) except: 
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) # correlate arclength with detrended flux t = copy(time_good) y = copy(flux_good) z = centr_rot[1, :] x = zeros((len(z))) for i in range(len(acoeffs)): x = x + acoeffs[i] * numpy.power(z, i) # calculate time derivative of arclength s dx = zeros((len(x))) for i in range(1, len(x)): dx[i] = (x[i] - x[i - 1]) / (t[i] - t[i - 1]) dx[0] = dx[1] # fit polynomial to derivative and flag outliers (thruster firings) dfit = zeros((len(dx))) dsig = zeros((len(dx))) functype = 'poly' + str(npoly_dsdt) pinit = array([nanmean(dx)]) if npoly_dsdt > 0: for j in range(npoly_dsdt): pinit = append(pinit, 0.0) try: dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \ kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) for i in range(len(dcoeffs)): dfit = dfit + dcoeffs[i] * numpy.power(t, i) centr1_pnt = array([], 'float32') centr2_pnt = array([], 'float32') time_pnt = array([], 'float64') flux_pnt = array([], 'float32') dx_pnt = array([], 'float32') s_pnt = array([], 'float32') time_thr = array([], 'float64') flux_thr = array([], 'float32') dx_thr = array([], 'float32') thr_cadence = zeros(len(t), dtype=bool) for i in range(len(t)): if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[ i] > dfit[i] - sigma_dsdt * dsigma: time_pnt = append(time_pnt, time_good[i]) flux_pnt = append(flux_pnt, flux_good[i]) dx_pnt = append(dx_pnt, dx[i]) s_pnt = append(s_pnt, x[i]) centr1_pnt = append(centr1_pnt, centr1_good[i]) centr2_pnt = append(centr2_pnt, centr2_good[i]) else: time_thr = append(time_thr, time_good[i]) flux_thr = append(flux_thr, flux_good[i]) dx_thr = append(dx_thr, dx[i]) thr_cadence[i] = True # fit arclength-flux correlation cfit = zeros((len(time_pnt))) csig = zeros((len(time_pnt))) functype = 'poly' + str(npoly_arfl) pinit = array([nanmean(flux_pnt)]) if npoly_arfl > 0: for j in range(npoly_arfl): pinit = append(pinit, 0.0) try: ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \ kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) # correction factors for unfiltered data centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T, centr) yy = copy(indata) zz = centr_rot[1, :] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz, i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx, i) # apply correction to flux time-series out_detsap = indata / cfac return out_detsap, cfac, thr_cadence
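# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE code). The geometric
# core of martinsff above is: (1) diagonalise the covariance of the x/y
# centroids to find the principal axes of the pointing drift, (2) rotate the
# centroids onto those axes, and (3) fit the minor-axis wobble as a function
# of drift along the major axis and integrate an arclength coordinate s
# along that curve; flux is then decorrelated against s. The helper below is
# a compact numpy version of steps 1-3; the polynomial degree (5) and the
# gradient-based arclength integral are simplifying assumptions.
def _sketch_sff_arclength(centr1, centr2):
    """Return the arclength coordinate of each cadence along the drift track."""
    import numpy as np
    c = np.vstack([centr1 - np.mean(centr1), centr2 - np.mean(centr2)])
    evals, evec = np.linalg.eigh(np.cov(c))   # principal axes of the drift
    rot = evec.T.dot(c)                       # rotated centroids
    x = rot[1, :]                             # drift along the strongest axis
    order = np.argsort(x)
    coeffs = np.polyfit(x[order], rot[0, order], 5)
    xs = np.linspace(x.min(), x.max(), 200)
    ys = np.polyval(coeffs, xs)
    # arclength along the fitted curve, interpolated back to each cadence
    ds = np.sqrt(1.0 + np.gradient(ys, xs) ** 2) * np.gradient(xs)
    return np.interp(x, xs, np.cumsum(ds))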
def kepsff(infile,outfile,datacol,cenmethod,stepsize,npoly_cxcy,sigma_cxcy,npoly_ardx, npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,plotres,clobber,verbose,logfile, status,cmdLine=False): # startup parameters status = 0 labelsize = 16 ticksize = 14 xsize = 20 ysize = 8 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPSFF -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'datacol='+datacol+' ' call += 'cenmethod='+cenmethod+' ' call += 'stepsize='+str(stepsize)+' ' call += 'npoly_cxcy='+str(npoly_cxcy)+' ' call += 'sigma_cxcy='+str(sigma_cxcy)+' ' call += 'npoly_ardx='+str(npoly_ardx)+' ' call += 'npoly_dsdt='+str(npoly_dsdt)+' ' call += 'sigma_dsdt='+str(sigma_dsdt)+' ' call += 'npoly_arfl='+str(npoly_arfl)+' ' call += 'sigma_arfl='+str(sigma_arfl)+' ' savep = 'n' if (plotres): savep = 'y' call += 'plotres='+savep+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPSFF started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile,message,verbose) # open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # read table structure if status == 0: table, status = kepio.readfitstab(infile,instr[1],logfile,verbose) # determine sequence of windows in time if status == 0: frametim = instr[1].header['FRAMETIM'] num_frm = instr[1].header['NUM_FRM'] exptime = frametim * num_frm / 86400 tstart = table.field('TIME')[0] tstop = table.field('TIME')[-1] winedge = arange(tstart,tstop,stepsize) if tstop > winedge[-1] + stepsize / 2: winedge = append(winedge,tstop) else: winedge[-1] = tstop winedge = (winedge - tstart) / exptime winedge = winedge.astype(int) if len(table.field('TIME')) > winedge[-1] + 1: winedge = append(winedge,len(table.field('TIME'))) elif len(table.field('TIME')) < winedge[-1]: winedge[-1] = len(table.field('TIME')) # step through the time windows if status == 0: for iw in range(1,len(winedge)): t1 = winedge[iw-1] t2 = winedge[iw] # filter input data table work1 = numpy.array([table.field('TIME')[t1:t2], table.field('CADENCENO')[t1:t2], table.field(datacol)[t1:t2], table.field('MOM_CENTR1')[t1:t2], table.field('MOM_CENTR2')[t1:t2], table.field('PSF_CENTR1')[t1:t2], table.field('PSF_CENTR2')[t1:t2], table.field('SAP_QUALITY')[t1:t2]],'float64') work1 = numpy.rot90(work1,3) work2 = work1[~numpy.isnan(work1).any(1)] work2 = work2[(work2[:,0] == 0.0) | (work2[:,0] > 1e5)] # assign table columns intime = work2[:,7] + bjdref cadenceno = work2[:,6].astype(int) indata = work2[:,5] mom_centr1 = work2[:,4] mom_centr2 = work2[:,3] psf_centr1 = work2[:,2] psf_centr2 = work2[:,1] sap_quality = work2[:,0] if cenmethod == 
'moments': centr1 = copy(mom_centr1) centr2 = copy(mom_centr2) else: centr1 = copy(psf_centr1) centr2 = copy(psf_centr2) # fit centroid data with low-order polynomial cfit = zeros((len(centr2))) csig = zeros((len(centr2))) functype = 'poly' + str(npoly_cxcy) pinit = array([nanmean(centr2)]) if npoly_cxcy > 0: for j in range(npoly_cxcy): pinit = append(pinit,0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose) for j in range(len(coeffs)): cfit += coeffs[j] * numpy.power(centr1,j) csig[:] = sigma except: message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2) status = kepmsg.err(logfile,message,verbose) # sys.exit('') os._exit(1) # reject outliers time_good = array([],'float64') centr1_good = array([],'float32') centr2_good = array([],'float32') flux_good = array([],'float32') cad_good = array([],'int') for i in range(len(cfit)): if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]: time_good = append(time_good,intime[i]) centr1_good = append(centr1_good,centr1[i]) centr2_good = append(centr2_good,centr2[i]) flux_good = append(flux_good,indata[i]) cad_good = append(cad_good,cadenceno[i]) # covariance matrix for centroid time series centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)]) covar = cov(centr) # eigenvector eigenvalues of covariance matrix [eval, evec] = numpy.linalg.eigh(covar) ex = arange(-10.0,10.0,0.1) epar = evec[1,1] / evec[0,1] * ex enor = evec[1,0] / evec[0,0] * ex ex = ex + mean(centr1) epar = epar + mean(centr2_good) enor = enor + mean(centr2_good) # rotate centroid data centr_rot = dot(evec.T,centr) # fit polynomial to rotated centroids rfit = zeros((len(centr2))) rsig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(centr_rot[0,:])]) pinit = array([1.0]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit,0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100) ry = zeros((len(rx))) for i in range(len(coeffs)): ry = ry + coeffs[i] * numpy.power(rx,i) # calculate arclength of centroids s = zeros((len(rx))) for i in range(1,len(s)): work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2 s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1]) # fit arclength as a function of strongest eigenvector sfit = zeros((len(centr2))) ssig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(s)]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit,0.0) try: acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) # correlate arclength with detrended flux t = copy(time_good) c = copy(cad_good) y 
= copy(flux_good) z = centr_rot[1,:] x = zeros((len(z))) for i in range(len(acoeffs)): x = x + acoeffs[i] * numpy.power(z,i) # calculate time derivative of arclength s dx = zeros((len(x))) for i in range(1,len(x)): dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1]) dx[0] = dx[1] # fit polynomial to derivative and flag outliers (thruster firings) dfit = zeros((len(dx))) dsig = zeros((len(dx))) functype = 'poly' + str(npoly_dsdt) pinit = array([nanmean(dx)]) if npoly_dsdt > 0: for j in range(npoly_dsdt): pinit = append(pinit,0.0) try: dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \ kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) for i in range(len(dcoeffs)): dfit = dfit + dcoeffs[i] * numpy.power(t,i) centr1_pnt = array([],'float32') centr2_pnt = array([],'float32') time_pnt = array([],'float64') flux_pnt = array([],'float32') dx_pnt = array([],'float32') s_pnt = array([],'float32') time_thr = array([],'float64') flux_thr = array([],'float32') dx_thr = array([],'float32') thr_cadence = [] for i in range(len(t)): if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma: time_pnt = append(time_pnt,time_good[i]) flux_pnt = append(flux_pnt,flux_good[i]) dx_pnt = append(dx_pnt,dx[i]) s_pnt = append(s_pnt,x[i]) centr1_pnt = append(centr1_pnt,centr1_good[i]) centr2_pnt = append(centr2_pnt,centr2_good[i]) else: time_thr = append(time_thr,time_good[i]) flux_thr = append(flux_thr,flux_good[i]) dx_thr = append(dx_thr,dx[i]) thr_cadence.append(cad_good[i]) # fit arclength-flux correlation cfit = zeros((len(time_pnt))) csig = zeros((len(time_pnt))) functype = 'poly' + str(npoly_arfl) pinit = array([nanmean(flux_pnt)]) if npoly_arfl > 0: for j in range(npoly_arfl): pinit = append(pinit,0.0) try: ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \ kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) # correction factors for unfiltered data centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T,centr) yy = copy(indata) zz = centr_rot[1,:] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz,i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx,i) # apply correction to flux time-series out_detsap = indata / cfac # split time-series data for plotting tim_gd = array([],'float32') flx_gd = array([],'float32') tim_bd = array([],'float32') flx_bd = array([],'float32') for i in range(len(indata)): if intime[i] in time_pnt: tim_gd = append(tim_gd,intime[i]) flx_gd = append(flx_gd,out_detsap[i]) else: tim_bd = append(tim_bd,intime[i]) flx_bd = append(flx_bd,out_detsap[i]) # plot style and size status = kepplot.define(labelsize,ticksize,logfile,verbose) pylab.figure(figsize=[xsize,ysize]) pylab.clf() # plot x-centroid vs y-centroid ax = kepplot.location([0.04,0.57,0.16,0.41]) # plot location px = copy(centr1) # clean-up x-axis units py = copy(centr2) # clean-up y-axis units pxmin = px.min() pxmax = px.max() pymin = py.min() pymax = py.max() pxr = pxmax - pxmin pyr = pymax - pymin pad = 0.05 if pxr > pyr: dely = (pxr - pyr) / 2 xlim(pxmin - pxr * pad, pxmax + pxr * pad) ylim(pymin - dely - pyr * 
pad, pymax + dely + pyr * pad) else: delx = (pyr - pxr) / 2 ylim(pymin - pyr * pad, pymax + pyr * pad) xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad) pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='') # plot data pylab.plot(centr1_good,centr2_good,color='#009900',markersize=5,marker='D',ls='') # plot data pylab.plot(ex,epar,color='k',ls='-') pylab.plot(ex,enor,color='k',ls='-') for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels('CCD Column','CCD Row','k',16) # labels pylab.grid() # grid lines # plot arclength fits vs drift along strongest eigenvector ax = kepplot.location([0.24,0.57,0.16,0.41]) # plot location px = rx - rx[0] py = s - rx - (s[0] - rx[0]) # clean-up y-axis units py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) # clean-up x-axis units kepplot.RangeOfPlot(px,py,0.05,False) # data limits pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='') px = plotx - rx[0] # clean-up x-axis units py = ploty-plotx - (s[0] - rx[0]) # clean-up y-axis units py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) # clean-up x-axis units pylab.plot(px,py,color='r',ls='-',lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) ylab = re.sub(' e\S+',' pixels)',ylab) ylab = re.sub(' s\S+','',ylab) ylab = re.sub('Flux','s $-$ x\'',ylab) kepplot.labels('Linear Drift [x\'] (pixels)',ylab,'k',16) # labels pylab.grid() # grid lines # plot time derivative of arclength s ax = kepplot.location([0.04,0.08,0.16,0.41]) # plot location px = copy(time_pnt) py = copy(dx_pnt) px, xlab, status = kepplot.cleanx(px,logfile,verbose) # clean-up x-axis units kepplot.RangeOfPlot(px,dx,0.05,False) # data limits pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='') try: px = copy(time_thr) py = copy(dx_thr) px, xlab, status = kepplot.cleanx(px,logfile,verbose) # clean-up x-axis units pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='') except: pass px = copy(t) py = copy(dfit) px, xlab, status = kepplot.cleanx(px,logfile,verbose) # clean-up x-axis units pylab.plot(px,py,color='r',ls='-',lw=3) py = copy(dfit+sigma_dsdt*dsigma) pylab.plot(px,py,color='r',ls='--',lw=3) py = copy(dfit-sigma_dsdt*dsigma) pylab.plot(px,py,color='r',ls='--',lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels(xlab,'ds/dt (pixels day$^{-1}$)','k',16) # labels pylab.grid() # grid lines # plot relation of arclength vs detrended flux ax = kepplot.location([0.24,0.08,0.16,0.41]) # plot location px = copy(s_pnt) py = copy(flux_pnt) py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose) # clean-up x-axis units kepplot.RangeOfPlot(px,py,0.05,False) # data limits pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='') pylab.plot(plx,ply,color='r',ls='-',lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels('Arclength [s] (pixels)',ylab,'k',16) # labels pylab.grid() # grid lines # plot aperture photometry kepplot.location([0.44,0.53,0.55,0.45]) # plot location px, xlab, status = kepplot.cleanx(intime,logfile,verbose) # clean-up x-axis units py, ylab, status = kepplot.cleany(indata,1.0,logfile,verbose) # clean-up x-axis units kepplot.RangeOfPlot(px,py,0.01,True) # data limits 
kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True) # plot data kepplot.labels(' ',ylab,'k',16) # labels pylab.setp(pylab.gca(),xticklabels=[]) # remove x- or y-tick labels kepplot.labels(xlab,re.sub('Flux','Aperture Flux',ylab),'k',16) # labels pylab.grid() # grid lines # Plot corrected photometry kepplot.location([0.44,0.08,0.55,0.45]) # plot location kepplot.RangeOfPlot(px,py,0.01,True) # data limits px, xlab, status = kepplot.cleanx(tim_gd,logfile,verbose) # clean-up x-axis units py, ylab, status = kepplot.cleany(flx_gd,1.0,logfile,verbose) # clean-up x-axis units kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True) # plot data try: px, xlab, status = kepplot.cleanx(tim_bd,logfile,verbose) # clean-up x-axis units py = copy(flx_bd) pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='') except: pass kepplot.labels(xlab,re.sub('Flux','Corrected Flux',ylab),'k',16) # labels pylab.grid() # grid lines # render plot if plotres: kepplot.render(cmdLine) # save plot to file if plotres: pylab.savefig(re.sub('.fits','_%d.png' % (iw + 1),outfile)) # correct fluxes within the output file intime = work1[:,7] + bjdref cadenceno = work1[:,6].astype(int) indata = work1[:,5] mom_centr1 = work1[:,4] mom_centr2 = work1[:,3] psf_centr1 = work1[:,2] psf_centr2 = work1[:,1] centr1 = copy(mom_centr1) centr2 = copy(mom_centr2) centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T,centr) yy = copy(indata) zz = centr_rot[1,:] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz,i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx,i) out_detsap = yy / cfac instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac try: instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac except: pass # add quality flag to output file for thruster firings for i in range(len(intime)): if cadenceno[i] in thr_cadence: instr[1].data.field('SAP_QUALITY')[t1+i] += 131072 # write output file if status == 0: instr.writeto(outfile) # close input file if status == 0: status = kepio.closefits(instr,logfile,verbose) # end time if (status == 0): message = 'KEPSFF completed at' else: message = '\nKEPSFF aborted at' kepmsg.clock(message,logfile,verbose)
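# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original PyKE code). At the end of
# each window kepsff above marks thruster-firing cadences by adding 131072
# (bit 2**17) to SAP_QUALITY. The helper below does the same bookkeeping on
# plain arrays; it uses a bitwise OR so that flagging is idempotent, whereas
# the code above uses +=, which would increment again if the bit were
# already set. The function name is an assumption made for illustration.
def _sketch_flag_thrusters(quality, cadenceno, thr_cadence, bit=131072):
    """Set the thruster-firing quality bit for the flagged cadence numbers."""
    import numpy as np
    quality = np.array(quality, dtype=int)
    flagged = np.isin(cadenceno, thr_cadence)
    quality[flagged] |= bit
    return quality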
def kepfold(infile, outfile, period, phasezero, bindata, binmethod, threshold, niter, nbins, rejqual, plottype, plotlab, clobber, verbose, logfile, status, cmdLine=False): # startup parameters status = 0 labelsize = 32 ticksize = 18 xsize = 18 ysize = 10 lcolor = '#0000ff' lwidth = 2.0 fcolor = '#ffff00' falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPFOLD -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'period=' + str(period) + ' ' call += 'phasezero=' + str(phasezero) + ' ' binit = 'n' if (bindata): binit = 'y' call += 'bindata=' + binit + ' ' call += 'binmethod=' + binmethod + ' ' call += 'threshold=' + str(threshold) + ' ' call += 'niter=' + str(niter) + ' ' call += 'nbins=' + str(nbins) + ' ' qflag = 'n' if (rejqual): qflag = 'y' call += 'rejqual=' + qflag + ' ' call += 'plottype=' + plottype + ' ' call += 'plotlab=' + plotlab + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPFOLD started at', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPFOLD: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile, message, verbose) # open input file if status == 0: instr, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys( instr, infile, logfile, verbose, status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr, file, logfile, verbose) # input data if status == 0: table = instr[1].data incards = instr[1].header.cards try: sap = instr[1].data.field('SAP_FLUX') except: try: sap = instr[1].data.field('ap_raw_flux') except: sap = zeros(len(table.field(0))) try: saperr = instr[1].data.field('SAP_FLUX_ERR') except: try: saperr = instr[1].data.field('ap_raw_err') except: saperr = zeros(len(table.field(0))) try: pdc = instr[1].data.field('PDCSAP_FLUX') except: try: pdc = instr[1].data.field('ap_corr_flux') except: pdc = zeros(len(table.field(0))) try: pdcerr = instr[1].data.field('PDCSAP_FLUX_ERR') except: try: pdcerr = instr[1].data.field('ap_corr_err') except: pdcerr = zeros(len(table.field(0))) try: cbv = instr[1].data.field('CBVSAP_FLUX') except: cbv = zeros(len(table.field(0))) if 'cbv' in plottype: txt = 'ERROR -- KEPFOLD: CBVSAP_FLUX column is not populated. Use kepcotrend' status = kepmsg.err(logfile, txt, verbose) try: det = instr[1].data.field('DETSAP_FLUX') except: det = zeros(len(table.field(0))) if 'det' in plottype: txt = 'ERROR -- KEPFOLD: DETSAP_FLUX column is not populated. Use kepflatten' status = kepmsg.err(logfile, txt, verbose) try: deterr = instr[1].data.field('DETSAP_FLUX_ERR') except: deterr = zeros(len(table.field(0))) if 'det' in plottype: txt = 'ERROR -- KEPFOLD: DETSAP_FLUX_ERR column is not populated. 
Use kepflatten' status = kepmsg.err(logfile, txt, verbose) try: quality = instr[1].data.field('SAP_QUALITY') except: quality = zeros(len(table.field(0))) if qualflag: txt = 'WARNING -- KEPFOLD: Cannot find a QUALITY data column' kepmsg.warn(logfile, txt) if status == 0: barytime, status = kepio.readtimecol(infile, table, logfile, verbose) barytime1 = copy(barytime) # filter out NaNs and quality > 0 work1 = [] work2 = [] work3 = [] work4 = [] work5 = [] work6 = [] work8 = [] work9 = [] if status == 0: if 'sap' in plottype: datacol = copy(sap) errcol = copy(saperr) if 'pdc' in plottype: datacol = copy(pdc) errcol = copy(pdcerr) if 'cbv' in plottype: datacol = copy(cbv) errcol = copy(saperr) if 'det' in plottype: datacol = copy(det) errcol = copy(deterr) for i in range(len(barytime)): if (numpy.isfinite(barytime[i]) and numpy.isfinite(datacol[i]) and datacol[i] != 0.0 and numpy.isfinite(errcol[i]) and errcol[i] > 0.0): if rejqual and quality[i] == 0: work1.append(barytime[i]) work2.append(sap[i]) work3.append(saperr[i]) work4.append(pdc[i]) work5.append(pdcerr[i]) work6.append(cbv[i]) work8.append(det[i]) work9.append(deterr[i]) elif not rejqual: work1.append(barytime[i]) work2.append(sap[i]) work3.append(saperr[i]) work4.append(pdc[i]) work5.append(pdcerr[i]) work6.append(cbv[i]) work8.append(det[i]) work9.append(deterr[i]) barytime = array(work1, dtype='float64') sap = array(work2, dtype='float32') / cadenom saperr = array(work3, dtype='float32') / cadenom pdc = array(work4, dtype='float32') / cadenom pdcerr = array(work5, dtype='float32') / cadenom cbv = array(work6, dtype='float32') / cadenom det = array(work8, dtype='float32') / cadenom deterr = array(work9, dtype='float32') / cadenom # calculate phase if status == 0: if phasezero < bjdref: phasezero += bjdref date1 = (barytime1 + bjdref - phasezero) phase1 = (date1 / period) - floor(date1 / period) date2 = (barytime + bjdref - phasezero) phase2 = (date2 / period) - floor(date2 / period) phase2 = array(phase2, 'float32') # sort phases if status == 0: ptuple = [] phase3 = [] sap3 = [] saperr3 = [] pdc3 = [] pdcerr3 = [] cbv3 = [] cbverr3 = [] det3 = [] deterr3 = [] for i in range(len(phase2)): ptuple.append([ phase2[i], sap[i], saperr[i], pdc[i], pdcerr[i], cbv[i], saperr[i], det[i], deterr[i] ]) phsort = sorted(ptuple, key=lambda ph: ph[0]) for i in range(len(phsort)): phase3.append(phsort[i][0]) sap3.append(phsort[i][1]) saperr3.append(phsort[i][2]) pdc3.append(phsort[i][3]) pdcerr3.append(phsort[i][4]) cbv3.append(phsort[i][5]) cbverr3.append(phsort[i][6]) det3.append(phsort[i][7]) deterr3.append(phsort[i][8]) phase3 = array(phase3, 'float32') sap3 = array(sap3, 'float32') saperr3 = array(saperr3, 'float32') pdc3 = array(pdc3, 'float32') pdcerr3 = array(pdcerr3, 'float32') cbv3 = array(cbv3, 'float32') cbverr3 = array(cbverr3, 'float32') det3 = array(det3, 'float32') deterr3 = array(deterr3, 'float32') # bin phases if status == 0 and bindata: work1 = array([sap3[0]], 'float32') work2 = array([saperr3[0]], 'float32') work3 = array([pdc3[0]], 'float32') work4 = array([pdcerr3[0]], 'float32') work5 = array([cbv3[0]], 'float32') work6 = array([cbverr3[0]], 'float32') work7 = array([det3[0]], 'float32') work8 = array([deterr3[0]], 'float32') phase4 = array([], 'float32') sap4 = array([], 'float32') saperr4 = array([], 'float32') pdc4 = array([], 'float32') pdcerr4 = array([], 'float32') cbv4 = array([], 'float32') cbverr4 = array([], 'float32') det4 = array([], 'float32') deterr4 = array([], 'float32') dt = 1.0 / nbins nb = 0.0 rng = 
numpy.append(phase3, phase3[0] + 1.0) for i in range(len(rng)): if rng[i] < nb * dt or rng[i] >= (nb + 1.0) * dt: if len(work1) > 0: phase4 = append(phase4, (nb + 0.5) * dt) if (binmethod == 'mean'): sap4 = append(sap4, kepstat.mean(work1)) saperr4 = append(saperr4, kepstat.mean_err(work2)) pdc4 = append(pdc4, kepstat.mean(work3)) pdcerr4 = append(pdcerr4, kepstat.mean_err(work4)) cbv4 = append(cbv4, kepstat.mean(work5)) cbverr4 = append(cbverr4, kepstat.mean_err(work6)) det4 = append(det4, kepstat.mean(work7)) deterr4 = append(deterr4, kepstat.mean_err(work8)) elif (binmethod == 'median'): sap4 = append(sap4, kepstat.median(work1, logfile)) saperr4 = append(saperr4, kepstat.mean_err(work2)) pdc4 = append(pdc4, kepstat.median(work3, logfile)) pdcerr4 = append(pdcerr4, kepstat.mean_err(work4)) cbv4 = append(cbv4, kepstat.median(work5, logfile)) cbverr4 = append(cbverr4, kepstat.mean_err(work6)) det4 = append(det4, kepstat.median(work7, logfile)) deterr4 = append(deterr4, kepstat.mean_err(work8)) else: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work1)],arange(0.0,float(len(work1)),1.0),work1,work2, threshold,threshold,niter,logfile,False) sap4 = append(sap4, coeffs[0]) saperr4 = append(saperr4, kepstat.mean_err(work2)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work3)],arange(0.0,float(len(work3)),1.0),work3,work4, threshold,threshold,niter,logfile,False) pdc4 = append(pdc4, coeffs[0]) pdcerr4 = append(pdcerr4, kepstat.mean_err(work4)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work5)],arange(0.0,float(len(work5)),1.0),work5,work6, threshold,threshold,niter,logfile,False) cbv4 = append(cbv4, coeffs[0]) cbverr4 = append(cbverr4, kepstat.mean_err(work6)) coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[scipy.stats.nanmean(work7)],arange(0.0,float(len(work7)),1.0),work7,work8, threshold,threshold,niter,logfile,False) det4 = append(det4, coeffs[0]) deterr4 = append(deterr4, kepstat.mean_err(work8)) work1 = array([], 'float32') work2 = array([], 'float32') work3 = array([], 'float32') work4 = array([], 'float32') work5 = array([], 'float32') work6 = array([], 'float32') work7 = array([], 'float32') work8 = array([], 'float32') nb += 1.0 else: work1 = append(work1, sap3[i]) work2 = append(work2, saperr3[i]) work3 = append(work3, pdc3[i]) work4 = append(work4, pdcerr3[i]) work5 = append(work5, cbv3[i]) work6 = append(work6, cbverr3[i]) work7 = append(work7, det3[i]) work8 = append(work8, deterr3[i]) # update HDU1 for output file if status == 0: cols = (instr[1].columns + ColDefs([Column(name='PHASE', format='E', array=phase1)])) instr[1] = pyfits.new_table(cols) instr[1].header.cards[ 'TTYPE' + str(len(instr[1].columns))].comment = 'column title: phase' instr[1].header.cards[ 'TFORM' + str(len(instr[1].columns))].comment = 'data type: float32' for i in range(len(incards)): if incards[i].key not in list(instr[1].header.keys()): instr[1].header.update(incards[i].key, incards[i].value, incards[i].comment) else: instr[1].header.cards[ incards[i].key].comment = incards[i].comment instr[1].header.update('PERIOD', period, 'period defining the phase [d]') instr[1].header.update('BJD0', phasezero, 'time of phase zero [BJD]') # write new phased data extension for output file if status == 0 and bindata: col1 = 
Column(name='PHASE', format='E', array=phase4) col2 = Column(name='SAP_FLUX', format='E', unit='e/s', array=sap4 / cadenom) col3 = Column(name='SAP_FLUX_ERR', format='E', unit='e/s', array=saperr4 / cadenom) col4 = Column(name='PDC_FLUX', format='E', unit='e/s', array=pdc4 / cadenom) col5 = Column(name='PDC_FLUX_ERR', format='E', unit='e/s', array=pdcerr4 / cadenom) col6 = Column(name='CBV_FLUX', format='E', unit='e/s', array=cbv4 / cadenom) col7 = Column(name='DET_FLUX', format='E', array=det4 / cadenom) col8 = Column(name='DET_FLUX_ERR', format='E', array=deterr4 / cadenom) cols = ColDefs([col1, col2, col3, col4, col5, col6, col7, col8]) instr.append(new_table(cols)) instr[-1].header.cards['TTYPE1'].comment = 'column title: phase' instr[-1].header.cards[ 'TTYPE2'].comment = 'column title: simple aperture photometry' instr[-1].header.cards[ 'TTYPE3'].comment = 'column title: SAP 1-sigma error' instr[-1].header.cards[ 'TTYPE4'].comment = 'column title: pipeline conditioned photometry' instr[-1].header.cards[ 'TTYPE5'].comment = 'column title: PDC 1-sigma error' instr[-1].header.cards[ 'TTYPE6'].comment = 'column title: cotrended basis vector photometry' instr[-1].header.cards[ 'TTYPE7'].comment = 'column title: Detrended aperture photometry' instr[-1].header.cards[ 'TTYPE8'].comment = 'column title: DET 1-sigma error' instr[-1].header.cards['TFORM1'].comment = 'column type: float32' instr[-1].header.cards['TFORM2'].comment = 'column type: float32' instr[-1].header.cards['TFORM3'].comment = 'column type: float32' instr[-1].header.cards['TFORM4'].comment = 'column type: float32' instr[-1].header.cards['TFORM5'].comment = 'column type: float32' instr[-1].header.cards['TFORM6'].comment = 'column type: float32' instr[-1].header.cards['TFORM7'].comment = 'column type: float32' instr[-1].header.cards['TFORM8'].comment = 'column type: float32' instr[-1].header.cards[ 'TUNIT2'].comment = 'column units: electrons per second' instr[-1].header.cards[ 'TUNIT3'].comment = 'column units: electrons per second' instr[-1].header.cards[ 'TUNIT4'].comment = 'column units: electrons per second' instr[-1].header.cards[ 'TUNIT5'].comment = 'column units: electrons per second' instr[-1].header.cards[ 'TUNIT6'].comment = 'column units: electrons per second' instr[-1].header.update('EXTNAME', 'FOLDED', 'extension name') instr[-1].header.update('PERIOD', period, 'period defining the phase [d]') instr[-1].header.update('BJD0', phasezero, 'time of phase zero [BJD]') instr[-1].header.update('BINMETHD', binmethod, 'phase binning method') if binmethod == 'sigclip': instr[-1].header.update('THRSHOLD', threshold, 'sigma-clipping threshold [sigma]') instr[-1].header.update('NITER', niter, 'max number of sigma-clipping iterations') # history keyword in output file if status == 0: status = kepkey.history(call, instr[0], outfile, logfile, verbose) instr.writeto(outfile) # clean up x-axis unit if status == 0: ptime1 = array([], 'float32') ptime2 = array([], 'float32') pout1 = array([], 'float32') pout2 = array([], 'float32') if bindata: work = sap4 if plottype == 'pdc': work = pdc4 if plottype == 'cbv': work = cbv4 if plottype == 'det': work = det4 for i in range(len(phase4)): if (phase4[i] > 0.5): ptime2 = append(ptime2, phase4[i] - 1.0) pout2 = append(pout2, work[i]) ptime2 = append(ptime2, phase4) pout2 = append(pout2, work) for i in range(len(phase4)): if (phase4[i] <= 0.5): ptime2 = append(ptime2, phase4[i] + 1.0) pout2 = append(pout2, work[i]) work = sap3 if plottype == 'pdc': work = pdc3 if plottype == 'cbv': work = cbv3 
if plottype == 'det': work = det3 for i in range(len(phase3)): if (phase3[i] > 0.5): ptime1 = append(ptime1, phase3[i] - 1.0) pout1 = append(pout1, work[i]) ptime1 = append(ptime1, phase3) pout1 = append(pout1, work) for i in range(len(phase3)): if (phase3[i] <= 0.5): ptime1 = append(ptime1, phase3[i] + 1.0) pout1 = append(pout1, work[i]) xlab = 'Orbital Phase ($\phi$)' # clean up y-axis units if status == 0: nrm = len(str(int(pout1[isfinite(pout1)].max()))) - 1 pout1 = pout1 / 10**nrm pout2 = pout2 / 10**nrm if nrm == 0: ylab = plotlab else: ylab = '10$^%d$ %s' % (nrm, plotlab) # data limits xmin = ptime1.min() xmax = ptime1.max() ymin = pout1[isfinite(pout1)].min() ymax = pout1[isfinite(pout1)].max() xr = xmax - xmin yr = ymax - ymin ptime1 = insert(ptime1, [0], [ptime1[0]]) ptime1 = append(ptime1, [ptime1[-1]]) pout1 = insert(pout1, [0], [0.0]) pout1 = append(pout1, 0.0) if bindata: ptime2 = insert(ptime2, [0], ptime2[0] - 1.0 / nbins) ptime2 = insert(ptime2, [0], ptime2[0]) ptime2 = append( ptime2, [ptime2[-1] + 1.0 / nbins, ptime2[-1] + 1.0 / nbins]) pout2 = insert(pout2, [0], [pout2[-1]]) pout2 = insert(pout2, [0], [0.0]) pout2 = append(pout2, [pout2[2], 0.0]) # plot new light curve if status == 0 and plottype != 'none': try: params = { 'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 18, 'legend.fontsize': 18, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize } pylab.rcParams.update(params) except: print('ERROR -- KEPFOLD: install latex for scientific plotting') status = 1 if status == 0 and plottype != 'none': pylab.figure(figsize=[17, 7]) pylab.clf() ax = pylab.axes([0.06, 0.11, 0.93, 0.86]) pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90) if bindata: pylab.fill(ptime2, pout2, color=fcolor, linewidth=0.0, alpha=falpha) else: if 'det' in plottype: pylab.fill(ptime1, pout1, color=fcolor, linewidth=0.0, alpha=falpha) pylab.plot(ptime1, pout1, color=lcolor, linestyle='', linewidth=lwidth, marker='.') if bindata: pylab.plot(ptime2[1:-1], pout2[1:-1], color='r', linestyle='-', linewidth=lwidth, marker='') xlabel(xlab, {'color': 'k'}) ylabel(ylab, {'color': 'k'}) xlim(-0.49999, 1.49999) if ymin >= 0.0: ylim(ymin - yr * 0.01, ymax + yr * 0.01) # ylim(0.96001,1.03999) else: ylim(1.0e-10, ymax + yr * 0.01) grid() if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() # close input file if status == 0: status = kepio.closefits(instr, logfile, verbose) # stop time kepmsg.clock('KEPFOLD ended at: ', logfile, verbose)
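# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original KEPFOLD task): the phase
# calculation above reduces to the two lines below, assuming the timestamps
# and the epoch of phase zero are on the same time system (e.g. both BJD).
# All names here are hypothetical and the data are synthetic.
import numpy as np

def fold_times(barytime, period, phasezero):
    """Return orbital phase in [0, 1) for each timestamp."""
    date = barytime - phasezero
    return date / period - np.floor(date / period)

if __name__ == '__main__':
    t = np.linspace(0.0, 30.0, 1000)              # 30 days of fake timestamps
    phase = fold_times(t, period=2.5, phasezero=0.3)
    assert np.all((phase >= 0.0) & (phase < 1.0))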
def kepfoldimg(infile,outfile,datacol,period,phasezero,binmethod,threshold,niter,nbins, plot,plotlab,clobber,verbose,logfile,status): # startup parameters status = 0 labelsize = 24; ticksize = 16; xsize = 17; ysize = 7 lcolor = '#0000ff'; lwidth = 1.0; fcolor = '#ffff00'; falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPFOLD -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'datacol='+datacol+' ' call += 'period='+str(period)+' ' call += 'phasezero='+str(phasezero)+' ' call += 'binmethod='+binmethod+' ' call += 'threshold='+str(threshold)+' ' call += 'niter='+str(niter)+' ' call += 'nbins='+str(nbins)+' ' plotres = 'n' if (plot): plotres = 'y' call += 'plot='+plotres+ ' ' call += 'plotlab='+plotlab+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPFOLDIMG started at: ',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPFOLDIMG: ' + outfile + ' exists. Use --clobber' status = kepmsg.err(logfile,message,verbose) # open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,infile,logfile,verbose) # input data if status == 0: table = instr[1].data incards = instr[1].header.cards indata, status = kepio.readfitscol(infile,table,datacol,logfile,verbose) barytime, status = kepio.readtimecol(infile,table,logfile,verbose) # filter out NaNs work1 = []; work2 = [] if status == 0: for i in range(len(barytime)): if (numpy.isfinite(barytime[i]) and numpy.isfinite(indata[i]) and indata[i] != 0.0): work1.append(barytime[i]) work2.append(indata[i]) barytime = array(work1,dtype='float64') indata = array(work2,dtype='float32') # calculate phase if status == 0: phase2 = [] phase1 = (barytime - phasezero) / period for i in range(len(phase1)): phase2.append(phase1[i] - int(phase1[i])) if phase2[-1] < 0.0: phase2[-1] += 1.0 phase2 = array(phase2,'float32') # sort phases if status == 0: ptuple = [] phase3 = [] data3 = [] for i in range(len(phase2)): ptuple.append([phase2[i], indata[i]]) phsort = sorted(ptuple,key=lambda ph: ph[0]) for i in range(len(phsort)): phase3.append(phsort[i][0]) data3.append(phsort[i][1]) phase3 = array(phase3,'float32') data3 = array(data3,'float32') # bin phases if status == 0: work1 = array([data3[0]],'float32') phase4 = array([],'float32') data4 = array([],'float32') dt = (phase3[-1] - phase3[0]) / nbins nb = 0.0 for i in range(len(phase3)): if phase3[i] < phase3[0] + nb * dt or phase3[i] >= phase3[0] + (nb + 1.0) * dt: if len(work1) > 0: phase4 = append(phase4,phase3[0] + (nb + 0.5) * dt) if (binmethod == 'mean'): data4 = append(data4,kepstat.mean(work1)) elif (binmethod == 'median'): data4 = append(data4,kepstat.median(work1,logfile)) else: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip('poly0',[1.0],arange(0.0,float(len(work1)),1.0),work1,None, threshold,threshold,niter,logfile,verbose) data4 = 
append(data4,coeffs[0]) work1 = array([],'float32') nb += 1.0 else: work1 = append(work1,data3[i]) # update HDU1 for output file if status == 0: cols = (instr[1].columns + ColDefs([Column(name='PHASE',format='E',array=phase1)])) instr[1] = pyfits.new_table(cols) instr[1].header.cards['TTYPE20'].comment = 'column title: phase' instr[1].header.cards['TFORM20'].comment = 'data type: float32' for i in range(len(incards)): if incards[i].key not in instr[1].header.keys(): instr[1].header.update(incards[i].key, incards[i].value, incards[i].comment) else: instr[1].header.cards[incards[i].key].comment = incards[i].comment instr[1].header.update('PERIOD',period,'period defining the phase [d]') instr[1].header.update('BJD0',phasezero,'time of phase zero [BJD]') # write new phased data extension for output file if status == 0: col1 = Column(name='PHASE',format='E',array=phase4) col2 = Column(name=datacol,format='E',unit='e/s',array=data4/cadence) cols = ColDefs([col1,col2]) instr.append(new_table(cols)) instr[-1].header.cards['TTYPE1'].comment = 'column title: phase' instr[-1].header.cards['TTYPE2'].comment = 'column title: simple aperture photometry' instr[-1].header.cards['TFORM1'].comment = 'column type: float32' instr[-1].header.cards['TFORM2'].comment = 'column type: float32' instr[-1].header.cards['TUNIT2'].comment = 'column units: electrons per second' instr[-1].header.update('EXTNAME','FOLDED','extension name') instr[-1].header.update('PERIOD',period,'period defining the phase [d]') instr[-1].header.update('BJD0',phasezero,'time of phase zero [BJD]') instr[-1].header.update('BINMETHD',binmethod,'phase binning method') if binmethod =='sigclip': instr[-1].header.update('THRSHOLD',threshold,'sigma-clipping threshold [sigma]') instr[-1].header.update('NITER',niter,'max number of sigma-clipping iterations') # history keyword in output file if status == 0: status = kepkey.history(call,instr[0],outfile,logfile,verbose) instr.writeto(outfile) # close input file if status == 0: status = kepio.closefits(instr,logfile,verbose) # clean up x-axis unit if status == 0: ptime = array([],'float32') pout = array([],'float32') work = data4 for i in range(len(phase4)): if (phase4[i] > 0.5): ptime = append(ptime,phase4[i] - 1.0) pout = append(pout,work[i] / cadence) ptime = append(ptime,phase4) pout = append(pout,work / cadence) for i in range(len(phase4)): if (phase4[i] <= 0.5): ptime = append(ptime,phase4[i] + 1.0) pout = append(pout,work[i] / cadence) xlab = 'Phase ($\phi$)' # clean up y-axis units if status == 0: nrm = len(str(int(pout.max())))-1 pout = pout / 10**nrm ylab = '10$^%d$ %s' % (nrm, plotlab) # data limits xmin = ptime.min() xmax = ptime.max() ymin = pout.min() ymax = pout.max() xr = xmax - xmin yr = ymax - ymin ptime = insert(ptime,[0],[ptime[0]]) ptime = append(ptime,[ptime[-1]]) pout = insert(pout,[0],[0.0]) pout = append(pout,0.0) # plot new light curve if status == 0 and plot: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} pylab.rcParams.update(params) except: print 'ERROR -- KEPFOLD: install latex for scientific plotting' status = 1 if status == 0 and plot: pylab.figure(1,figsize=[17,7]) pylab.clf() pylab.axes([0.06,0.1,0.93,0.87]) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) 
        pylab.plot(ptime, pout, color=lcolor, linestyle='-', linewidth=lwidth)
        fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        xlabel(xlab, {'color': 'k'})
        ylabel(ylab, {'color': 'k'})
        xlim(-0.49999, 1.49999)
        if ymin >= 0.0:
            ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            ylim(1.0e-10, ymax + yr * 0.01)
        pylab.grid()
        pylab.draw()

# stop time
    kepmsg.clock('KEPFOLDIMG ended at: ', logfile, verbose)
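# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original KEPFOLDIMG task): the phase
# binning loop above is equivalent to grouping the phased samples into nbins
# equal-width bins and taking a mean or median per bin. Names and data below
# are hypothetical.
import numpy as np

def bin_phased(phase, flux, nbins=50, method='mean'):
    """Bin a phased light curve into nbins equal-width phase bins."""
    edges = np.linspace(0.0, 1.0, nbins + 1)
    idx = np.digitize(phase, edges) - 1           # bin index for each point
    idx = np.clip(idx, 0, nbins - 1)
    stat = np.nanmean if method == 'mean' else np.nanmedian
    centers, values = [], []
    for b in range(nbins):
        inbin = flux[idx == b]
        if inbin.size > 0:
            centers.append((edges[b] + edges[b + 1]) / 2.0)
            values.append(stat(inbin))
    return np.array(centers), np.array(values)

if __name__ == '__main__':
    rng = np.random.default_rng(1)
    ph = rng.uniform(0.0, 1.0, 2000)
    fl = 1.0 - 0.01 * np.exp(-0.5 * ((ph - 0.5) / 0.02) ** 2) \
        + rng.normal(0.0, 1e-3, ph.size)
    centers, binned = bin_phased(ph, fl, nbins=100, method='median')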
def kepsff(infile, outfile, datacol, cenmethod, stepsize, npoly_cxcy, sigma_cxcy, npoly_ardx, npoly_dsdt, sigma_dsdt, npoly_arfl, sigma_arfl, plotres, clobber, verbose, logfile, status, cmdLine=False): # startup parameters status = 0 labelsize = 16 ticksize = 14 xsize = 20 ysize = 8 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = 'KEPSFF -- ' call += 'infile=' + infile + ' ' call += 'outfile=' + outfile + ' ' call += 'datacol=' + datacol + ' ' call += 'cenmethod=' + cenmethod + ' ' call += 'stepsize=' + str(stepsize) + ' ' call += 'npoly_cxcy=' + str(npoly_cxcy) + ' ' call += 'sigma_cxcy=' + str(sigma_cxcy) + ' ' call += 'npoly_ardx=' + str(npoly_ardx) + ' ' call += 'npoly_dsdt=' + str(npoly_dsdt) + ' ' call += 'sigma_dsdt=' + str(sigma_dsdt) + ' ' call += 'npoly_arfl=' + str(npoly_arfl) + ' ' call += 'sigma_arfl=' + str(sigma_arfl) + ' ' savep = 'n' if (plotres): savep = 'y' call += 'plotres=' + savep + ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber=' + overwrite + ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose=' + chatter + ' ' call += 'logfile=' + logfile kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPSFF started at', logfile, verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile, logfile, verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile, message, verbose) # open input file if status == 0: instr, status = kepio.openfits(infile, 'readonly', logfile, verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys( instr, infile, logfile, verbose, status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr, file, logfile, verbose) # read table structure if status == 0: table, status = kepio.readfitstab(infile, instr[1], logfile, verbose) # determine sequence of windows in time if status == 0: frametim = instr[1].header['FRAMETIM'] num_frm = instr[1].header['NUM_FRM'] exptime = frametim * num_frm / 86400 tstart = table.field('TIME')[0] tstop = table.field('TIME')[-1] winedge = arange(tstart, tstop, stepsize) if tstop > winedge[-1] + stepsize / 2: winedge = append(winedge, tstop) else: winedge[-1] = tstop winedge = (winedge - tstart) / exptime winedge = winedge.astype(int) if len(table.field('TIME')) > winedge[-1] + 1: winedge = append(winedge, len(table.field('TIME'))) elif len(table.field('TIME')) < winedge[-1]: winedge[-1] = len(table.field('TIME')) # step through the time windows if status == 0: for iw in range(1, len(winedge)): t1 = winedge[iw - 1] t2 = winedge[iw] # filter input data table work1 = numpy.array([ table.field('TIME')[t1:t2], table.field('CADENCENO')[t1:t2], table.field(datacol)[t1:t2], table.field('MOM_CENTR1')[t1:t2], table.field('MOM_CENTR2')[t1:t2], table.field('PSF_CENTR1')[t1:t2], table.field('PSF_CENTR2')[t1:t2], table.field('SAP_QUALITY')[t1:t2] ], 'float64') work1 = numpy.rot90(work1, 3) work2 = work1[~numpy.isnan(work1).any(1)] work2 = work2[(work2[:, 0] == 0.0) | (work2[:, 0] > 1e5)] # assign table columns intime = work2[:, 7] + bjdref cadenceno = work2[:, 6].astype(int) indata = work2[:, 5] mom_centr1 = 
work2[:, 4] mom_centr2 = work2[:, 3] psf_centr1 = work2[:, 2] psf_centr2 = work2[:, 1] sap_quality = work2[:, 0] if cenmethod == 'moments': centr1 = copy(mom_centr1) centr2 = copy(mom_centr2) else: centr1 = copy(psf_centr1) centr2 = copy(psf_centr2) # fit centroid data with low-order polynomial cfit = zeros((len(centr2))) csig = zeros((len(centr2))) functype = 'poly' + str(npoly_cxcy) pinit = array([nanmean(centr2)]) if npoly_cxcy > 0: for j in range(npoly_cxcy): pinit = append(pinit, 0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose) for j in range(len(coeffs)): cfit += coeffs[j] * numpy.power(centr1, j) csig[:] = sigma except: message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % ( t1, t2) status = kepmsg.err(logfile, message, verbose) # sys.exit('') os._exit(1) # reject outliers time_good = array([], 'float64') centr1_good = array([], 'float32') centr2_good = array([], 'float32') flux_good = array([], 'float32') cad_good = array([], 'int') for i in range(len(cfit)): if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]: time_good = append(time_good, intime[i]) centr1_good = append(centr1_good, centr1[i]) centr2_good = append(centr2_good, centr2[i]) flux_good = append(flux_good, indata[i]) cad_good = append(cad_good, cadenceno[i]) # covariance matrix for centroid time series centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)]) covar = cov(centr) # eigenvector eigenvalues of covariance matrix [eval, evec] = numpy.linalg.eigh(covar) ex = arange(-10.0, 10.0, 0.1) epar = evec[1, 1] / evec[0, 1] * ex enor = evec[1, 0] / evec[0, 0] * ex ex = ex + mean(centr1) epar = epar + mean(centr2_good) enor = enor + mean(centr2_good) # rotate centroid data centr_rot = dot(evec.T, centr) # fit polynomial to rotated centroids rfit = zeros((len(centr2))) rsig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(centr_rot[0, :])]) pinit = array([1.0]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit, 0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) rx = linspace(nanmin(centr_rot[1, :]), nanmax(centr_rot[1, :]), 100) ry = zeros((len(rx))) for i in range(len(coeffs)): ry = ry + coeffs[i] * numpy.power(rx, i) # calculate arclength of centroids s = zeros((len(rx))) for i in range(1, len(s)): work3 = ((ry[i] - ry[i - 1]) / (rx[i] - rx[i - 1]))**2 s[i] = s[i - 1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i - 1]) # fit arclength as a function of strongest eigenvector sfit = zeros((len(centr2))) ssig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(s)]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit, 0.0) try: acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not 
fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) # correlate arclength with detrended flux t = copy(time_good) c = copy(cad_good) y = copy(flux_good) z = centr_rot[1, :] x = zeros((len(z))) for i in range(len(acoeffs)): x = x + acoeffs[i] * numpy.power(z, i) # calculate time derivative of arclength s dx = zeros((len(x))) for i in range(1, len(x)): dx[i] = (x[i] - x[i - 1]) / (t[i] - t[i - 1]) dx[0] = dx[1] # fit polynomial to derivative and flag outliers (thruster firings) dfit = zeros((len(dx))) dsig = zeros((len(dx))) functype = 'poly' + str(npoly_dsdt) pinit = array([nanmean(dx)]) if npoly_dsdt > 0: for j in range(npoly_dsdt): pinit = append(pinit, 0.0) try: dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \ kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) for i in range(len(dcoeffs)): dfit = dfit + dcoeffs[i] * numpy.power(t, i) centr1_pnt = array([], 'float32') centr2_pnt = array([], 'float32') time_pnt = array([], 'float64') flux_pnt = array([], 'float32') dx_pnt = array([], 'float32') s_pnt = array([], 'float32') time_thr = array([], 'float64') flux_thr = array([], 'float32') dx_thr = array([], 'float32') thr_cadence = [] for i in range(len(t)): if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[ i] > dfit[i] - sigma_dsdt * dsigma: time_pnt = append(time_pnt, time_good[i]) flux_pnt = append(flux_pnt, flux_good[i]) dx_pnt = append(dx_pnt, dx[i]) s_pnt = append(s_pnt, x[i]) centr1_pnt = append(centr1_pnt, centr1_good[i]) centr2_pnt = append(centr2_pnt, centr2_good[i]) else: time_thr = append(time_thr, time_good[i]) flux_thr = append(flux_thr, flux_good[i]) dx_thr = append(dx_thr, dx[i]) thr_cadence.append(cad_good[i]) # fit arclength-flux correlation cfit = zeros((len(time_pnt))) csig = zeros((len(time_pnt))) functype = 'poly' + str(npoly_arfl) pinit = array([nanmean(flux_pnt)]) if npoly_arfl > 0: for j in range(npoly_arfl): pinit = append(pinit, 0.0) try: ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \ kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile, message, verbose) # correction factors for unfiltered data centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T, centr) yy = copy(indata) zz = centr_rot[1, :] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz, i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx, i) # apply correction to flux time-series out_detsap = indata / cfac # split time-series data for plotting tim_gd = array([], 'float32') flx_gd = array([], 'float32') tim_bd = array([], 'float32') flx_bd = array([], 'float32') for i in range(len(indata)): if intime[i] in time_pnt: tim_gd = append(tim_gd, intime[i]) flx_gd = append(flx_gd, out_detsap[i]) else: tim_bd = append(tim_bd, intime[i]) flx_bd = append(flx_bd, out_detsap[i]) # plot style and size status = kepplot.define(labelsize, ticksize, logfile, verbose) pylab.figure(figsize=[xsize, ysize]) pylab.clf() # plot x-centroid vs y-centroid ax = kepplot.location([0.04, 0.57, 0.16, 0.41]) # plot location px = copy(centr1) # clean-up x-axis units py = copy(centr2) # clean-up y-axis units 
pxmin = px.min() pxmax = px.max() pymin = py.min() pymax = py.max() pxr = pxmax - pxmin pyr = pymax - pymin pad = 0.05 if pxr > pyr: dely = (pxr - pyr) / 2 xlim(pxmin - pxr * pad, pxmax + pxr * pad) ylim(pymin - dely - pyr * pad, pymax + dely + pyr * pad) else: delx = (pyr - pxr) / 2 ylim(pymin - pyr * pad, pymax + pyr * pad) xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad) pylab.plot(px, py, color='#980000', markersize=5, marker='D', ls='') # plot data pylab.plot(centr1_good, centr2_good, color='#009900', markersize=5, marker='D', ls='') # plot data pylab.plot(ex, epar, color='k', ls='-') pylab.plot(ex, enor, color='k', ls='-') for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels('CCD Column', 'CCD Row', 'k', 16) # labels pylab.grid() # grid lines # plot arclength fits vs drift along strongest eigenvector ax = kepplot.location([0.24, 0.57, 0.16, 0.41]) # plot location px = rx - rx[0] py = s - rx - (s[0] - rx[0]) # clean-up y-axis units py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) # clean-up x-axis units kepplot.RangeOfPlot(px, py, 0.05, False) # data limits pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='') px = plotx - rx[0] # clean-up x-axis units py = ploty - plotx - (s[0] - rx[0]) # clean-up y-axis units py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) # clean-up x-axis units pylab.plot(px, py, color='r', ls='-', lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) ylab = re.sub(' e\S+', ' pixels)', ylab) ylab = re.sub(' s\S+', '', ylab) ylab = re.sub('Flux', 's $-$ x\'', ylab) kepplot.labels('Linear Drift [x\'] (pixels)', ylab, 'k', 16) # labels pylab.grid() # grid lines # plot time derivative of arclength s ax = kepplot.location([0.04, 0.08, 0.16, 0.41]) # plot location px = copy(time_pnt) py = copy(dx_pnt) px, xlab, status = kepplot.cleanx(px, logfile, verbose) # clean-up x-axis units kepplot.RangeOfPlot(px, dx, 0.05, False) # data limits pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='') try: px = copy(time_thr) py = copy(dx_thr) px, xlab, status = kepplot.cleanx( px, logfile, verbose) # clean-up x-axis units pylab.plot(px, py, color='#980000', markersize=5, marker='D', ls='') except: pass px = copy(t) py = copy(dfit) px, xlab, status = kepplot.cleanx(px, logfile, verbose) # clean-up x-axis units pylab.plot(px, py, color='r', ls='-', lw=3) py = copy(dfit + sigma_dsdt * dsigma) pylab.plot(px, py, color='r', ls='--', lw=3) py = copy(dfit - sigma_dsdt * dsigma) pylab.plot(px, py, color='r', ls='--', lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels(xlab, 'ds/dt (pixels day$^{-1}$)', 'k', 16) # labels pylab.grid() # grid lines # plot relation of arclength vs detrended flux ax = kepplot.location([0.24, 0.08, 0.16, 0.41]) # plot location px = copy(s_pnt) py = copy(flux_pnt) py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose) # clean-up x-axis units kepplot.RangeOfPlot(px, py, 0.05, False) # data limits pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='') pylab.plot(plx, ply, color='r', ls='-', lw=3) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(14) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(14) kepplot.labels('Arclength [s] (pixels)', ylab, 'k', 16) # labels 
pylab.grid() # grid lines # plot aperture photometry kepplot.location([0.44, 0.53, 0.55, 0.45]) # plot location px, xlab, status = kepplot.cleanx(intime, logfile, verbose) # clean-up x-axis units py, ylab, status = kepplot.cleany(indata, 1.0, logfile, verbose) # clean-up x-axis units kepplot.RangeOfPlot(px, py, 0.01, True) # data limits kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha, True) # plot data kepplot.labels(' ', ylab, 'k', 16) # labels pylab.setp(pylab.gca(), xticklabels=[]) # remove x- or y-tick labels kepplot.labels(xlab, re.sub('Flux', 'Aperture Flux', ylab), 'k', 16) # labels pylab.grid() # grid lines # Plot corrected photometry kepplot.location([0.44, 0.08, 0.55, 0.45]) # plot location kepplot.RangeOfPlot(px, py, 0.01, True) # data limits px, xlab, status = kepplot.cleanx(tim_gd, logfile, verbose) # clean-up x-axis units py, ylab, status = kepplot.cleany(flx_gd, 1.0, logfile, verbose) # clean-up x-axis units kepplot.plot1d(px, py, cadence, lcolor, lwidth, fcolor, falpha, True) # plot data try: px, xlab, status = kepplot.cleanx( tim_bd, logfile, verbose) # clean-up x-axis units py = copy(flx_bd) pylab.plot(px, py, color='#980000', markersize=5, marker='D', ls='') except: pass kepplot.labels(xlab, re.sub('Flux', 'Corrected Flux', ylab), 'k', 16) # labels pylab.grid() # grid lines # render plot if plotres: kepplot.render(cmdLine) # save plot to file if plotres: pylab.savefig(re.sub('.fits', '_%d.png' % (iw + 1), outfile)) # correct fluxes within the output file intime = work1[:, 7] + bjdref cadenceno = work1[:, 6].astype(int) indata = work1[:, 5] mom_centr1 = work1[:, 4] mom_centr2 = work1[:, 3] psf_centr1 = work1[:, 2] psf_centr2 = work1[:, 1] centr1 = copy(mom_centr1) centr2 = copy(mom_centr2) centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T, centr) yy = copy(indata) zz = centr_rot[1, :] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz, i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx, i) out_detsap = yy / cfac instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac try: instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac except: pass # add quality flag to output file for thruster firings for i in range(len(intime)): if cadenceno[i] in thr_cadence: instr[1].data.field('SAP_QUALITY')[t1 + i] += 131072 # write output file if status == 0: instr.writeto(outfile) # close input file if status == 0: status = kepio.closefits(instr, logfile, verbose) # end time if (status == 0): message = 'KEPSFF completed at' else: message = '\nKEPSFF aborted at' kepmsg.clock(message, logfile, verbose)
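# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original KEPSFF task): the essence of
# the self-flat-fielding step above is (1) rotate the centroid time series
# onto the eigenvectors of its covariance matrix, (2) take the drift along the
# strongest eigenvector as a proxy for the fitted arclength s (valid when the
# drift is nearly linear), and (3) divide the flux by a low-order polynomial
# trend of flux versus s. numpy.polyfit stands in for kepfit.lsqclip; names
# and data are hypothetical.
import numpy as np

def sff_correct(flux, centr1, centr2, npoly_flux=3):
    """Return flux divided by its polynomial trend against rotated centroid drift."""
    centr = np.vstack([centr1 - centr1.mean(), centr2 - centr2.mean()])
    _, evec = np.linalg.eigh(np.cov(centr))       # eigenvectors of the covariance
    rot = evec.T.dot(centr)                       # rotated centroid coordinates
    s = rot[1, :]                                 # drift along the strongest axis
    coeffs = np.polyfit(s, flux, npoly_flux)      # flux-versus-drift trend
    return flux / np.polyval(coeffs, s)

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n = 500
    drift = np.linspace(-0.5, 0.5, n)
    c1 = 100.0 + drift + rng.normal(0.0, 0.01, n)
    c2 = 200.0 + 0.3 * drift + rng.normal(0.0, 0.01, n)
    f = 1.0 + 0.02 * drift + rng.normal(0.0, 1e-3, n)
    corrected = sff_correct(f, c1, c2)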
def GetCDPP(time, trial_lc, npoly, nsig, niter, winsize, stepsize, timescale, logfile, verbose, status): # detrend data: find limits of each time step if status == 0: npts = len(time) tstep1 = [] tstep2 = [] work = time[0] while work <= time[-1]: tstep1.append(work) tstep2.append( array([work + winsize, time[-1]], dtype='float64').min()) work += stepsize # detrend data: find cadence limits of each time step if status == 0: cstep1 = [] cstep2 = [] for n in range(len(tstep1)): for i in range(len(time) - 1): if time[i] <= tstep1[n] and time[i + 1] > tstep1[n]: for j in range(i, len(time) - 1): if time[j] < tstep2[n] and time[j + 1] >= tstep2[n]: cstep1.append(i) cstep2.append(j + 1) # detrend data: loop over each time step, fit data, determine rms if status == 0: fitarray = zeros((npts, len(cstep1)), dtype='float32') fitarray[:, :] = numpy.nan masterfit = trial_lc * 0.0 functype = 'poly' + str(npoly) for i in range(len(cstep1)): timeSeries = time[cstep1[i]:cstep2[i] + 1] - time[cstep1[i]] dataSeries = trial_lc[cstep1[i]:cstep2[i] + 1] pinit = [dataSeries.mean()] if npoly > 0: for j in range(npoly): pinit.append(0.0) pinit = array(pinit, dtype='float32') try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,timeSeries,dataSeries,None,nsig,nsig,niter, logfile,verbose) fitarray[cstep1[i]:cstep2[i] + 1, i] = 0.0 for j in range(len(coeffs)): fitarray[cstep1[i]:cstep2[i] + 1, i] += coeffs[j] * timeSeries**j except: for j in range(cstep1[i], cstep2[i] + 1): fitarray[cstep1[i]:cstep2[i] + 1, i] = 0.0 # message = 'WARNING -- KEPFLATTEN: could not fit range ' # message += str(time[cstep1[i]]) + '-' + str(time[cstep2[i]]) # kepmsg.warn(None,message) # detrend data: find mean fit for each timestamp if status == 0: for i in range(npts): masterfit[i] = nanmean(fitarray[i, :]) masterfit[-1] = masterfit[-4] #fudge masterfit[-2] = masterfit[-4] #fudge masterfit[-3] = masterfit[-4] #fudge # detrend data: normalize light curve if status == 0: trial_lc = trial_lc / masterfit # calculate STDDEV in units of ppm if status == 0: stddev = kepstat.running_frac_std(time, trial_lc, timescale / 24) * 1.0e6 # calculate median STDDEV if status == 0: medstddev = ones((len(stddev)), dtype='float32') * median(stddev) # print '\nMedian %.1fhr STDDEV = %d ppm' % (timescale, median(stddev)) return median(stddev), stddev, status
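# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original GetCDPP helper): once the
# light curve has been detrended and normalized, the noise metric above is the
# median of a running standard deviation expressed in ppm. A minimal stand-in
# for kepstat.running_frac_std, assuming evenly spaced cadences, is shown
# below with hypothetical names and synthetic data.
import numpy as np

def running_std_ppm(time, normflux, timescale_days):
    """Median running standard deviation of a normalized flux series, in ppm."""
    dt = np.median(np.diff(time))
    width = max(int(round(timescale_days / dt)), 2)   # window length in cadences
    stds = [np.std(normflux[i:i + width], ddof=1)
            for i in range(0, len(normflux) - width + 1)]
    return np.median(stds) * 1.0e6

if __name__ == '__main__':
    rng = np.random.default_rng(2)
    t = np.arange(0.0, 30.0, 0.0204)                  # ~30 min cadences in days
    f = 1.0 + rng.normal(0.0, 1.0e-4, t.size)
    print('approximate 6.5 hr noise: %.0f ppm' % running_std_ppm(t, f, 6.5 / 24.0))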
def kepoutlier(infile,outfile,datacol,nsig,stepsize,npoly,niter, operation,ranges,plot,plotfit,clobber,verbose,logfile,status, cmdLine=False): # startup parameters status = 0 labelsize = 24 ticksize = 16 xsize = 16 ysize = 6 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 # log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPOUTLIER -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'datacol='+str(datacol)+' ' call += 'nsig='+str(nsig)+' ' call += 'stepsize='+str(stepsize)+' ' call += 'npoly='+str(npoly)+' ' call += 'niter='+str(niter)+' ' call += 'operation='+str(operation)+' ' call += 'ranges='+str(ranges)+' ' plotit = 'n' if (plot): plotit = 'y' call += 'plot='+plotit+ ' ' plotf = 'n' if (plotfit): plotf = 'y' call += 'plotfit='+plotf+ ' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) # start time kepmsg.clock('KEPOUTLIER started at',logfile,verbose) # test log file logfile = kepmsg.test(logfile) # clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPOUTLIER: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile,message,verbose) # open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) if status == 0: tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence # fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) # read table structure if status == 0: table, status = kepio.readfitstab(infile,instr[1],logfile,verbose) # filter input data table if status == 0: try: nanclean = instr[1].header['NANCLEAN'] except: naxis2 = 0 try: for i in range(len(table.field(0))): if numpy.isfinite(table.field('barytime')[i]) and \ numpy.isfinite(table.field(datacol)[i]): table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] except: for i in range(len(table.field(0))): if numpy.isfinite(table.field('time')[i]) and \ numpy.isfinite(table.field(datacol)[i]): table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] comment = 'NaN cadences removed from data' status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose) # read table columns if status == 0: try: intime = instr[1].data.field('barytime') + 2.4e6 except: intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose) indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose) if status == 0: intime = intime + bjdref indata = indata / cadenom # time ranges for region to be corrected if status == 0: t1, t2, status = kepio.timeranges(ranges,logfile,verbose) cadencelis, status = kepstat.filterOnRange(intime,t1,t2) # find limits of each time step if status == 0: tstep1 = []; tstep2 = [] work = intime[0] while work < intime[-1]: tstep1.append(work) tstep2.append(array([work+stepsize,intime[-1]],dtype='float64').min()) work += stepsize # find cadence limits of each time step if status == 0: cstep1 = []; cstep2 = [] work1 = 0; work2 = 0 for i in range(len(intime)): if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize: work2 = i else: 
cstep1.append(work1) cstep2.append(work2) work1 = i; work2 = i cstep1.append(work1) cstep2.append(work2) outdata = indata * 1.0 # comment keyword in output file if status == 0: status = kepkey.history(call,instr[0],outfile,logfile,verbose) # clean up x-axis unit if status == 0: intime0 = float(int(tstart / 100) * 100.0) ptime = intime - intime0 xlab = 'BJD $-$ %d' % intime0 # clean up y-axis units if status == 0: pout = indata * 1.0 nrm = len(str(int(pout.max())))-1 pout = pout / 10**nrm ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm # data limits xmin = ptime.min() xmax = ptime.max() ymin = pout.min() ymax = pout.max() xr = xmax - xmin yr = ymax - ymin ptime = insert(ptime,[0],[ptime[0]]) ptime = append(ptime,[ptime[-1]]) pout = insert(pout,[0],[0.0]) pout = append(pout,0.0) # plot light curve if status == 0 and plot: plotLatex = True try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} rcParams.update(params) except: plotLatex = False if status == 0 and plot: pylab.figure(figsize=[xsize,ysize]) pylab.clf() # plot data ax = pylab.axes([0.06,0.1,0.93,0.87]) # force tick labels to be absolute rather than relative pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) # rotate y labels by 90 deg labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth) fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha) xlabel(xlab, {'color' : 'k'}) if not plotLatex: ylab = '10**%d electrons/sec' % nrm ylabel(ylab, {'color' : 'k'}) grid() # loop over each time step, fit data, determine rms if status == 0: masterfit = indata * 0.0 mastersigma = zeros(len(masterfit)) functype = 'poly' + str(npoly) for i in range(len(cstep1)): pinit = [indata[cstep1[i]:cstep2[i]+1].mean()] if npoly > 0: for j in range(npoly): pinit.append(0.0) pinit = array(pinit,dtype='float32') try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]], indata[cstep1[i]:cstep2[i]+1],None,nsig,nsig,niter,logfile, verbose) for j in range(len(coeffs)): masterfit[cstep1[i]:cstep2[i]+1] += coeffs[j] * \ (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]])**j for j in range(cstep1[i],cstep2[i]+1): mastersigma[j] = sigma if plotfit: pylab.plot(plotx+intime[cstep1[i]]-intime0,ploty / 10**nrm, 'g',lw='3') except: for j in range(cstep1[i],cstep2[i]+1): masterfit[j] = indata[j] mastersigma[j] = 1.0e10 message = 'WARNING -- KEPOUTLIER: could not fit range ' message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]) kepmsg.warn(None,message) # reject outliers if status == 0: rejtime = []; rejdata = []; naxis2 = 0 for i in range(len(masterfit)): if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis: rejtime.append(intime[i]) rejdata.append(indata[i]) if operation == 'replace': [rnd] = kepstat.randarray([masterfit[i]],[mastersigma[i]]) table[naxis2] = table[i] table.field(datacol)[naxis2] = rnd naxis2 += 1 else: table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] rejtime = array(rejtime,dtype='float64') rejdata = array(rejdata,dtype='float32') pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro') # plot ranges xlim(xmin-xr*0.01,xmax+xr*0.01) if ymin >= 0.0: 
            ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            ylim(1.0e-10, ymax + yr * 0.01)

# render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file
    if status == 0:
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# end time
    if (status == 0):
        message = 'KEPOUTLIER completed at'
    else:
        message = '\nKEPOUTLIER aborted at'
    kepmsg.clock(message, logfile, verbose)
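# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original KEPOUTLIER task): the per-step
# fit-and-reject logic above amounts to iterative sigma clipping around a
# low-order polynomial. numpy.polyfit stands in for kepfit.lsqclip; names and
# data are hypothetical.
import numpy as np

def sigma_clip_poly(time, flux, npoly=3, nsig=3.0, niter=5):
    """Return a boolean mask that is False for points rejected as outliers."""
    keep = np.ones(flux.size, dtype=bool)
    for _ in range(niter):
        coeffs = np.polyfit(time[keep], flux[keep], npoly)
        resid = flux - np.polyval(coeffs, time)
        sigma = np.std(resid[keep], ddof=1)
        newkeep = np.abs(resid) < nsig * sigma
        if np.array_equal(newkeep, keep):
            break
        keep = newkeep
    return keep

if __name__ == '__main__':
    rng = np.random.default_rng(3)
    t = np.linspace(0.0, 10.0, 500)
    f = 1.0 + 0.001 * t + rng.normal(0.0, 1e-3, t.size)
    f[::50] += 0.02                                   # inject outliers
    mask = sigma_clip_poly(t, f)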
def martinsff(intime,indata,centr1,centr2, npoly_cxcy,sigma_cxcy,npoly_ardx, npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,verbose,logfile, status): # startup parameters status = 0 labelsize = 16 ticksize = 14 xsize = 20 ysize = 8 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 seterr(all="ignore") # fit centroid data with low-order polynomial cfit = zeros((len(centr2))) csig = zeros((len(centr2))) functype = 'poly' + str(npoly_cxcy) pinit = array([nanmean(centr2)]) if npoly_cxcy > 0: for j in range(npoly_cxcy): pinit = append(pinit,0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose) for j in range(len(coeffs)): cfit += coeffs[j] * numpy.power(centr1,j) csig[:] = sigma except: message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2) status = kepmsg.err(logfile,message,verbose) # sys.exit('') os._exit(1) # reject outliers time_good = array([],'float64') centr1_good = array([],'float32') centr2_good = array([],'float32') flux_good = array([],'float32') cad_good = array([],dtype=bool) for i in range(len(cfit)): if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]: cad_good = append(cad_good, True) time_good = append(time_good,intime[i]) centr1_good = append(centr1_good,centr1[i]) centr2_good = append(centr2_good,centr2[i]) flux_good = append(flux_good,indata[i]) else: #import ipdb #ipdb.set_trace() cad_good = append(cad_good, False) print(intime[i]) # covariance matrix for centroid time series centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)]) covar = cov(centr) # eigenvector eigenvalues of covariance matrix [eval, evec] = numpy.linalg.eigh(covar) ex = arange(-10.0,10.0,0.1) epar = evec[1,1] / evec[0,1] * ex enor = evec[1,0] / evec[0,0] * ex ex = ex + mean(centr1) epar = epar + mean(centr2_good) enor = enor + mean(centr2_good) # rotate centroid data centr_rot = dot(evec.T,centr) # fit polynomial to rotated centroids rfit = zeros((len(centr2))) rsig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(centr_rot[0,:])]) pinit = array([1.0]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit,0.0) try: coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100) ry = zeros((len(rx))) for i in range(len(coeffs)): ry = ry + coeffs[i] * numpy.power(rx,i) # calculate arclength of centroids s = zeros((len(rx))) for i in range(1,len(s)): work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2 s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1]) # fit arclength as a function of strongest eigenvector sfit = zeros((len(centr2))) ssig = zeros((len(centr2))) functype = 'poly' + str(npoly_ardx) pinit = array([nanmean(s)]) if npoly_ardx > 0: for j in range(npoly_ardx): pinit = append(pinit,0.0) try: acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \ 
kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) # correlate arclength with detrended flux t = copy(time_good) y = copy(flux_good) z = centr_rot[1,:] x = zeros((len(z))) for i in range(len(acoeffs)): x = x + acoeffs[i] * numpy.power(z,i) # calculate time derivative of arclength s dx = zeros((len(x))) for i in range(1,len(x)): dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1]) dx[0] = dx[1] # fit polynomial to derivative and flag outliers (thruster firings) dfit = zeros((len(dx))) dsig = zeros((len(dx))) functype = 'poly' + str(npoly_dsdt) pinit = array([nanmean(dx)]) if npoly_dsdt > 0: for j in range(npoly_dsdt): pinit = append(pinit,0.0) try: dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \ kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) for i in range(len(dcoeffs)): dfit = dfit + dcoeffs[i] * numpy.power(t,i) centr1_pnt = array([],'float32') centr2_pnt = array([],'float32') time_pnt = array([],'float64') flux_pnt = array([],'float32') dx_pnt = array([],'float32') s_pnt = array([],'float32') time_thr = array([],'float64') flux_thr = array([],'float32') dx_thr = array([],'float32') thr_cadence = zeros(len(t),dtype=bool) for i in range(len(t)): if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma: time_pnt = append(time_pnt,time_good[i]) flux_pnt = append(flux_pnt,flux_good[i]) dx_pnt = append(dx_pnt,dx[i]) s_pnt = append(s_pnt,x[i]) centr1_pnt = append(centr1_pnt,centr1_good[i]) centr2_pnt = append(centr2_pnt,centr2_good[i]) else: time_thr = append(time_thr,time_good[i]) flux_thr = append(flux_thr,flux_good[i]) dx_thr = append(dx_thr,dx[i]) thr_cadence[i] = True # fit arclength-flux correlation cfit = zeros((len(time_pnt))) csig = zeros((len(time_pnt))) functype = 'poly' + str(npoly_arfl) pinit = array([nanmean(flux_pnt)]) if npoly_arfl > 0: for j in range(npoly_arfl): pinit = append(pinit,0.0) try: ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \ kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose) except: message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial' status = kepmsg.err(logfile,message,verbose) # correction factors for unfiltered data centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)]) centr_rot = dot(evec.T,centr) yy = copy(indata) zz = centr_rot[1,:] xx = zeros((len(zz))) cfac = zeros((len(zz))) for i in range(len(acoeffs)): xx = xx + acoeffs[i] * numpy.power(zz,i) for i in range(len(ccoeffs)): cfac = cfac + ccoeffs[i] * numpy.power(xx,i) # apply correction to flux time-series out_detsap = indata / cfac #add back in the missing thr_cadence data new_thr = np.zeros_like(cad_good) j = 0 if np.all(cad_good == True): pass else: for i,c in enumerate(cad_good): if c == False: j+=1 else: new_thr[i] = thr_cadence[i-j] return out_detsap, cfac, new_thr
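# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original martinsff helper): thruster
# firings are flagged above as cadences where the time derivative of the
# arclength deviates from a polynomial trend by more than sigma_dsdt sigma.
# The stand-alone version below uses numpy.polyfit in place of kepfit.lsqclip;
# names and data are hypothetical.
import numpy as np

def flag_thruster_firings(time, arclength, npoly=1, nsig=3.0):
    """Return a boolean array, True where ds/dt is an outlier."""
    dsdt = np.gradient(arclength, time)
    coeffs = np.polyfit(time, dsdt, npoly)
    resid = dsdt - np.polyval(coeffs, time)
    return np.abs(resid) > nsig * np.std(resid, ddof=1)

if __name__ == '__main__':
    rng = np.random.default_rng(4)
    t = np.linspace(0.0, 10.0, 1000)
    s = 0.05 * t + rng.normal(0.0, 1e-4, t.size)
    s[400:] += 0.3                                    # a sudden pointing jump
    fired = flag_thruster_firings(t, s)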
def kepdetrend(infile,outfile,datacol,errcol,ranges1,npoly1,nsig1,niter1,
               ranges2,npoly2,nsig2,niter2,popnans,plot,clobber,verbose,logfile,
               status,cmdLine=False):

# startup parameters

    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 9
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDETREND -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errcol='+str(errcol)+' '
    call += 'ranges1='+str(ranges1)+' '
    call += 'npoly1='+str(npoly1)+' '
    call += 'nsig1='+str(nsig1)+' '
    call += 'niter1='+str(niter1)+' '
    call += 'ranges2='+str(ranges2)+' '
    call += 'npoly2='+str(npoly2)+' '
    call += 'nsig2='+str(nsig2)+' '
    call += 'niter2='+str(niter2)+' '
    popn = 'n'
    if (popnans): popn = 'y'
    call += 'popnans='+popn+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time

    kepmsg.clock('KEPDETREND started at',logfile,verbose)

# test log file

    logfile = kepmsg.test(logfile)

# clobber output file

    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPDETREND: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file

    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)

# fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure

    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table

    if status == 0:
        work1 = numpy.array([table.field('time'), table.field(datacol), table.field(errcol)])
        work1 = numpy.rot90(work1,3)
        # masks over the original rows; assumed definitions, required by the popnans branch below
        good_data = ~numpy.isnan(work1).any(1)
        bad_data = numpy.isnan(work1).any(1)
        work1 = work1[good_data]

# read table columns

    if status == 0:
        intime = work1[:,2] + bjdref
        indata = work1[:,1]
        inerr = work1[:,0]
#        print(intime)   # leftover debugging output

# time ranges for region 1 (region to be corrected)

    if status == 0:
        time1 = []; data1 = []; err1 = []
        t1start, t1stop, status = kepio.timeranges(ranges1,logfile,verbose)
    if status == 0:
        cadencelis1, status = kepstat.filterOnRange(intime,t1start,t1stop)
    if status == 0:
        for i in range(len(cadencelis1)):
            time1.append(intime[cadencelis1[i]])
            data1.append(indata[cadencelis1[i]])
            if errcol.lower() != 'none':
                err1.append(inerr[cadencelis1[i]])
        t0 = time1[0]
        time1 = array(time1,dtype='float64') - t0
        data1 = array(data1,dtype='float32')
        if errcol.lower() != 'none':
            err1 = array(err1,dtype='float32')
        else:
            err1 = None

# fit function to range 1

    if status == 0:
        functype = 'poly' + str(npoly1)
        pinit = [data1.mean()]
        if npoly1 > 0:
            for i in range(npoly1):
                pinit.append(0)
        pinit = array(pinit,dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx1, ploty1, status = \
            kepfit.lsqclip(functype,pinit,time1,data1,err1,nsig1,nsig1,niter1,
                           logfile,verbose)
        fit1 = indata * 0.0
        for i in range(len(coeffs)):
            fit1 += coeffs[i] * (intime - t0)**i
        for i in range(len(intime)):
            if i not in cadencelis1:
                fit1[i] = 0.0
        plotx1 += t0
#        print(coeffs)   # leftover debugging output

# time ranges for region 2 (region that is correct)

    if status == 0:
        time2 = []; data2 = []; err2 = []
        t2start, t2stop, status = kepio.timeranges(ranges2,logfile,verbose)
        cadencelis2, status = kepstat.filterOnRange(intime,t2start,t2stop)
        for i in range(len(cadencelis2)):
            time2.append(intime[cadencelis2[i]])
            data2.append(indata[cadencelis2[i]])
            if errcol.lower() != 'none':
                err2.append(inerr[cadencelis2[i]])
        t0 = time2[0]
        time2 = array(time2,dtype='float64') - t0
        data2 = array(data2,dtype='float32')
        if errcol.lower() != 'none':
            err2 = array(err2,dtype='float32')
        else:
            err2 = None

# fit function to range 2

    if status == 0:
        functype = 'poly' + str(npoly2)
        pinit = [data2.mean()]
        if npoly2 > 0:
            for i in range(npoly2):
                pinit.append(0)
        pinit = array(pinit,dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx2, ploty2, status = \
            kepfit.lsqclip(functype,pinit,time2,data2,err2,nsig2,nsig2,niter2,
                           logfile,verbose)
        fit2 = indata * 0.0
        for i in range(len(coeffs)):
            fit2 += coeffs[i] * (intime - t0)**i
        for i in range(len(intime)):
            # region-2 fit is only applied over the cadences being corrected (region 1)
            if i not in cadencelis1:
                fit2[i] = 0.0
        plotx2 += t0

# correct the data: remove the region-1 trend, restore the region-2 level

    if status == 0:
        outdata = indata - fit1 + fit2
        if errcol.lower() != 'none':
            outerr = inerr * 1.0

# comment keyword in output file

    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# clean up x-axis unit

    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        plotx1 = plotx1 - intime0
        plotx2 = plotx2 - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units

    if status == 0:
        pout = outdata
        nrm = len(str(int(numpy.nanmax(indata))))-1
        indata = indata / 10**nrm
        pout = pout / 10**nrm
        ploty1 = ploty1 / 10**nrm
        ploty2 = ploty2 / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits

        xmin = ptime.min()
        xmax = ptime.max()
        ymin = indata.min()
        ymax = indata.max()
        omin = pout.min()
        omax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        oo = omax - omin
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        indata = insert(indata,[0],[0.0])
        indata = append(indata,[0.0])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve

    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            pass
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot original data

        ax = pylab.axes([0.06,0.523,0.93,0.45])

# force tick labels to be absolute rather than relative

        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg

        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)

        pylab.plot(ptime,indata,color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,indata,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.plot(plotx1,ploty1,color='r',linestyle='-',linewidth=2.0)
        pylab.plot(plotx2,ploty2,color='g',linestyle='-',linewidth=2.0)
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin > 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot detrended data

        ax = pylab.axes([0.06,0.073,0.93,0.45])

# force tick labels to be absolute rather than relative

        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg

        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)

        pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin > 0.0:
            pylab.ylim(omin-oo*0.01,omax+oo*0.01)
        else:
            pylab.ylim(1.0e-10,omax+oo*0.01)
        pylab.xlabel(xlab, {'color' : 'k'})
        try:
            pylab.ylabel(ylab, {'color' : 'k'})
        except:
            ylab = '10**%d e-/s' % nrm
            pylab.ylabel(ylab, {'color' : 'k'})

# render plot

    if status == 0:
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file

    if status == 0 and popnans:
        instr[1].data.field(datacol)[good_data] = outdata
        instr[1].data.field(errcol)[good_data] = outerr
        instr[1].data.field(datacol)[bad_data] = None
        instr[1].data.field(errcol)[bad_data] = None
        instr.writeto(outfile)
    elif status == 0 and not popnans:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
            if errcol.lower() != 'none':
                instr[1].data.field(errcol)[i] = outerr[i]
        instr.writeto(outfile)

# close input file

    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time

    if (status == 0):
        message = 'KEPDETREND completed at'
    else:
        message = '\nKEPDETREND aborted at'
    kepmsg.clock(message,logfile,verbose)
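# The sketch below is a minimal, self-contained illustration of the correction that
# kepdetrend performs (outdata = indata - fit1 + fit2): fit one polynomial to the
# region to be corrected and one to a trusted reference region, then, over the
# corrected region only, subtract the local trend and add back the level predicted
# by the reference fit. It uses numpy.polyfit instead of kepfit.lsqclip, so there is
# no iterative sigma clipping, and the helper name detrend_region and the toy masks
# are invented here purely for illustration.

def detrend_region(time, flux, bad_mask, good_mask, npoly_bad=1, npoly_good=1):
    """Replace the trend inside `bad_mask` with the trend extrapolated from `good_mask`."""
    t0_bad = time[bad_mask][0]
    t0_good = time[good_mask][0]

    # fit a low-order polynomial to each region (highest-order coefficient first)
    coeffs_bad = numpy.polyfit(time[bad_mask] - t0_bad, flux[bad_mask], npoly_bad)
    coeffs_good = numpy.polyfit(time[good_mask] - t0_good, flux[good_mask], npoly_good)

    # evaluate both fits over the region to be corrected only; elsewhere they stay zero,
    # so cadences outside the corrected region are returned unchanged
    fit_bad = numpy.zeros_like(flux)
    fit_good = numpy.zeros_like(flux)
    fit_bad[bad_mask] = numpy.polyval(coeffs_bad, time[bad_mask] - t0_bad)
    fit_good[bad_mask] = numpy.polyval(coeffs_good, time[bad_mask] - t0_good)

    # subtract the local trend and restore the level predicted by the good region
    return flux - fit_bad + fit_good

# example usage (not executed here): mark the first half of a light curve as the
# region to correct and the second half as the reference region, then call
#   corrected = detrend_region(time, flux, bad_mask, ~bad_mask)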
def GetCDPP(time,trial_lc,npoly,nsig,niter,winsize,stepsize,timescale,logfile,verbose,status):

# detrend data: find limits of each time step

    if status == 0:
        npts = len(time)
        tstep1 = []; tstep2 = []
        work = time[0]
        while work <= time[-1]:
            tstep1.append(work)
            tstep2.append(array([work+winsize,time[-1]],dtype='float64').min())
            work += stepsize

# detrend data: find cadence limits of each time step

    if status == 0:
        cstep1 = []; cstep2 = []
        for n in range(len(tstep1)):
            for i in range(len(time)-1):
                if time[i] <= tstep1[n] and time[i+1] > tstep1[n]:
                    for j in range(i,len(time)-1):
                        if time[j] < tstep2[n] and time[j+1] >= tstep2[n]:
                            cstep1.append(i)
                            cstep2.append(j+1)

# detrend data: loop over each time step, fit data, determine rms

    if status == 0:
        fitarray = zeros((npts,len(cstep1)),dtype='float32')
        fitarray[:,:] = numpy.nan
        masterfit = trial_lc * 0.0
        functype = 'poly' + str(npoly)
        for i in range(len(cstep1)):
            timeSeries = time[cstep1[i]:cstep2[i]+1]-time[cstep1[i]]
            dataSeries = trial_lc[cstep1[i]:cstep2[i]+1]
            pinit = [dataSeries.mean()]
            if npoly > 0:
                for j in range(npoly):
                    pinit.append(0.0)
            pinit = array(pinit,dtype='float32')
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,timeSeries,dataSeries,None,nsig,nsig,niter,
                                   logfile,verbose)
                fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
                for j in range(len(coeffs)):
                    fitarray[cstep1[i]:cstep2[i]+1,i] += coeffs[j] * timeSeries**j
            except:
                for j in range(cstep1[i],cstep2[i]+1):
                    fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
#                message  = 'WARNING -- KEPFLATTEN: could not fit range '
#                message += str(time[cstep1[i]]) + '-' + str(time[cstep2[i]])
#                kepmsg.warn(None,message)

# detrend data: find mean fit for each timestamp

    if status == 0:
        for i in range(npts):
            masterfit[i] = nanmean(fitarray[i,:])
        masterfit[-1] = masterfit[-4]   # fudge
        masterfit[-2] = masterfit[-4]   # fudge
        masterfit[-3] = masterfit[-4]   # fudge

# detrend data: normalize light curve

    if status == 0:
        trial_lc = trial_lc / masterfit

# calculate STDDEV in units of ppm

    if status == 0:
        stddev = kepstat.running_frac_std(time,trial_lc,timescale/24) * 1.0e6

# calculate median STDDEV

    if status == 0:
        medstddev = ones((len(stddev)),dtype='float32') * median(stddev)
#        print '\nMedian %.1fhr STDDEV = %d ppm' % (timescale, median(stddev))

    return median(stddev), stddev, status
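# The helper below is only an illustrative stand-in for the noise estimate that
# GetCDPP derives from kepstat.running_frac_std: a running standard deviation of
# the trend-normalized light curve over a fixed timescale, expressed in parts per
# million. It is a simplification under assumed behavior, not the actual kepstat
# implementation, and the name running_std_ppm is invented here.

def running_std_ppm(time, norm_flux, timescale_days):
    """Running std of `norm_flux` (already divided by its trend) over `timescale_days`."""
    stddev = numpy.zeros(len(norm_flux), dtype='float64')
    half = timescale_days / 2.0
    for i in range(len(time)):
        # all cadences within +/- half a timescale of this cadence
        window = (time >= time[i] - half) & (time <= time[i] + half)
        stddev[i] = numpy.std(norm_flux[window])
    return stddev * 1.0e6

# e.g. a 6.5-hour CDPP-like figure would be roughly
#   numpy.median(running_std_ppm(time, trial_lc / masterfit, 6.5 / 24.0))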
def kepflatten(infile,outfile,datacol,errcol,nsig,stepsize,winsize,npoly,niter,ranges,
               plot,clobber,verbose,logfile,status,cmdLine=False):

# startup parameters

    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 10
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPFLATTEN -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errcol='+str(errcol)+' '
    call += 'nsig='+str(nsig)+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'winsize='+str(winsize)+' '
    call += 'npoly='+str(npoly)+' '
    call += 'niter='+str(niter)+' '
    call += 'ranges='+str(ranges)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time

    kepmsg.clock('KEPFLATTEN started at',logfile,verbose)

# test log file

    logfile = kepmsg.test(logfile)

# test winsize > stepsize

    if winsize < stepsize:
        message = 'ERROR -- KEPFLATTEN: winsize must be greater than stepsize'
        status = kepmsg.err(logfile,message,verbose)

# clobber output file

    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPFLATTEN: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file

    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure

    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table

    if status == 0:
        try:
            datac = table.field(datacol)
        except:
            message = 'ERROR -- KEPFLATTEN: cannot find or read data column ' + datacol
            status = kepmsg.err(logfile,message,verbose)
    if status == 0:
        try:
            err = table.field(errcol)
        except:
            message = 'WARNING -- KEPFLATTEN: cannot find or read error column ' + errcol
            kepmsg.warn(logfile,message)
            errcol = 'None'
    if status == 0:
        if errcol.lower() == 'none' or errcol == 'PSF_FLUX_ERR':
            # no usable error column: approximate Poisson errors from the flux
            err = datac * cadence
            err = numpy.sqrt(numpy.abs(err)) / cadence
            work1 = numpy.array([table.field('time'), datac, err])
        else:
            work1 = numpy.array([table.field('time'), datac, err])
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns

    if status == 0:
        intime = work1[:,2] + bjdref
        indata = work1[:,1]
        inerr = work1[:,0]
        if len(intime) == 0:
            message = 'ERROR -- KEPFLATTEN: one of the input arrays is all NaN'
            status = kepmsg.err(logfile,message,verbose)

# time ranges for region to be corrected

    if status == 0:
        t1, t2, status = kepio.timeranges(ranges,logfile,verbose)
        cadencelis, status = kepstat.filterOnRange(intime,t1,t2)

# find limits of each time step

    if status == 0:
        tstep1 = []; tstep2 = []
        work = intime[0]
        while work <= intime[-1]:
            tstep1.append(work)
            tstep2.append(array([work+winsize,intime[-1]],dtype='float64').min())
            work += stepsize

# find cadence limits of each time step

    if status == 0:
        cstep1 = []; cstep2 = []
        for n in range(len(tstep1)):
            for i in range(len(intime)-1):
                if intime[i] <= tstep1[n] and intime[i+1] > tstep1[n]:
                    for j in range(i,len(intime)-1):
                        if intime[j] < tstep2[n] and intime[j+1] >= tstep2[n]:
                            cstep1.append(i)
                            cstep2.append(j+1)

# comment keyword in output file

    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# clean up x-axis unit

    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units

    if status == 0:
        pout = copy(indata)
        nrm = len(str(int(pout.max())))-1
        pout = pout / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits

        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve

    if status == 0 and plot:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot data

        ax = pylab.axes([0.06,0.54,0.93,0.43])

# force tick labels to be absolute rather than relative

        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg

        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90)
        pylab.setp(pylab.gca(),xticklabels=[])

        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        if not plotLatex:
            ylab = '10**%d electrons/sec' % nrm
        ylabel(ylab, {'color' : 'k'})
        grid()

# loop over each time step, fit data, determine rms

    if status == 0:
        fitarray = numpy.zeros((len(indata),len(cstep1)),dtype='float32')
        sigarray = numpy.zeros((len(indata),len(cstep1)),dtype='float32')
        fitarray[:,:] = numpy.nan
        sigarray[:,:] = numpy.nan
        masterfit = indata * 0.0
        mastersigma = numpy.zeros(len(masterfit))
        functype = 'poly' + str(npoly)
        for i in range(len(cstep1)):
            timeSeries = intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]]
            dataSeries = indata[cstep1[i]:cstep2[i]+1]
            fitTimeSeries = numpy.array([],dtype='float32')
            fitDataSeries = numpy.array([],dtype='float32')
            pinit = [dataSeries.mean()]
            if npoly > 0:
                for j in range(npoly):
                    pinit.append(0.0)
            pinit = array(pinit,dtype='float32')
            try:
                if len(fitarray[cstep1[i]:cstep2[i]+1,i]) > len(pinit):
                    coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                        kepfit.lsqclip(functype,pinit,timeSeries,dataSeries,None,nsig,nsig,niter,
                                       logfile,verbose)
                    fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
                    sigarray[cstep1[i]:cstep2[i]+1,i] = sigma
                    for j in range(len(coeffs)):
                        fitarray[cstep1[i]:cstep2[i]+1,i] += coeffs[j] * timeSeries**j
            except:
                for j in range(cstep1[i],cstep2[i]+1):
                    fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
                    sigarray[cstep1[i]:cstep2[i]+1,i] = 1.0e-10
                message  = 'WARNING -- KEPFLATTEN: could not fit range '
                message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]])
                kepmsg.warn(None,message)

# find mean fit for each timestamp

    if status == 0:
        for i in range(len(indata)):
            masterfit[i] = scipy.stats.nanmean(fitarray[i,:])
            mastersigma[i] = scipy.stats.nanmean(sigarray[i,:])
        masterfit[-1] = masterfit[-4]   # fudge
        masterfit[-2] = masterfit[-4]   # fudge
        masterfit[-3] = masterfit[-4]   # fudge
        if plot:
            pylab.plot(intime-intime0, masterfit / 10**nrm,'g',lw='3')

# reject outliers

    if status == 0:
        rejtime = []; rejdata = []; naxis2 = 0
        for i in range(len(masterfit)):
            if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis:
                rejtime.append(intime[i])
                rejdata.append(indata[i])
        rejtime = array(rejtime,dtype='float64')
        rejdata = array(rejdata,dtype='float32')
        if plot:
            pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro')

# new data for output file

    if status == 0:
        outdata = indata / masterfit
        outerr = inerr / masterfit

# plot ranges

    if status == 0 and plot:
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# plot residual data

    if status == 0 and plot:
        ax = pylab.axes([0.06,0.09,0.93,0.43])

# force tick labels to be absolute rather than relative

    if status == 0 and plot:
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg

        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90)

# clean up y-axis units

    if status == 0:
        pout = copy(outdata)
        ylab = 'Normalized Flux'

# data limits

    if status == 0 and plot:
        ymin = pout.min()
        ymax = pout.max()
        yr = ymax - ymin
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot ranges

    if status == 0 and plot:
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# render plot

    if status == 0 and plot:
        pylab.savefig(re.sub('.fits','.png',outfile))
        if cmdLine:
            pylab.show(block=True)
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# add NaNs back into data

    if status == 0:
        n = 0
        work1 = array([],dtype='float32')
        work2 = array([],dtype='float32')
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)
        tn = table.field('time')
        dn = table.field(datacol)
        for i in range(len(table.field(0))):
            if numpy.isfinite(tn[i]) and numpy.isfinite(dn[i]) and numpy.isfinite(err[i]):
                try:
                    work1 = numpy.append(work1,outdata[n])
                    work2 = numpy.append(work2,outerr[n])
                    n += 1
                except:
                    pass
            else:
                work1 = numpy.append(work1,numpy.nan)
                work2 = numpy.append(work2,numpy.nan)

# history keyword in output file

    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# write output file

        try:
            col1 = pyfits.Column(name='DETSAP_FLUX',format='E13.7',array=work1)
            col2 = pyfits.Column(name='DETSAP_FLUX_ERR',format='E13.7',array=work2)
            cols = instr[1].data.columns + col1 + col2
            instr[1] = pyfits.new_table(cols,header=instr[1].header)
            instr.writeto(outfile)
        except ValueError:
            try:
                instr[1].data.field('DETSAP_FLUX')[:] = work1
                instr[1].data.field('DETSAP_FLUX_ERR')[:] = work2
                instr.writeto(outfile)
            except:
                message = 'ERROR -- KEPFLATTEN: cannot add DETSAP_FLUX data to FITS file'
                status = kepmsg.err(logfile,message,verbose)

# close input file

    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time

    if (status == 0):
        message = 'KEPFLATTEN completed at'
    else:
        message = '\nKEPFLATTEN aborted at'
    kepmsg.clock(message,logfile,verbose)
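# The sketch below condenses the flattening strategy used by kepflatten: fit a
# low-order polynomial in overlapping windows (winsize wide, advanced by stepsize),
# average the overlapping fits at each timestamp into a master trend, and divide
# the flux by that trend. numpy.polyfit stands in for kepfit.lsqclip, so there is
# no iterative sigma clipping, and the helper name flatten is invented here; it is
# a simplified illustration, not the tool itself.

def flatten(time, flux, winsize=3.0, stepsize=1.0, npoly=2):
    """Return (normalized flux, master trend) from overlapping windowed polynomial fits."""
    fits = numpy.full((len(time), 0), numpy.nan)
    start = time[0]
    while start <= time[-1]:
        stop = min(start + winsize, time[-1])
        inwin = (time >= start) & (time <= stop)
        column = numpy.full(len(time), numpy.nan)
        if inwin.sum() > npoly + 1:
            # fit this window and evaluate the fit only over its own cadences
            coeffs = numpy.polyfit(time[inwin] - time[inwin][0], flux[inwin], npoly)
            column[inwin] = numpy.polyval(coeffs, time[inwin] - time[inwin][0])
        fits = numpy.column_stack([fits, column])
        start += stepsize
    masterfit = numpy.nanmean(fits, axis=1)   # mean of all window fits covering each cadence
    return flux / masterfit, masterfit

# example usage (not executed here):
#   outflux, trend = flatten(intime, indata, winsize=3.0, stepsize=1.0, npoly=2)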