def kepdraw(infile,outfile,datacol,ploterr,errcol,quality,
            lcolor,lwidth,fcolor,falpha,labelsize,ticksize,
            xsize,ysize,fullrange,chooserange,y1,y2,plotgrid,
            ylabel,plottype,verbose,logfile,status,cmdLine=False):
    """Plot a column of a Kepler light-curve FITS file as a time series.

    Reads the time column and ``datacol`` (plus ``errcol``) from ``infile``,
    strips NaN/Inf cadences, and renders the series with matplotlib, either
    as points (``plottype='fast'``) or as gap-aware line segments.  The plot
    is written to ``outfile`` unless that name is 'none' (case-insensitive).
    The ``status`` integer is chained through every kepio/kepmsg call;
    any non-zero status short-circuits all later ``if status == 0`` stages.

    NOTE(review): ``ploterr`` and ``dataerr`` are collected but never used
    for error bars in this body, and ``qualtest`` is read but never applied
    as a filter — presumably lost functionality; confirm against the
    original PyKE kepdraw.
    """

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDRAW -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    perr = 'n'
    if (ploterr): perr = 'y'
    call += 'ploterr='+perr+ ' '
    call += 'errcol='+errcol+' '
    qual = 'n'
    if (quality): qual = 'y'
    call += 'quality='+qual+ ' '
    call += 'lcolor='+str(lcolor)+' '
    call += 'lwidth='+str(lwidth)+' '
    call += 'fcolor='+str(fcolor)+' '
    call += 'falpha='+str(falpha)+' '
    call += 'labelsize='+str(labelsize)+' '
    call += 'ticksize='+str(ticksize)+' '
    call += 'xsize='+str(xsize)+' '
    call += 'ysize='+str(ysize)+' '
    frange = 'n'
    if (fullrange): frange = 'y'
    call += 'fullrange='+frange+ ' '
    crange = 'n'
    if (chooserange): crange = 'y'
    call += 'chooserange='+crange+ ' '
    call += 'ymin='+str(y1)+' '
    call += 'ymax='+str(y2)+' '
    pgrid = 'n'
    if (plotgrid): pgrid = 'y'
    call += 'plotgrid='+pgrid+ ' '
    call += 'ylabel='+str(ylabel)+' '
    call += 'plottype='+plottype+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPDRAW started at',logfile,verbose)

# test log file
    # NOTE(review): the log file is validated only after it has already been
    # written to above — confirm this ordering is intentional.
    logfile = kepmsg.test(logfile)

# open input file
    if status == 0:
        struct, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(struct,infile,logfile,verbose,status)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,struct[1],logfile,verbose)

# read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,table,logfile,verbose)
        # shift relative times onto the absolute BJD reference epoch
        intime += bjdref
        indata, status = kepio.readfitscol(infile,table,datacol,logfile,verbose)
        indataerr, status = kepio.readfitscol(infile,table,errcol,logfile,verbose)

# read table quality column
    if status == 0 and quality:
        # only verifies the column exists; qualtest is not used afterwards
        try:
            qualtest = table.field('SAP_QUALITY')
        except:
            message = 'ERROR -- KEPDRAW: no SAP_QUALITY column found in file ' + infile
            message += '. Use kepdraw quality=n'
            status = kepmsg.err(logfile,message,verbose)

# close infile
    if status == 0:
        status = kepio.closefits(struct,logfile,verbose)

# remove infinities and bad data
    if status == 0:
        # an all-NaN error column would otherwise poison the row filter below
        if numpy.isnan(numpy.nansum(indataerr)):
            indataerr[:] = 1.0e-5
        # stack columns, rotate to rows, then drop any row holding NaN/Inf;
        # after rot90(...,3) the column order is reversed: [err, data, time]
        work1 = numpy.array([intime, indata, indataerr],dtype='float64')
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]
        work1 = work1[~numpy.isinf(work1).any(1)]
        barytime = numpy.array(work1[:,2],dtype='float64')
        data = numpy.array(work1[:,1],dtype='float32')
        dataerr = numpy.array(work1[:,0],dtype='float32')
        if len(barytime) == 0:
            message = 'ERROR -- KEPDRAW: Plotting arrays are full of NaN'
            status = kepmsg.err(logfile,message,verbose)

# clean up x-axis unit
    if status == 0:
        # subtract a round-hundred epoch so x tick labels stay short
        barytime0 = float(int(tstart / 100) * 100.0)
        barytime -= barytime0
        xlab = 'BJD $-$ %d' % barytime0

# clean up y-axis units
        # normalize flux by a power of ten so the axis reads in small numbers
        try:
            nrm = len(str(int(numpy.nanmax(data))))-1
        except:
            nrm = 0
        data = data / 10**nrm
        if 'e$^-$ s$^{-1}$' in ylabel or 'default' in ylabel:
            if nrm == 0:
                ylab1 = 'e$^-$ s$^{-1}$'
            else:
                ylab1 = '10$^%d$ e$^-$ s$^{-1}$' % nrm
        else:
            # underscores would be interpreted as subscripts by TeX rendering
            ylab1 = re.sub('_','-',ylabel)

# data limits
        xmin = numpy.nanmin(barytime)
        xmax = numpy.nanmax(barytime)
        ymin = numpy.nanmin(data)
        ymax = numpy.nanmax(data)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the series with zero-flux endpoints so pylab.fill() closes
        # the polygon down to the baseline
        barytime = insert(barytime,[0],[barytime[0]])
        barytime = append(barytime,[barytime[-1]])
        data = insert(data,[0],[0.0])
        data = append(data,0.0)

# define plot formats
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            pylab.rcParams.update(params)
        except:
            # best-effort styling: older matplotlib may reject some keys
            pass

# define size of plot on monitor screen
        pylab.figure(figsize=[xsize,ysize])

# delete any fossil plots in the matplotlib window
        pylab.clf()

# position axes inside the plotting window
        ax = pylab.subplot(111)
        pylab.subplots_adjust(0.07,0.1,0.92,0.88)

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=ticksize)

# if plot type is 'fast' plot data time series as points
        if plottype == 'fast':
            pylab.plot(barytime,data,'o',color=lcolor)

# if plot type is 'pretty' plot data time series as an unbroken line, retaining data gaps
        else:
            # break the line wherever consecutive samples are separated by
            # more than two cadences (gap threshold in days)
            ltime = numpy.array([],dtype='float64')
            ldata = numpy.array([],dtype='float32')
            dt = 0
            work1 = 2.0 * cadence / 86400
            for i in range(1,len(data)-1):
                dt = barytime[i] - barytime[i-1]
                if dt < work1:
                    ltime = numpy.append(ltime,barytime[i])
                    ldata = numpy.append(ldata,data[i])
                else:
                    # gap found: flush the accumulated segment and restart
                    pylab.plot(ltime,ldata,color=lcolor,linestyle='-',linewidth=lwidth)
                    ltime = numpy.array([],dtype='float64')
                    ldata = numpy.array([],dtype='float32')
            pylab.plot(ltime,ldata,color=lcolor,linestyle='-',linewidth=lwidth)

# plot the fill color below data time series, with no data gaps
        pylab.fill(barytime,data,fc=fcolor,linewidth=0.0,alpha=falpha)

# define plot x and y limits
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin-yr*0.01 <= 0.0 or fullrange:
            # keep the lower limit strictly positive so the fill baseline shows
            pylab.ylim(1.0e-10,ymax+yr*0.01)
        else:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        if chooserange:
            pylab.ylim(y1,y2)

# plot labels
        pylab.xlabel(xlab, {'color' : 'k'})
        try:
            pylab.ylabel(ylab1, {'color' : 'k'})
        except:
            # fall back to a plain-ASCII label if TeX rendering fails
            ylab1 = '10**%d e-/s' % nrm
            pylab.ylabel(ylab1, {'color' : 'k'})

# make grid on plot
        if plotgrid: pylab.grid()

# save plot to file
    if status == 0 and outfile.lower() != 'none':
        pylab.savefig(outfile)

# render plot
    if cmdLine:
#        pylab.show()
        pylab.show(block=True)
    else:
        pylab.ion()
        pylab.plot([])
        pylab.ioff()

# end time
    if (status == 0):
        message = 'KEPDRAW completed at'
    else:
        message = '\nKEPDRAW aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepbls(infile,outfile,datacol,errcol,minper,maxper,mindur,maxdur,nsearch,
           nbins,plot,clobber,verbose,logfile,status,cmdLine=False):
    """Box-Least-Squares transit search over a Kepler light curve.

    For ``nsearch`` trial periods between ``minper`` and ``maxper`` (days),
    the light curve is phase-folded into ``nbins`` bins and scanned over
    box durations between ``mindur`` and ``maxdur`` (hours) for the maximum
    signal residue (Kovacs et al. 2002 style statistic).  Results are
    appended to ``outfile`` as a new 'BLS' FITS extension and optionally
    plotted.  ``status`` is chained through every kepio/kepmsg call and
    short-circuits later stages when non-zero.

    NOTE(review): this function is redefined later in the file; the later
    definition shadows this one at import time.
    """

# startup parameters
    numpy.seterr(all="ignore")
    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPBLS -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errcol='+str(errcol)+' '
    call += 'minper='+str(minper)+' '
    call += 'maxper='+str(maxper)+' '
    call += 'mindur='+str(mindur)+' '
    call += 'maxdur='+str(maxdur)+' '
    call += 'nsearch='+str(nsearch)+' '
    call += 'nbins='+str(nbins)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPBLS started at',logfile,verbose)

# is duration greater than one bin in the phased light curve?
    if float(nbins) * maxdur / 24.0 / maxper <= 1.0:
        message = 'WARNING -- KEPBLS: ' + str(maxdur) + ' hours transit duration < 1 phase bin when P = '
        message += str(maxper) + ' days'
        kepmsg.warn(logfile,message)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPBLS: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): `file` here is the Python 2 builtin, not a filename
        # string — looks like a long-standing slip; confirm what
        # kepkey.emptykeys expects for its second argument.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table
    if status == 0:
        # stack [time, flux, err], rotate to rows and drop NaN rows;
        # after rot90(...,3) column order is reversed: [err, flux, time]
        work1 = numpy.array([table.field('time'), table.field(datacol), table.field(errcol)])
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns
    if status == 0:
        intime = work1[:,2] + bjdref
        indata = work1[:,1]
        inerr = work1[:,0]

# test whether the period range is sensible
    if status == 0:
        tr = intime[-1] - intime[0]
        if maxper > tr:
            message = 'ERROR -- KEPBLS: maxper is larger than the time range of the input data'
            status = kepmsg.err(logfile,message,verbose)

# prepare time series
    if status == 0:
        # work1 is reused here as zero-based time; work2 is mean-subtracted flux
        work1 = intime - intime[0]
        work2 = indata - numpy.mean(indata)

# start period search
    if status == 0:
        srMax = numpy.array([],dtype='float32')
        transitDuration = numpy.array([],dtype='float32')
        transitPhase = numpy.array([],dtype='float32')
        dPeriod = (maxper - minper) / nsearch
        trialPeriods = numpy.arange(minper,maxper+dPeriod,dPeriod,dtype='float32')
        complete = 0
        print ' '
        for trialPeriod in trialPeriods:
            # console progress indicator (carriage-return overwrite)
            fracComplete = float(complete) / float(len(trialPeriods) - 1) * 100.0
            txt = '\r'
            txt += 'Trial period = '
            txt += str(int(trialPeriod))
            txt += ' days ['
            txt += str(int(fracComplete))
            txt += '% complete]'
            txt += ' ' * 20
            sys.stdout.write(txt)
            sys.stdout.flush()
            complete += 1
            # per-trial slots; overwritten whenever a better residue is found
            srMax = numpy.append(srMax,0.0)
            transitDuration = numpy.append(transitDuration,numpy.nan)
            transitPhase = numpy.append(transitPhase,numpy.nan)
            trialFrequency = 1.0 / trialPeriod

# minimum and maximum transit durations in quantized phase units
            duration1 = max(int(float(nbins) * mindur / 24.0 / trialPeriod),2)
            duration2 = max(int(float(nbins) * maxdur / 24.0 / trialPeriod) + 1,duration1 + 1)

# 30 minutes in quantized phase units
            halfHour = int(0.02083333 / trialPeriod * nbins + 1)

# compute folded time series with trial period
            work4 = numpy.zeros((nbins),dtype='float32')
            work5 = numpy.zeros((nbins),dtype='float32')
            phase = numpy.array(((work1 * trialFrequency) - numpy.floor(work1 * trialFrequency)) * float(nbins),dtype='int')
            ptuple = numpy.array([phase, work2, inerr])
            ptuple = numpy.rot90(ptuple,3)
            # sort rows by phase bin (column 2 after rotation)
            phsort = numpy.array(sorted(ptuple,key=lambda ph: ph[2]))
            for i in range(nbins):
                elements = numpy.nonzero(phsort[:,2] == float(i))[0]
                work4[i] = numpy.mean(phsort[elements,1])
                work5[i] = math.sqrt(numpy.sum(numpy.power(phsort[elements,0], 2)) / len(elements))

# extend the work arrays beyond nbins by wrapping
            work4 = numpy.append(work4,work4[:duration2])
            work5 = numpy.append(work5,work5[:duration2])

# calculate weights of folded light curve points
            sigmaSum = numpy.nansum(numpy.power(work5,-2))
            omega = numpy.power(work5,-2) / sigmaSum

# calculate weighted phased light curve
            s = omega * work4

# iterate through trial period phase
            for i1 in range(nbins):

# iterate through transit durations
                for duration in range(duration1,duration2+1,int(halfHour)):

# calculate maximum signal residue
                    i2 = i1 + duration
                    sr1 = numpy.sum(numpy.power(s[i1:i2],2))
                    sr2 = numpy.sum(omega[i1:i2])
                    sr = math.sqrt(sr1 / (sr2 * (1.0 - sr2)))
                    if sr > srMax[-1]:
                        srMax[-1] = sr
                        transitDuration[-1] = float(duration)
                        transitPhase[-1] = float((i1 + i2) / 2)

# normalize maximum signal residue curve
        bestSr = numpy.max(srMax)
        bestTrial = numpy.nonzero(srMax == bestSr)[0][0]
        srMax /= bestSr
        # convert duration from phase bins to hours, phase to BJD-2454833
        transitDuration *= trialPeriods / 24.0
        BJD0 = numpy.array(transitPhase * trialPeriods / nbins,dtype='float64') + intime[0] - 2454833.0
        print '\n'

# clean up x-axis unit
    if status == 0:
        ptime = copy(trialPeriods)
        xlab = 'Trial Period (days)'

# clean up y-axis units
    if status == 0:
        pout = copy(srMax)
        ylab = 'Normalized Signal Residue'

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # pad with baseline endpoints so pylab.fill() closes the polygon
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve
    if status == 0 and plot:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot data
        ax = pylab.axes([0.06,0.10,0.93,0.87])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90)

# plot curve
    if status == 0 and plot:
        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot ranges
    if status == 0 and plot:
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# render plot
    if status == 0 and plot:
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# append new BLS data extension to the output file
    if status == 0:
        col1 = Column(name='PERIOD',format='E',unit='days',array=trialPeriods)
        col2 = Column(name='BJD0',format='D',unit='BJD - 2454833',array=BJD0)
        col3 = Column(name='DURATION',format='E',unit='hours',array=transitDuration)
        col4 = Column(name='SIG_RES',format='E',array=srMax)
        cols = ColDefs([col1,col2,col3,col4])
        instr.append(new_table(cols))
        instr[-1].header.cards['TTYPE1'].comment = 'column title: trial period'
        instr[-1].header.cards['TTYPE2'].comment = 'column title: trial mid-transit zero-point'
        instr[-1].header.cards['TTYPE3'].comment = 'column title: trial transit duration'
        instr[-1].header.cards['TTYPE4'].comment = 'column title: normalized signal residue'
        instr[-1].header.cards['TFORM1'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM2'].comment = 'column type: float64'
        instr[-1].header.cards['TFORM3'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM4'].comment = 'column type: float32'
        instr[-1].header.cards['TUNIT1'].comment = 'column units: days'
        instr[-1].header.cards['TUNIT2'].comment = 'column units: BJD - 2454833'
        instr[-1].header.cards['TUNIT3'].comment = 'column units: hours'
        instr[-1].header.update('EXTNAME','BLS','extension name')
        instr[-1].header.update('PERIOD',trialPeriods[bestTrial],'most significant trial period [d]')
        instr[-1].header.update('BJD0',BJD0[bestTrial] + 2454833.0,'time of mid-transit [BJD]')
        instr[-1].header.update('TRANSDUR',transitDuration[bestTrial],'transit duration [hours]')
        instr[-1].header.update('SIGNRES',srMax[bestTrial] * bestSr,'maximum signal residue')

# history keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

# print best trial period results
    if status == 0:
        print ' Best trial period = %.5f days' % trialPeriods[bestTrial]
        print ' Time of mid-transit = BJD %.5f' % (BJD0[bestTrial] + 2454833.0)
        print ' Transit duration = %.5f hours' % transitDuration[bestTrial]
        print ' Maximum signal residue = %.4g \n' % (srMax[bestTrial] * bestSr)

# end time
    if (status == 0):
        message = 'KEPBLS completed at'
    else:
        message = '\nKEPBLS aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepfilter(infile,outfile,datacol,function,cutoff,passband,plot,plotlab,
              clobber,verbose,logfile,status,cmdLine=False):
    """Low- or high-pass filter a Kepler light-curve column by convolution.

    Convolves ``datacol`` of ``infile`` with a 'boxcar', 'gauss' or 'sinc'
    kernel whose width is set by ``cutoff`` (a frequency in units of the
    inverse cadence).  ``passband='low'`` keeps the smoothed series;
    ``passband='high'`` subtracts it (re-adding the smoothed median) to keep
    the high frequencies.  The filtered column replaces ``datacol`` in the
    copy written to ``outfile``.  ``status`` is chained through every
    kepio/kepmsg call and short-circuits later stages when non-zero.
    """

## startup parameters
    status = 0
    numpy.seterr(all="ignore")
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPFILTER -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'function='+str(function)+' '
    call += 'cutoff='+str(cutoff)+' '
    call += 'passband='+str(passband)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

## start time
    kepmsg.clock('KEPFILTER started at',logfile,verbose)

## test log file
    logfile = kepmsg.test(logfile)

## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPFILTER: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        # FILEVER present means a pipeline product whose flux needs no
        # cadence renormalization; otherwise divide by the cadence
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

## fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): `file` here is the Python 2 builtin, not a filename
        # string — confirm what kepkey.emptykeys expects.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
        flux, status = kepio.readsapcol(infile,table,logfile,verbose)

# filter input data table
    if status == 0:
        # if NANCLEAN is already set the table was cleaned previously;
        # otherwise compact the table in place, keeping only finite,
        # non-zero-flux cadences, and record the NANCLEAN keyword
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

## read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,instr[1].data,logfile,verbose)
    if status == 0:
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

## define data sampling
    if status == 0:
        # tr = samples per day; timescale = kernel width in samples
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

## define convolution function
    if status == 0:
        if function == 'boxcar':
            filtfunc = numpy.ones(numpy.ceil(timescale))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        # normalize so the convolution preserves the mean level
        filtfunc /= numpy.sum(filtfunc)

## pad time series at both ends with noise model
    if status == 0:
        # prepend/append one kernel-length of synthetic noise matched to the
        # local mean and scatter, to suppress edge effects in the convolution
        ave, sigma = kepstat.stdev(indata[:len(filtfunc)])
        padded = append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                          np.ones(len(filtfunc)) * sigma), indata)
        ave, sigma = kepstat.stdev(indata[-len(filtfunc):])
        padded = append(padded, kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                                  np.ones(len(filtfunc)) * sigma))

## convolve data
    if status == 0:
        convolved = convolve(padded,filtfunc,'same')

## remove padding from the output array
    if status == 0:
        # NOTE(review): both branches are identical — either the boxcar case
        # once differed or the distinction was never needed; confirm intent.
        if function == 'boxcar':
            outdata = convolved[len(filtfunc):-len(filtfunc)]
        else:
            outdata = convolved[len(filtfunc):-len(filtfunc)]

## subtract low frequencies
    if status == 0 and passband == 'high':
        outmedian = median(outdata)
        outdata = indata - outdata + outmedian

## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

## clean up x-axis unit
    if status == 0:
        # subtract a round-hundred epoch so x tick labels stay short
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        # normalize flux by a power of ten so the axis reads in small numbers
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, plotlab)

## data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = numpy.nanmin(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad with baseline endpoints so fill() closes the polygon
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pout2 = insert(pout2,[0],[0.0])
        pout2 = append(pout2,0.0)

## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            print('ERROR -- KEPFILTER: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

## plot filtered data
        ax = pylab.axes([0.06,0.1,0.93,0.87])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        # original series in orange with a translucent fill below it
        pylab.plot(ptime,pout,color='#ff9900',linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        if passband == 'low':
            # skip the padded endpoints for a line-only overlay
            pylab.plot(ptime[1:-1],pout2[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        else:
            pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth)
            fill(ptime,pout2,color=lcolor,linewidth=0.0,alpha=falpha)
        xlabel(xlab, {'color' : 'k'})
        ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()

# render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time
    if (status == 0):
        message = 'KEPFILTER completed at'
    else:
        message = '\nKEPFILTER aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepbls(infile, outfile, datacol, errcol, minper, maxper, mindur, maxdur,
           nsearch, nbins, plot, clobber, verbose, logfile, status, cmdLine=False):
    """Box-Least-Squares transit search over a Kepler light curve.

    NOTE(review): this is a token-for-token duplicate of the earlier
    ``kepbls`` in this file (only the formatting differs).  Being defined
    second, it silently shadows the first definition at import time — one
    of the two copies should be removed.

    For ``nsearch`` trial periods between ``minper`` and ``maxper`` (days),
    the light curve is phase-folded into ``nbins`` bins and scanned over
    box durations between ``mindur`` and ``maxdur`` (hours) for the maximum
    signal residue.  Results are appended to ``outfile`` as a new 'BLS'
    FITS extension and optionally plotted.  ``status`` is chained through
    every kepio/kepmsg call and short-circuits later stages when non-zero.
    """

# startup parameters
    numpy.seterr(all="ignore")
    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPBLS -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + str(datacol) + ' '
    call += 'errcol=' + str(errcol) + ' '
    call += 'minper=' + str(minper) + ' '
    call += 'maxper=' + str(maxper) + ' '
    call += 'mindur=' + str(mindur) + ' '
    call += 'maxdur=' + str(maxdur) + ' '
    call += 'nsearch=' + str(nsearch) + ' '
    call += 'nbins=' + str(nbins) + ' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot=' + plotit + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

# start time
    kepmsg.clock('KEPBLS started at', logfile, verbose)

# is duration greater than one bin in the phased light curve?
    if float(nbins) * maxdur / 24.0 / maxper <= 1.0:
        message = 'WARNING -- KEPBLS: ' + str(
            maxdur) + ' hours transit duration < 1 phase bin when P = '
        message += str(maxper) + ' days'
        kepmsg.warn(logfile, message)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPBLS: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): `file` here is the Python 2 builtin, not a filename —
        # confirm what kepkey.emptykeys expects.
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# filter input data table
    if status == 0:
        # stack [time, flux, err], rotate to rows, drop NaN rows;
        # after rot90(..., 3) the column order is reversed: [err, flux, time]
        work1 = numpy.array(
            [table.field('time'), table.field(datacol), table.field(errcol)])
        work1 = numpy.rot90(work1, 3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns
    if status == 0:
        intime = work1[:, 2] + bjdref
        indata = work1[:, 1]
        inerr = work1[:, 0]

# test whether the period range is sensible
    if status == 0:
        tr = intime[-1] - intime[0]
        if maxper > tr:
            message = 'ERROR -- KEPBLS: maxper is larger than the time range of the input data'
            status = kepmsg.err(logfile, message, verbose)

# prepare time series
    if status == 0:
        # work1 is reused here as zero-based time; work2 is mean-subtracted flux
        work1 = intime - intime[0]
        work2 = indata - numpy.mean(indata)

# start period search
    if status == 0:
        srMax = numpy.array([], dtype='float32')
        transitDuration = numpy.array([], dtype='float32')
        transitPhase = numpy.array([], dtype='float32')
        dPeriod = (maxper - minper) / nsearch
        trialPeriods = numpy.arange(minper, maxper + dPeriod, dPeriod, dtype='float32')
        complete = 0
        print ' '
        for trialPeriod in trialPeriods:
            # console progress indicator (carriage-return overwrite)
            fracComplete = float(complete) / float(len(trialPeriods) - 1) * 100.0
            txt = '\r'
            txt += 'Trial period = '
            txt += str(int(trialPeriod))
            txt += ' days ['
            txt += str(int(fracComplete))
            txt += '% complete]'
            txt += ' ' * 20
            sys.stdout.write(txt)
            sys.stdout.flush()
            complete += 1
            # per-trial slots; overwritten whenever a better residue is found
            srMax = numpy.append(srMax, 0.0)
            transitDuration = numpy.append(transitDuration, numpy.nan)
            transitPhase = numpy.append(transitPhase, numpy.nan)
            trialFrequency = 1.0 / trialPeriod

# minimum and maximum transit durations in quantized phase units
            duration1 = max(int(float(nbins) * mindur / 24.0 / trialPeriod), 2)
            duration2 = max(
                int(float(nbins) * maxdur / 24.0 / trialPeriod) + 1, duration1 + 1)

# 30 minutes in quantized phase units
            halfHour = int(0.02083333 / trialPeriod * nbins + 1)

# compute folded time series with trial period
            work4 = numpy.zeros((nbins), dtype='float32')
            work5 = numpy.zeros((nbins), dtype='float32')
            phase = numpy.array(
                ((work1 * trialFrequency) - numpy.floor(work1 * trialFrequency))
                * float(nbins), dtype='int')
            ptuple = numpy.array([phase, work2, inerr])
            ptuple = numpy.rot90(ptuple, 3)
            # sort rows by phase bin (column 2 after rotation)
            phsort = numpy.array(sorted(ptuple, key=lambda ph: ph[2]))
            for i in range(nbins):
                elements = numpy.nonzero(phsort[:, 2] == float(i))[0]
                work4[i] = numpy.mean(phsort[elements, 1])
                work5[i] = math.sqrt(
                    numpy.sum(numpy.power(phsort[elements, 0], 2)) / len(elements))

# extend the work arrays beyond nbins by wrapping
            work4 = numpy.append(work4, work4[:duration2])
            work5 = numpy.append(work5, work5[:duration2])

# calculate weights of folded light curve points
            sigmaSum = numpy.nansum(numpy.power(work5, -2))
            omega = numpy.power(work5, -2) / sigmaSum

# calculate weighted phased light curve
            s = omega * work4

# iterate through trial period phase
            for i1 in range(nbins):

# iterate through transit durations
                for duration in range(duration1, duration2 + 1, int(halfHour)):

# calculate maximum signal residue
                    i2 = i1 + duration
                    sr1 = numpy.sum(numpy.power(s[i1:i2], 2))
                    sr2 = numpy.sum(omega[i1:i2])
                    sr = math.sqrt(sr1 / (sr2 * (1.0 - sr2)))
                    if sr > srMax[-1]:
                        srMax[-1] = sr
                        transitDuration[-1] = float(duration)
                        transitPhase[-1] = float((i1 + i2) / 2)

# normalize maximum signal residue curve
        bestSr = numpy.max(srMax)
        bestTrial = numpy.nonzero(srMax == bestSr)[0][0]
        srMax /= bestSr
        # convert duration from phase bins to hours, phase to BJD-2454833
        transitDuration *= trialPeriods / 24.0
        BJD0 = numpy.array(transitPhase * trialPeriods / nbins,
                           dtype='float64') + intime[0] - 2454833.0
        print '\n'

# clean up x-axis unit
    if status == 0:
        ptime = copy(trialPeriods)
        xlab = 'Trial Period (days)'

# clean up y-axis units
    if status == 0:
        pout = copy(srMax)
        ylab = 'Normalized Signal Residue'

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # pad with baseline endpoints so pylab.fill() closes the polygon
        ptime = insert(ptime, [0], [ptime[0]])
        ptime = append(ptime, [ptime[-1]])
        pout = insert(pout, [0], [0.0])
        pout = append(pout, 0.0)

# plot light curve
    if status == 0 and plot:
        plotLatex = True
        try:
            params = {
                'backend': 'png',
                'axes.linewidth': 2.5,
                'axes.labelsize': labelsize,
                'axes.font': 'sans-serif',
                'axes.fontweight': 'bold',
                'text.fontsize': 12,
                'legend.fontsize': 12,
                'xtick.labelsize': ticksize,
                'ytick.labelsize': ticksize
            }
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize, ysize])
        pylab.clf()

# plot data
        ax = pylab.axes([0.06, 0.10, 0.93, 0.87])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90)

# plot curve
    if status == 0 and plot:
        pylab.plot(ptime[1:-1], pout[1:-1], color=lcolor, linestyle='-',
                   linewidth=lwidth)
        pylab.fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        pylab.xlabel(xlab, {'color': 'k'})
        pylab.ylabel(ylab, {'color': 'k'})
        pylab.grid()

# plot ranges
    if status == 0 and plot:
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            pylab.ylim(1.0e-10, ymax + yr * 0.01)

# render plot
    if status == 0 and plot:
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# append new BLS data extension to the output file
    if status == 0:
        col1 = Column(name='PERIOD', format='E', unit='days', array=trialPeriods)
        col2 = Column(name='BJD0', format='D', unit='BJD - 2454833', array=BJD0)
        col3 = Column(name='DURATION', format='E', unit='hours', array=transitDuration)
        col4 = Column(name='SIG_RES', format='E', array=srMax)
        cols = ColDefs([col1, col2, col3, col4])
        instr.append(new_table(cols))
        instr[-1].header.cards['TTYPE1'].comment = 'column title: trial period'
        instr[-1].header.cards[
            'TTYPE2'].comment = 'column title: trial mid-transit zero-point'
        instr[-1].header.cards[
            'TTYPE3'].comment = 'column title: trial transit duration'
        instr[-1].header.cards[
            'TTYPE4'].comment = 'column title: normalized signal residue'
        instr[-1].header.cards['TFORM1'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM2'].comment = 'column type: float64'
        instr[-1].header.cards['TFORM3'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM4'].comment = 'column type: float32'
        instr[-1].header.cards['TUNIT1'].comment = 'column units: days'
        instr[-1].header.cards[
            'TUNIT2'].comment = 'column units: BJD - 2454833'
        instr[-1].header.cards['TUNIT3'].comment = 'column units: hours'
        instr[-1].header.update('EXTNAME', 'BLS', 'extension name')
        instr[-1].header.update('PERIOD', trialPeriods[bestTrial],
                                'most significant trial period [d]')
        instr[-1].header.update('BJD0', BJD0[bestTrial] + 2454833.0,
                                'time of mid-transit [BJD]')
        instr[-1].header.update('TRANSDUR', transitDuration[bestTrial],
                                'transit duration [hours]')
        instr[-1].header.update('SIGNRES', srMax[bestTrial] * bestSr,
                                'maximum signal residue')

# history keyword in output file
    if status == 0:
        status = kepkey.history(call, instr[0], outfile, logfile, verbose)
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# print best trial period results
    if status == 0:
        print ' Best trial period = %.5f days' % trialPeriods[bestTrial]
        print ' Time of mid-transit = BJD %.5f' % (BJD0[bestTrial] + 2454833.0)
        print ' Transit duration = %.5f hours' % transitDuration[bestTrial]
        print ' Maximum signal residue = %.4g \n' % (srMax[bestTrial] * bestSr)

# end time
    if (status == 0):
        message = 'KEPBLS completed at'
    else:
        message = '\nKEPBLS aborted at'
    kepmsg.clock(message, logfile, verbose)
def keptransit(inputfile,outputfile,datacol,errorcol,periodini_d,rprsini,T0ini,
    Eccini,arsini,incini,omegaini,LDparams,secini,fixperiod,fixrprs,fixT0,
    fixEcc,fixars,fixinc,fixomega,fixsec,fixfluxoffset,removeflaggeddata,ftol=0.0001,fitter='nothing',norm=False,
    clobber=False, plot=True,verbose=0,logfile='logfile.dat',status=0,cmdLine=False):
    """
    Fit a Mandel & Agol-style transit model to a Kepler light curve.

    Reads time/flux/error columns from `inputfile`, optionally normalizes and
    strips flagged cadences, fits the transit parameters with the chosen
    optimizer ('leastsq', 'fmin' or 'anneal'), writes the model to
    `outputfile` and optionally plots the folded result.

    tmod.lightcurve(xdata,period,rprs,T0,Ecc,ars, incl, omega, ld, sec)

    input transit parameters are
    Period in days
    T0
    rplanet / rstar
    a / rstar
    inclination

    limb darkening code number:
    0 = uniform
    1 = linear
    2 = quadratic
    3 = square root
    4 = non linear

    LDarr:
    u -- linear limb-darkening (set NL=1)
    a, b -- quadratic limb-darkening (set NL=2)
    c, d -- root-square limb-darkening (set NL= -2)
    a1, a2, a3, a4 -- nonlinear limb-darkening  (set NL=4)
    Nothing at all -- uniform limb-darkening (set NL=0)
    """

    # suppress numpy warnings; the fit routinely produces NaN/overflow
    # intermediates that are handled downstream
    np.seterr(all="ignore")

    #write to a logfile
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPTRANSIT -- '
    call += 'inputfile='+inputfile+' '
    call += 'outputfile='+outputfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errorcol='+str(errorcol)+' '
    call += 'periodini_d='+str(periodini_d)+' '
    call += 'rprsini='+str(rprsini)+' '
    call += 'T0ini='+str(T0ini)+' '
    call += 'Eccini='+str(Eccini)+' '
    call += 'arsini='+str(arsini)+' '
    call += 'incini='+str(incini)+' '
    call += 'omegaini='+str(omegaini)+' '
    call += 'LDparams='+str(LDparams)+' '
    call += 'secini='+str(secini)+' '
    call += 'fixperiod='+str(fixperiod)+' '
    call += 'fixrprs='+str(fixrprs)+' '
    call += 'fixT0='+str(fixT0)+' '
    call += 'fixEcc='+str(fixEcc)+' '
    call += 'fixars='+str(fixars)+' '
    call += 'fixinc='+str(fixinc)+' '
    call += 'fixomega='+str(fixomega)+' '
    call += 'fixsec='+str(fixsec)+' '
    call += 'fixfluxoffset='+str(fixfluxoffset)+' '
    call += 'removeflaggeddata='+str(removeflaggeddata)+' '
    call += 'ftol='+str(ftol)+' '
    call += 'fitter='+str(fitter)+' '
    call += 'norm='+str(norm)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    #chatter = 'n'
    #if (verbose): chatter = 'y'
    #call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    kepmsg.clock('KEPTRANSIT started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outputfile,logfile,verbose)
    if kepio.fileexists(outputfile):
        message = 'ERROR -- KEPTRANSIT: ' + outputfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(inputfile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,
            inputfile,logfile,verbose,status)
    if status == 0:
        # FILEVER keyword present -> fluxes are already per-cadence; otherwise
        # divide by the cadence later (cadenom)
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(inputfile,instr[1],logfile,verbose)

    if status == 0:
        intime_o = table.field('time')
        influx_o = table.field(datacol)
        inerr_o = table.field(errorcol)
        # missing quality column -> treat every cadence as good
        try:
            qualflag = table.field('SAP_QUALITY')
        except:
            qualflag = np.zeros(len(intime_o))

    if status == 0:
        intime, indata, inerr, baddata = cutBadData(intime_o,
            influx_o,inerr_o,removeflaggeddata,qualflag)

    if status == 0 and norm:
        #first remove outliers before normalizing
        threesig = 3.* np.std(indata)
        # NOTE(review): this mask compares indata against itself +/- threesig,
        # which is always True elementwise — it likely never rejects anything;
        # confirm against upstream PyKE before relying on the clipping
        mask = np.logical_and(indata< indata + threesig,indata > indata - threesig)
        #now normalize
        indata = indata / np.median(indata[mask])

    if status == 0:
        #need to check if LD params are sensible and in right format
        LDparams = [float(i) for i in LDparams.split()]

        # convert angles from degrees (user input) to radians (model input)
        incini = incini * np.pi / 180.

        omegaini = omegaini * np.pi / 180.

    # geometry sanity check: impact parameter beyond 1 + rp/rs never transits
    if arsini*np.cos(incini) > 1.0 + rprsini:
        message = 'The guess inclination and a/r* values result in a non-transing planet'
        status = kepmsg.err(logfile,message,verbose)

    if status == 0:
        fixed_dict = fix_params(fixperiod,fixrprs,fixT0,
            fixEcc,fixars,fixinc,fixomega,fixsec,fixfluxoffset)

    #force flux offset to be guessed at zero
    fluxoffsetini = 0.0

    if status == 0:
        guess_params = [periodini_d,rprsini,T0ini,Eccini,arsini, incini, omegaini,
        secini,fluxoffsetini]

        print('cleaning done: about to fit transit')

        # dispatch on requested optimizer; all share the same args tuple
        if fitter == 'leastsq':
            fit_output = leastsq(fit_tmod,guess_params,
                args=[LDparams,intime,indata,inerr,fixed_dict,guess_params],
                full_output=True,ftol=ftol)
        elif fitter == 'fmin':
            fit_output = fmin(fit_tmod2,guess_params,
                args=[LDparams,intime,indata,inerr,fixed_dict,guess_params],
                full_output=True,ftol=ftol,xtol=ftol)
        elif fitter == 'anneal':
            fit_output = anneal(fit_tmod2,guess_params,
                args=[LDparams,intime,indata,inerr,fixed_dict,guess_params],
                full_output=True)

    if status == 0:
        # for each parameter: take the initial guess if it was fixed,
        # otherwise the fitted value
        if fixed_dict['period'] == True:
            newperiod = guess_params[0]
            print('Fixed period (days) = ' + str(newperiod))
        else:
            newperiod = fit_output[0][0]
            print('Fit period (days) = ' + str(newperiod))
        if fixed_dict['rprs'] == True:
            newrprs = guess_params[1]
            print('Fixed R_planet / R_star = ' + str(newrprs))
        else:
            newrprs = fit_output[0][1]
            print('Fit R_planet / R_star = ' + str(newrprs))
        if fixed_dict['T0'] == True:
            newT0 = guess_params[2]
            print('Fixed T0 (BJD) = ' + str(newT0))
        else:
            newT0 = fit_output[0][2]
            print('Fit T0 (BJD) = ' + str(newT0))
        if fixed_dict['Ecc'] == True:
            newEcc = guess_params[3]
            print('Fixed eccentricity = ' + str(newEcc))
        else:
            newEcc = fit_output[0][3]
            print('Fit eccentricity = ' + str(newEcc))
        if fixed_dict['ars'] == True:
            newars = guess_params[4]
            print('Fixed a / R_star = ' + str(newars))
        else:
            newars = fit_output[0][4]
            print('Fit a / R_star = ' + str(newars))
        if fixed_dict['inc'] == True:
            newinc = guess_params[5]
            print('Fixed inclination (deg) = ' + str(newinc* 180. / np.pi))
        else:
            newinc = fit_output[0][5]
            print('Fit inclination (deg) = ' + str(newinc* 180. / np.pi))
        if fixed_dict['omega'] == True:
            newomega = guess_params[6]
            print('Fixed omega = ' + str(newomega))
        else:
            newomega = fit_output[0][6]
            print('Fit omega = ' + str(newomega))
        if fixed_dict['sec'] == True:
            newsec = guess_params[7]
            print('Fixed seconary eclipse depth = ' + str(newsec))
        else:
            newsec = fit_output[0][7]
            print('Fit seconary eclipse depth = ' + str(newsec))
        if fixfluxoffset == False:
            newfluxoffset = fit_output[0][8]
            print('Fit flux offset = ' + str(newfluxoffset))

        # evaluate the best-fit model on the observed time grid
        modelfit = tmod.lightcurve(intime,newperiod,newrprs,newT0,newEcc,
            newars,newinc,newomega,LDparams,newsec)

        if fixfluxoffset == False:
            modelfit += newfluxoffset

        #output to a file
        phi, fluxfold, modelfold, errorfold, phiNotFold = fold_data(intime,
            modelfit,indata,inerr,newperiod,newT0)

        make_outfile(instr,outputfile,phiNotFold,modelfit, baddata)

    # end time
    if (status == 0):
        message = 'KEPTRANSIT completed at'
    else:
        message = '\nKEPTRANSIT aborted at'
    kepmsg.clock(message,logfile,verbose)

    if plot and status == 0:
        do_plot(intime,modelfit,indata,inerr,newperiod,newT0,cmdLine)
def kepdip(infile,outfile,datacol,dmethod,kneighb,hstd,plot,plotlab,
    clobber,verbose,logfile,status):
    """
    Perform a k-nearest neighbor regression analysis.

    Detects dips in a Kepler light curve via `_find_dips`, optionally plots
    the input data with the detected dip points overplotted, and writes the
    dip-formatted data column back out to `outfile`.
    """

    ## startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#9AFF9A'
    falpha = 0.3

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDIP -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'dmethod='+dmethod+' '
    call += 'hstd='+str(hstd)+' '
    call += 'kneighb='+str(kneighb)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    ## start time
    kepmsg.clock('KEPDIP started at',logfile,verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPDIP: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
        # fall back to measuring the cadence from the data if the header lacks it
        if cadence == 0.0:
            tstart, tstop, ncad, cadence, status = kepio.cadence(instr,infile,logfile,verbose,status)
    if status == 0:
        # FILEVER present -> fluxes already per-cadence (cadenom = 1)
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
    if status == 0:
        flux, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)

    # filter input data table
    if status == 0:
        # if NANCLEAN is absent, compact the table in place to drop cadences
        # with non-finite time/flux or zero flux, then record the keyword
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    ## read table columns
    if status == 0:
        # old-format files use a 'barytime' column; newer ones use 'time'
        try:
            intime = instr[1].data.field('barytime')
        except:
            intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## smooth data
    if status == 0:
        # outdata = knn_predict(intime, indata, kmethod, kneighb)
        outdata_t, outdata_l, outdata_fmt = _find_dips(intime, indata, dmethod, kneighb, hstd)

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        ptime2 = outdata_t - intime0
        # print ptime,intime,intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata_l * 1.0
        # scale the flux down by a power of ten for a readable y axis
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, plotlab)

        ## data limits
        xmin = numpy.nanmin(ptime)
        xmax = numpy.nanmax(ptime)
        ymin = numpy.min(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the series ends so the fill polygon closes at zero flux
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        if (len(ptime2) > 0):
            ptime2 = insert(ptime2,[0],[ptime2[0]])
            ptime2 = append(ptime2,[ptime2[-1]])
            pout2 = insert(pout2,[0],[0.0])
            pout2 = append(pout2,0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            print('ERROR -- KEPDIP: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(1,figsize=[xsize,ysize])

        ## plot regression data
        ax = pylab.axes([0.06,0.1,0.93,0.87])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.scatter(ptime, pout, color='#214CAE', s=2)
        # overplot detected dip points, if any
        if (len(ptime2) > 0):
            pylab.scatter(ptime2, pout2, color='#47AE10', s=35, marker='o', linewidths=2, alpha=0.4)
        xlabel(xlab, {'color' : 'k'})
        ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()
        pylab.draw()
        pylab.savefig(re.sub('\.\S+','.png',outfile),dpi=100)

    ## write output file
    if status == 0:
        for i in range(len(outdata_fmt)):
            instr[1].data.field(datacol)[i] = outdata_fmt[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPDIP completed at'
    else:
        message = '\nKEPDIP aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepdraw(infile,outfile,datacol,ploterr,errcol,quality,
    lcolor,lwidth,fcolor,falpha,labelsize,ticksize,
    xsize,ysize,fullrange,chooserange,y1,y2,plotgrid,
    ylabel,plottype,verbose,logfile,status,cmdLine=False):
    """
    Plot a Kepler light-curve time series from a FITS table column.

    Reads time, data, error and SAP_QUALITY columns from `infile`, strips
    NaN/Inf cadences (and, when `quality` is set, flagged cadences), and
    renders the series either as points (plottype='fast') or as gap-aware
    line segments, saving to `outfile` unless it is 'none'.
    """

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDRAW -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    perr = 'n'
    if (ploterr): perr = 'y'
    call += 'ploterr='+perr+ ' '
    call += 'errcol='+errcol+' '
    qual = 'n'
    if (quality): qual = 'y'
    call += 'quality='+qual+ ' '
    call += 'lcolor='+str(lcolor)+' '
    call += 'lwidth='+str(lwidth)+' '
    call += 'fcolor='+str(fcolor)+' '
    call += 'falpha='+str(falpha)+' '
    call += 'labelsize='+str(labelsize)+' '
    call += 'ticksize='+str(ticksize)+' '
    call += 'xsize='+str(xsize)+' '
    call += 'ysize='+str(ysize)+' '
    frange = 'n'
    if (fullrange): frange = 'y'
    call += 'fullrange='+frange+ ' '
    crange = 'n'
    if (chooserange): crange = 'y'
    call += 'chooserange='+crange+ ' '
    call += 'ymin='+str(y1)+' '
    call += 'ymax='+str(y2)+' '
    pgrid = 'n'
    if (plotgrid): pgrid = 'y'
    call += 'plotgrid='+pgrid+ ' '
    call += 'ylabel='+str(ylabel)+' '
    call += 'plottype='+plottype+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPDRAW started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # open input file
    if status == 0:
        struct, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(struct,infile,logfile,verbose,status)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,struct[1],logfile,verbose)

    # read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,table,logfile,verbose)
        intime += bjdref
        indata, status = kepio.readfitscol(infile,table,datacol,logfile,verbose)
        indataerr, status = kepio.readfitscol(infile,table,errcol,logfile,verbose)
        qualty, status = kepio.readfitscol(infile,table,'SAP_QUALITY',logfile,verbose)

    # close infile
    if status == 0:
        status = kepio.closefits(struct,logfile,verbose)

    # remove infinities and bad data
    if status == 0:
        # if the whole error column is NaN, substitute a tiny constant error
        if numpy.isnan(numpy.nansum(indataerr)):
            indataerr[:] = 1.0e-5
        work1 = numpy.array([intime, indata, indataerr, qualty],dtype='float64')
        # rot90(...,3) turns the (4,N) row stack into (N,4) rows with the
        # column order REVERSED: col 3 = time, col 2 = data, col 1 = error,
        # col 0 = quality — the indexing below depends on this
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]
        work1 = work1[~numpy.isinf(work1).any(1)]
        if quality:
            # keep only cadences with SAP_QUALITY == 0 (column 0 after rot90)
            work1 = work1[work1[:,0] == 0.0]
        barytime = numpy.array(work1[:,3],dtype='float64')
        data = numpy.array(work1[:,2],dtype='float32')
        dataerr = numpy.array(work1[:,1],dtype='float32')
        if len(barytime) == 0:
            message = 'ERROR -- KEPDRAW: Plotting arrays are full of NaN'
            status = kepmsg.err(logfile,message,verbose)

    # clean up x-axis unit
    if status == 0:
        barytime0 = float(int(tstart / 100) * 100.0)
        barytime -= barytime0
        xlab = 'BJD $-$ %d' % barytime0

        # clean up y-axis units
        nrm = 0
        try:
            nrm = len(str(int(numpy.nanmax(data))))-1
        except:
            nrm = 0
        data = data / 10**nrm
        if 'e$^-$ s$^{-1}$' in ylabel or 'default' in ylabel:
            if nrm == 0:
                ylab1 = 'e$^-$ s$^{-1}$'
            else:
                ylab1 = '10$^{%d}$ e$^-$ s$^{-1}$' % nrm
        else:
            ylab1 = re.sub('_','-',ylabel)

        # data limits
        xmin = numpy.nanmin(barytime)
        xmax = numpy.nanmax(barytime)
        ymin = numpy.nanmin(data)
        ymax = numpy.nanmax(data)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the series ends with off-scale values so the fill polygon closes
        barytime = insert(barytime,[0],[barytime[0]])
        barytime = append(barytime,[barytime[-1]])
        data = insert(data,[0],[-10000.0])
        data = append(data,-10000.0)

        # define plot formats
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            pylab.rcParams.update(params)
        except:
            pass

        # define size of plot on monitor screen
        pylab.figure(figsize=[xsize,ysize])

        # delete any fossil plots in the matplotlib window
        pylab.clf()

        # position axes inside the plotting window
        # ax = pylab.axes([0.1,0.11,0.89,0.87])
        ax = pylab.subplot(111)
        pylab.subplots_adjust(0.06,0.15,0.92,0.83)

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        ax.yaxis.set_major_locator(MaxNLocator(5))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=ticksize)

        # if plot type is 'fast' plot data time series as points
        if plottype == 'fast':
            pylab.plot(barytime,data,'o',color=lcolor)

        # if plot type is 'pretty' plot data time series as an unbroken line,
        # retaining data gaps: flush the accumulated segment whenever the
        # time step exceeds two cadences
        else:
            ltime = numpy.array([],dtype='float64')
            ldata = numpy.array([],dtype='float32')
            dt = 0
            work1 = 2.0 * cadence / 86400
            for i in range(1,len(data)-1):
                dt = barytime[i] - barytime[i-1]
                if dt < work1:
                    ltime = numpy.append(ltime,barytime[i])
                    ldata = numpy.append(ldata,data[i])
                else:
                    pylab.plot(ltime,ldata,color=lcolor,linestyle='-',linewidth=lwidth)
                    ltime = numpy.array([],dtype='float64')
                    ldata = numpy.array([],dtype='float32')
            pylab.plot(ltime,ldata,color=lcolor,linestyle='-',linewidth=lwidth)

        # plot the fill color below data time series, with no data gaps
        pylab.fill(barytime,data,fc=fcolor,linewidth=0.0,alpha=falpha)

        # define plot x and y limits
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin-yr*0.01 <= 0.0 or fullrange:
            pylab.ylim(1.0e-10,ymax+yr*0.01)
        else:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        if chooserange:
            pylab.ylim(y1,y2)

        # plot labels
        pylab.xlabel(xlab, {'color' : 'k'})
        try:
            pylab.ylabel(ylab1, {'color' : 'k'})
        except:
            # fall back to plain-text label if LaTeX rendering fails
            ylab1 = '10**%d e-/s' % nrm
            pylab.ylabel(ylab1, {'color' : 'k'})

        # make grid on plot
        # if plotgrid: pylab.grid()

# TEMPORARY !!!!!!!!!!!!!!!!!!!
#        btime = numpy.arange(barytime[0],barytime[-1],0.25) + 0.125
#        bflux = numpy.zeros((len(btime)))
#        j = 0
#        work = numpy.array([])
#        for i in range(1,len(barytime)-1):
#            if barytime[i] >= btime[j] - 0.125 and barytime[i] < btime[j] + 0.125:
#                work = numpy.append(work,data[i])
#            else:
#                bflux[j] = numpy.mean(work)
#                work = numpy.array([])
#                j += 1
#        bflux[j] = numpy.mean(work)
#
#        pylab.plot(btime,bflux,color='r',linestyle='',marker='D',markersize=20)
#        print numpy.std(bflux)
#
#        pylab.plot([0.0,10000.0],[-49.5,-49.5],color='k',linestyle='--',linewidth=2.0)
#        pylab.plot([0.0,10000.0],[49.5,49.5],color='k',linestyle='--',linewidth=2.0)
##        pylab.plot([0.0,10000.0],[15.5,15.5],color='k',linestyle=':',linewidth=4.0)
##        pylab.plot([0.0,10000.0],[-15.5,-15.5],color='k',linestyle=':',linewidth=4.0)
##        pylab.plot([0.0,10000.0],[-202,-202],color='k',linestyle='--',linewidth=2.0)
##        pylab.plot([0.0,10000.0],[202,202],color='k',linestyle='--',linewidth=2.0)
##        pylab.plot([0.0,10000.0],[0,0],color='k',linestyle=':',linewidth=4.0)
##        pylab.plot([0.0,10000.0],[-81.*12.3,-81.*12.3],color='k',linestyle=':',linewidth=4.0)

        ax.minorticks_on()
        ax.tick_params('both', length=20, width=2, which='major')
        ax.tick_params('both', length=10, width=1, which='minor')

    # save plot to file
    if status == 0 and outfile.lower() != 'none':
        pylab.savefig(outfile)

    # render plot
    if cmdLine:
        pylab.show(block=True)
    else:
        pylab.ion()
        pylab.plot([])
        pylab.ioff()

    # end time
    if (status == 0):
        message = 'KEPDRAW completed at'
    else:
        message = '\nKEPDRAW aborted at'
    kepmsg.clock(message,logfile,verbose)
def keptest(infile,outfile,datacol,ploterr,errcol,quality,
    lcolor,lwidth,fcolor,falpha,labelsize,ticksize,
    xsize,ysize,fullrange,plotgrid,verbose,logfile,status):
    """
    Experimental transit-detection plot using an RBF support vector regression.

    Reads a light curve from `infile`, fits an SVR (RBF kernel) baseline to
    the flux, then scans for groups of points dipping below an adaptive
    threshold (baseline minus half the running dip depth) and marks each
    local minimum. Saves the figure to `outfile` unless it is 'none'.

    Fix vs. original: the diagnostic `print` used Python 2 statement syntax
    (a SyntaxError under Python 3); converted to the `print()` call form
    used elsewhere in this file.
    """

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPTEST -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    perr = 'n'
    if (ploterr): perr = 'y'
    call += 'ploterr='+perr+ ' '
    call += 'errcol='+errcol+' '
    qual = 'n'
    if (quality): qual = 'y'
    call += 'quality='+qual+ ' '
    call += 'lcolor='+str(lcolor)+' '
    call += 'lwidth='+str(lwidth)+' '
    call += 'fcolor='+str(fcolor)+' '
    call += 'falpha='+str(falpha)+' '
    call += 'labelsize='+str(labelsize)+' '
    call += 'ticksize='+str(ticksize)+' '
    call += 'xsize='+str(xsize)+' '
    call += 'ysize='+str(ysize)+' '
    frange = 'n'
    if (fullrange): frange = 'y'
    call += 'fullrange='+frange+ ' '
    pgrid = 'n'
    if (plotgrid): pgrid = 'y'
    call += 'plotgrid='+pgrid+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPTEST started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # open input file
    if status == 0:
        struct, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(struct,infile,logfile,verbose,status)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,struct[1],logfile,verbose)

    # read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,table,logfile,verbose)
        #intime += bjdref
        indata, status = kepio.readfitscol(infile,table,datacol,logfile,verbose)
        if (ploterr):
            indataerr, status = kepio.readfitscol(infile,table,errcol,logfile,verbose)
    if status == 0:
        # default: no cadence is flagged
        gaps = zeros(len(indata))

    # read table quality column
    if status == 0 and quality:
        try:
            qualtest = table.field('SAP_QUALITY')
        except:
            message = 'ERROR -- KEPTEST: no SAP_QUALITY column found in file ' + infile
            message += '. Use keptest quality=n'
            status = kepmsg.err(logfile,message,verbose)
    if status == 0 and quality:
        gaps, status = kepio.readfitscol(infile,table,'SAP_QUALITY',logfile,verbose)

    # close infile
    if status == 0:
        status = kepio.closefits(struct,logfile,verbose)

    # remove infinities and bad data
    if status == 0:
        barytime = []; data = []; dataerr = []
        # raw/corrected aperture fluxes are per-interval; divide by cadence
        if 'ap_raw' in datacol or 'ap_corr' in datacol:
            cadenom = cadence
        else:
            cadenom = 1.0
        for i in range(len(intime)):
            if numpy.isfinite(indata[i]) and indata[i] != 0.0 and gaps[i] == 0:
                barytime.append(intime[i])
                data.append(indata[i] / cadenom)
                if (ploterr):
                    dataerr.append(indataerr[i])
        barytime = numpy.array(barytime,dtype='float64')
        data = numpy.array(data,dtype='float64')
        if (ploterr):
            dataerr = numpy.array(dataerr,dtype='float64')

    # clean up x-axis unit
    if status == 0:
        barytime0 = float(int(tstart / 100) * 100.0)
        barytime -= barytime0
        xlab = 'BJD $-$ %d' % barytime0

        # clean up y-axis units: scale flux down by a power of ten
        try:
            nrm = len(str(int(data.max())))-1
        except:
            nrm = 0
        data = data / 10**nrm
        ylab1 = '10$^%d$ e$^-$ s$^{-1}$' % nrm

        # data limits
        xmin = barytime.min()
        xmax = barytime.max()
        ymin = data.min()
        ymax = data.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # push the end points off-scale so the series edges are not drawn
        data[0] = ymin - yr * 2.0
        data[-1] = ymin - yr * 2.0
        if fullrange:
            data[0] = 0.0
            data[-1] = 0.0

        # define plot formats
        try:
            rc('text', usetex=True)
            rc('font',**{'family':'sans-serif','sans-serif':['sans-serif']})
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            pylab.rcParams.update(params)
        except:
            pass

        # define size of plot on monitor screen
        pylab.figure(figsize=[xsize,ysize])

        # delete any fossil plots in the matplotlib window
        pylab.clf()

        # position axes inside the plotting window
        ax = pylab.axes([0.06,0.1,0.93,0.88])

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)

        # plot data time series as an unbroken line, retaining data gaps
        ltime = []; ldata = []; ldataerr = []; ldatagaps = []
        dt = 0

        # SVR regressors; only the RBF kernel is actually used below
        svr_rbf = SVR(kernel='rbf', C=1, gamma=0.1)
        svr_lin = SVR(kernel='linear', C=1)
        svr_poly = SVR(kernel='poly', C=1, degree=2)
        svr_ltime = []; svr_ldata = []

        for i in range(len(indata)):
            if i > 0:
                if numpy.isfinite(indata[i]) and indata[i] != 0.0 :
                    # print intime[i], " ", indata[i]
                    ltime.append(intime[i])
                    ldata.append(indata[i])
                    # SVR expects 2-D sample arrays, hence the [x] wrapping
                    svr_ltime.append([intime[i]])

        ltime = array(ltime, dtype=float64)
        ldata = array(ldata, dtype=float64)

        if len(ldata) > 0 and len(ltime) > 0 :
            pylab.scatter (ltime, ldata, s=1, color=lcolor, label='Data:Input lightcurve')
            svr_ltime = array(svr_ltime, dtype='float64')
            svr_ldata = array(ldata, dtype='float64')
            svr_ldata_rbf = svr_rbf.fit(svr_ltime, svr_ldata).predict(svr_ltime)

            ## Get the transits!
            # Identify the difference of data min. and the regression line
            # = An approximate initial dip value.
            ldata_min = min(ldata)
            ldata_min_i = ldata.tolist().index(ldata_min)
            fluxdip = svr_ldata_rbf[ldata_min_i] - ldata_min
            # fluxthresh = (svr_ldata_rbf[ldata_min_i] + ldata_min ) / 2.0
            # FIX: was a Python 2 print statement (SyntaxError under Python 3)
            print("ldata min = ", ldata_min, "fluxdip =", fluxdip)
            thresh_x = []; thresh_y = [];

            # Sequentially scan the inputs, look for y-points below the
            # initial mean. Group the points
            i = 0
            while i < len(ldata):
                # print intime[i], " ", indata[i]
                fluxmin = fluxthresh = svr_ldata_rbf[i] - fluxdip/2.0
                if ldata[i] < fluxthresh:
                    thresh_y.append(fluxthresh); thresh_x.append(ltime[i])
                    # Identify the local min, calculate difference with regression line.
                    while i < len(ldata) and ldata[i] < fluxthresh :
                        if ldata[i] < fluxmin:
                            fluxmin = ldata[i]
                            fluxmin_i = i
                        i += 1
                    # We got the local min, now plot the line,
                    # converge the dip value with the newly calculated one.
                    pylab.plot([ ltime[fluxmin_i], ltime[fluxmin_i] ],
                               [ ldata[fluxmin_i], svr_ldata_rbf[fluxmin_i] ],
                               'r-', linewidth=1)
                    fluxdip = (fluxdip + svr_ldata_rbf[fluxmin_i] - fluxmin)/2.0
                i += 1
            pylab.plot(thresh_x, thresh_y, c='c', label='Adapted transit threshold')
            pylab.scatter(thresh_x, thresh_y, c='k', s=1)
            pylab.plot(svr_ltime, svr_ldata_rbf, c='g', label='Cum. RBF model')

        if (ploterr):
            ldataerr = numpy.array(ldataerr,dtype='float32')

        # plot labels
        pylab.xlabel(xlab, {'color' : 'k'})
        try:
            pylab.ylabel(ylab1, {'color' : 'k'})
        except:
            # fall back to plain-text label if LaTeX rendering fails
            ylab1 = '10**%d e-/s' % nrm
            pylab.ylabel(ylab1, {'color' : 'k'})

        # make grid on plot
        if plotgrid: pylab.grid()

        # paint plot into window
        pylab.legend()
        pylab.draw()

    # save plot to file
    if status == 0 and outfile.lower() != 'none':
        pylab.savefig(outfile)
def kepsmooth(infile,outfile,datacol,function,fscale,plot,plotlab,
    clobber,verbose,logfile,status, cmdLine=False):
    """
    Smooth a Kepler light curve with a sliding function.

    Reads `datacol` from `infile`, convolves it via `kepfunc.smooth` with a
    kernel of width `fscale` days (converted to cadences), optionally plots
    the raw and smoothed series, and writes the smoothed column to `outfile`.

    Fix vs. original: the LaTeX-failure diagnostic used the Python 2 `print`
    statement (a SyntaxError under Python 3); converted to the `print()`
    call form used elsewhere in this file.
    """

    ## startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 18
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSMOOTH -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'function='+str(function)+' '
    call += 'fscale='+str(fscale)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    ## start time
    kepmsg.clock('KEPSMOOTH started at',logfile,verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSMOOTH: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
        # fall back to measuring the cadence from the data if the header lacks it
        if cadence == 0.0:
            tstart, tstop, ncad, cadence, status = kepio.cadence(instr,infile,logfile,verbose,status)
    if status == 0:
        # FILEVER present -> fluxes already per-cadence (cadenom = 1)
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
    if status == 0:
        flux, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)

    # filter input data table
    if status == 0:
        # if NANCLEAN is absent, compact the table in place to drop cadences
        # with non-finite time/flux or zero flux, then record the keyword
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    ## read table columns
    if status == 0:
        # old-format files use a 'barytime' column; newer ones use 'time'
        try:
            intime = instr[1].data.field('barytime')
        except:
            intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## smooth data (fscale is in days; cadence is in seconds)
    if status == 0:
        outdata = kepfunc.smooth(indata,fscale/(cadence/86400),function)

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        # scale the flux down by a power of ten for a readable y axis
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, re.sub('_','-',plotlab))

        ## data limits
        xmin = numpy.nanmin(ptime)
        xmax = numpy.nanmax(ptime)
        ymin = numpy.min(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the series ends so the fill polygon closes at zero flux
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pout2 = insert(pout2,[0],[0.0])
        pout2 = append(pout2,0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            # FIX: was a Python 2 print statement (SyntaxError under Python 3)
            print('ERROR -- KEPSMOOTH: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(1,figsize=[xsize,ysize])

        # delete any fossil plots in the matplotlib window
        pylab.clf()

        # position axes inside the plotting window
        ax = pylab.subplot(111)
        pylab.subplots_adjust(0.06,0.1,0.93,0.88)

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90)

        # raw series (thin orange), fill underneath, smoothed series (thick)
        pylab.plot(ptime[1:-1],pout[1:-1],color='#ff9900',linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth*4.0)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    ## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPSMOOTH completed at'
    else:
        message = '\nKEPSMOOTH aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepbinary(infile, outfile, datacol, m1, m2, r1, r2, period, bjd0, eccn,
              omega, inclination, c1, c2, c3, c4, albedo, depth, contamination,
              gamma, fitparams, eclipses, dopboost, tides, job, clobber,
              verbose, logfile, status):
    """Model, overlay or fit an eclipsing-binary light curve model to Kepler data.

    Parameters
    ----------
    infile : str
        Input light-curve FITS file.
    outfile : str
        Output file name (must not exist unless clobber is set).
    datacol : str
        Name of the flux column in infile[1] to model/fit.
    m1, m2 : float
        Stellar masses (Msun).
    r1, r2 : float
        Stellar radii (Rsun).
    period, bjd0, eccn, omega, inclination : float
        Orbital elements of the binary.
    c1, c2, c3, c4 : float
        Limb-darkening coefficients.
    albedo, depth, contamination, gamma : float
        Additional model parameters passed to kepsim.transitModel.
    fitparams : str
        Delimited list of parameter names to float when job == 'fit'.
    eclipses, dopboost, tides : bool
        Physics switches forwarded to the model.
    job : str
        One of 'model', 'overlay' or 'fit'.
    clobber, verbose : bool
        Overwrite output / log chatter switches.
    logfile : str
        Log file name.
    status : int
        PyKE-style status flag; 0 means "keep going".

    Side effects: writes to the log file and draws a matplotlib figure.
    """

# startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 17
    ysize = 7
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPBINARY -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + datacol + ' '
    call += 'm1=' + str(m1) + ' '
    call += 'm2=' + str(m2) + ' '
    call += 'r1=' + str(r1) + ' '
    call += 'r2=' + str(r2) + ' '
    call += 'period=' + str(period) + ' '
    call += 'bjd0=' + str(bjd0) + ' '
    call += 'eccn=' + str(eccn) + ' '
    call += 'omega=' + str(omega) + ' '
    call += 'inclination=' + str(inclination) + ' '
    call += 'c1=' + str(c1) + ' '
    call += 'c2=' + str(c2) + ' '
    call += 'c3=' + str(c3) + ' '
    call += 'c4=' + str(c4) + ' '
    call += 'albedo=' + str(albedo) + ' '
    call += 'depth=' + str(depth) + ' '
    call += 'contamination=' + str(contamination) + ' '
    call += 'gamma=' + str(gamma) + ' '
    call += 'fitparams=' + str(fitparams) + ' '
    eclp = 'n'
    if (eclipses): eclp = 'y'
    call += 'eclipses=' + eclp + ' '
    boost = 'n'
    if (dopboost): boost = 'y'
    call += 'dopboost=' + boost + ' '
    distort = 'n'
    if (tides): distort = 'y'
    call += 'tides=' + distort + ' '
    call += 'job=' + str(job) + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

# start time
    kepmsg.clock('KEPBINARY started at', logfile, verbose)

# test log file
    logfile = kepmsg.test(logfile)

# check and format the list of fit parameters
# NOTE(review): the '\.' substitution also turns any '.' in fitparams into a
# separator -- harmless for the expected name list, but confirm it is intended.
    if status == 0 and job == 'fit':
        allParams = [m1, m2, r1, r2, period, bjd0, eccn, omega, inclination]
        allNames = ['m1', 'm2', 'r1', 'r2', 'period', 'bjd0', 'eccn', 'omega',
                    'inclination']
        fitparams = re.sub('\|', ',', fitparams.strip())
        fitparams = re.sub('\.', ',', fitparams.strip())
        fitparams = re.sub(';', ',', fitparams.strip())
        fitparams = re.sub(':', ',', fitparams.strip())
        fitparams = re.sub('\s+', ',', fitparams.strip())
        fitparams, status = kepio.parselist(fitparams, logfile, verbose)
        for fitparam in fitparams:
            if fitparam.strip() not in allNames:
                message = 'ERROR -- KEPBINARY: unknown field in list of fit parameters'
                status = kepmsg.err(logfile, message, verbose)

# clobber output file
    if status == 0:
        if clobber: status = kepio.clobber(outfile, logfile, verbose)
        if kepio.fileexists(outfile):
            message = 'ERROR -- KEPBINARY: ' + outfile + ' exists. Use --clobber'
            status = kepmsg.err(logfile, message, verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)
    if status == 0:
        # FILEVER present => data already cadence-normalized; otherwise divide
        # the flux by the cadence length below.
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# check the data column exists
    if status == 0:
        try:
            instr[1].data.field(datacol)
        except:
            message = 'ERROR -- KEPBINARY: ' + datacol + ' column does not exist in ' + infile + '[1]'
            status = kepmsg.err(logfile, message, verbose)

# fudge non-compliant FITS keywords with no values
# NOTE(review): `file` here is the Python builtin, passed as-is elsewhere in
# this package too -- presumably kepkey.emptykeys ignores it; verify.
    if status == 0:
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# filter input data table: drop NaN cadences unless a previous run already
# did so (flagged by the NANCLEAN header keyword)
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            try:
                # old-style files use a 'barytime' column ...
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('barytime')[i]) and \
                            numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                # ... newer files use 'time'
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('time')[i]) and \
                            numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN', True, comment, instr[1], outfile,
                                logfile, verbose)

# read table columns
    if status == 0:
        try:
            time = instr[1].data.field('barytime')
        except:
            time, status = kepio.readfitscol(infile, instr[1].data, 'time',
                                             logfile, verbose)
        indata, status = kepio.readfitscol(infile, instr[1].data, datacol,
                                           logfile, verbose)
    if status == 0:
        time = time + bjdref
        indata = indata / cadenom

# limb-darkening cofficients
    if status == 0:
        limbdark = numpy.array([c1, c2, c3, c4], dtype='float32')

# time details for model: per-cadence exposure time from neighboring stamps
    if status == 0:
        npt = len(time)
        exptime = numpy.zeros((npt), dtype='float64')
        dtype = numpy.zeros((npt), dtype='int')
        for i in range(npt):
            try:
                exptime[i] = time[i + 1] - time[i]
            except:
                # last cadence: fall back to the previous interval
                exptime[i] = time[i] - time[i - 1]

# calculate binary model
    if status == 0:
        tmodel = kepsim.transitModel(1.0, m1, m2, r1, r2, period, inclination,
                                     bjd0, eccn, omega, depth, albedo, c1, c2,
                                     c3, c4, gamma, contamination, npt, time,
                                     exptime, dtype, eclipses, dopboost, tides)

# re-normalize binary model to data
    if status == 0 and (job == 'overlay' or job == 'fit'):
        dmedian = numpy.median(indata)
        tmodel = tmodel / numpy.median(tmodel) * dmedian

# define arrays of floating and frozen parameters
    if status == 0 and job == 'fit':
        params = []
        paramNames = []
        arguments = []
        argNames = []
        for i in range(len(allNames)):
            if allNames[i] in fitparams:
                params.append(allParams[i])
                paramNames.append(allNames[i])
            else:
                arguments.append(allParams[i])
                argNames.append(allNames[i])
        # the out-of-eclipse flux level is always floated as the last parameter
        params.append(dmedian)
        params = numpy.array(params, dtype='float32')

# subtract model from data
    if status == 0 and job == 'fit':
        deltam = numpy.abs(indata - tmodel)

# fit statistics
    if status == 0 and job == 'fit':
        aveDelta = numpy.sum(deltam) / npt
        chi2 = math.sqrt(numpy.sum((indata - tmodel) * (indata - tmodel) /
                                   (npt - len(params))))

# fit model to data using downhill simplex
    if status == 0 and job == 'fit':
        print ''
        print '%4s %11s %11s' % ('iter', 'delta', 'chi^2')
        print '----------------------------'
        print '%4d %.5E %.5E' % (0, aveDelta, chi2)
        bestFit = scipy.optimize.fmin(
            fitModel, params,
            args=(paramNames, dmedian, m1, m2, r1, r2, period, bjd0, eccn,
                  omega, inclination, depth, albedo, c1, c2, c3, c4, gamma,
                  contamination, npt, time, exptime, indata, dtype, eclipses,
                  dopboost, tides),
            maxiter=1e4)

# calculate best fit binary model: push fitted values back into the named
# parameters, report them, then recompute the model with the fitted flux
    if status == 0 and job == 'fit':
        print ''
        for i in range(len(paramNames)):
            if 'm1' in paramNames[i].lower():
                m1 = bestFit[i]
                print ' M1 = %.3f Msun' % bestFit[i]
            elif 'm2' in paramNames[i].lower():
                m2 = bestFit[i]
                print ' M2 = %.3f Msun' % bestFit[i]
            elif 'r1' in paramNames[i].lower():
                r1 = bestFit[i]
                print ' R1 = %.4f Rsun' % bestFit[i]
            elif 'r2' in paramNames[i].lower():
                r2 = bestFit[i]
                print ' R2 = %.4f Rsun' % bestFit[i]
            elif 'period' in paramNames[i].lower():
                period = bestFit[i]
            elif 'bjd0' in paramNames[i].lower():
                bjd0 = bestFit[i]
                print 'BJD0 = %.8f' % bestFit[i]
            elif 'eccn' in paramNames[i].lower():
                eccn = bestFit[i]
                print ' e = %.3f' % bestFit[i]
            elif 'omega' in paramNames[i].lower():
                omega = bestFit[i]
                print ' w = %.3f deg' % bestFit[i]
            elif 'inclination' in paramNames[i].lower():
                inclination = bestFit[i]
                print ' i = %.3f deg' % bestFit[i]
        flux = bestFit[-1]
        print ''
        tmodel = kepsim.transitModel(flux, m1, m2, r1, r2, period, inclination,
                                     bjd0, eccn, omega, depth, albedo, c1, c2,
                                     c3, c4, gamma, contamination, npt, time,
                                     exptime, dtype, eclipses, dopboost, tides)

# subtract model from data
    if status == 0:
        deltaMod = indata - tmodel

# standard deviation of model
    if status == 0:
        stdDev = math.sqrt(numpy.sum((indata - tmodel) * (indata - tmodel)) / npt)

# clean up x-axis unit
    if status == 0:
        time0 = float(int(tstart / 100) * 100.0)
        ptime = time - time0
        xlab = 'BJD $-$ %d' % time0

# clean up y-axis units
    if status == 0:
        nrm = len(str(int(indata.max()))) - 1
        pout = indata / 10**nrm
        pmod = tmodel / 10**nrm
        pres = deltaMod / stdDev
        if job == 'fit' or job == 'overlay':
            try:
                ylab1 = 'Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm
                ylab2 = 'Residual ($\sigma$)'
            except:
                ylab1 = 'Flux (10**%d e-/s)' % nrm
                ylab2 = 'Residual (sigma)'
        else:
            ylab1 = 'Normalized Flux'

# dynamic range of model plot
    if status == 0 and job == 'model':
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = tmodel.min()
        ymax = tmodel.max()

# dynamic range of model/data overlay or fit
    if status == 0 and (job == 'overlay' or job == 'fit'):
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        tmin = pmod.min()
        tmax = pmod.max()
        ymin = numpy.array([ymin, tmin]).min()
        ymax = numpy.array([ymax, tmax]).max()
        rmin = pres.min()
        rmax = pres.max()

# pad the dynamic range
    if status == 0:
        xr = (xmax - xmin) / 80
        yr = (ymax - ymin) / 40
        if job == 'overlay' or job == 'fit':
            rr = (rmax - rmin) / 40

# set up plot style
    if status == 0:
        labelsize = 24
        ticksize = 16
        xsize = 17
        ysize = 7
        lcolor = '#0000ff'
        lwidth = 1.0
        fcolor = '#ffff00'
        falpha = 0.2
        params = {'backend': 'png',
                  'axes.linewidth': 2.5,
                  'axes.labelsize': 24,
                  'axes.font': 'sans-serif',
                  'axes.fontweight': 'bold',
                  'text.fontsize': 12,
                  'legend.fontsize': 12,
                  'xtick.labelsize': 16,
                  'ytick.labelsize': 16}
        pylab.rcParams.update(params)
        pylab.figure(figsize=[14, 10])
        pylab.clf()

# main plot window
        ax = pylab.axes([0.05, 0.3, 0.94, 0.68])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)

# plot model time series (pad the arrays so fill() closes down to zero)
    if status == 0 and job == 'model':
        pylab.plot(ptime, tmodel, color='#0000ff', linestyle='-',
                   linewidth=1.0)
        ptime = numpy.insert(ptime, [0.0], ptime[0])
        ptime = numpy.append(ptime, ptime[-1])
        tmodel = numpy.insert(tmodel, [0.0], 0.0)
        tmodel = numpy.append(tmodel, 0.0)
        pylab.fill(ptime, tmodel, fc='#ffff00', linewidth=0.0, alpha=0.2)

# plot data time series and best fit
    if status == 0 and (job == 'overlay' or job == 'fit'):
        pylab.plot(ptime, pout, color='#0000ff', linestyle='-', linewidth=1.0)
        ptime = numpy.insert(ptime, [0.0], ptime[0])
        ptime = numpy.append(ptime, ptime[-1])
        pout = numpy.insert(pout, [0], 0.0)
        pout = numpy.append(pout, 0.0)
        pylab.fill(ptime, pout, fc='#ffff00', linewidth=0.0, alpha=0.2)
        pylab.plot(ptime[1:-1], pmod, color='r', linestyle='-', linewidth=2.0)

# ranges and labels
    if status == 0:
        pylab.xlim(xmin - xr, xmax + xr)
        pylab.ylim(ymin - yr, ymax + yr)
        pylab.xlabel(xlab, {'color': 'k'})
        pylab.ylabel(ylab1, {'color': 'k'})

# residual plot window
    if status == 0 and (job == 'overlay' or job == 'fit'):
        ax = pylab.axes([0.05, 0.07, 0.94, 0.23])
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)

# plot residual time series with +/- 1 sigma guide lines
    if status == 0 and (job == 'overlay' or job == 'fit'):
        pylab.plot([ptime[0], ptime[-1]], [0.0, 0.0], color='r',
                   linestyle='--', linewidth=1.0)
        pylab.plot([ptime[0], ptime[-1]], [-1.0, -1.0], color='r',
                   linestyle='--', linewidth=1.0)
        pylab.plot([ptime[0], ptime[-1]], [1.0, 1.0], color='r',
                   linestyle='--', linewidth=1.0)
        pylab.plot(ptime[1:-1], pres, color='#0000ff', linestyle='-',
                   linewidth=1.0)
        pres = numpy.insert(pres, [0], rmin)
        pres = numpy.append(pres, rmin)
        pylab.fill(ptime, pres, fc='#ffff00', linewidth=0.0, alpha=0.2)

# ranges and labels of residual time series
    if status == 0 and (job == 'overlay' or job == 'fit'):
        pylab.xlim(xmin - xr, xmax + xr)
        pylab.ylim(rmin - rr, rmax + rr)
        pylab.xlabel(xlab, {'color': 'k'})
        pylab.ylabel(ylab2, {'color': 'k'})

# display the plot
    if status == 0:
        pylab.draw()
def kepdetrend(infile,outfile,datacol,errcol,ranges1,npoly1,nsig1,niter1,
               ranges2,npoly2,nsig2,niter2,popnans,plot,clobber,verbose,logfile,
               status,cmdLine=False):
    """Detrend a light curve by fitting sigma-clipped polynomials to two
    time ranges: range 1 (the region to be corrected) and range 2 (the
    reference region assumed to be correct). The output flux is
    indata - fit1 + fit2, i.e. the bad trend is replaced by the
    extrapolated good trend inside range 1 only.

    Fixes relative to the previous revision:
    * `good_data`/`bad_data` were referenced in the popnans write-back
      branch but never defined (NameError); they are now recorded when the
      NaN filter is applied.
    * stray debug statements `print(intime)` and `print(coeffs)` removed.
    * no-op bare expressions `ploty1` / `ploty2` removed.

    Side effects: writes outfile, appends to logfile, optionally plots.
    """

# startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 9
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPDETREND -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errcol='+str(errcol)+' '
    call += 'ranges1='+str(ranges1)+' '
    call += 'npoly1='+str(npoly1)+' '
    call += 'nsig1='+str(nsig1)+' '
    call += 'niter1='+str(niter1)+' '
    call += 'ranges2='+str(ranges2)+' '
    call += 'npoly2='+str(npoly2)+' '
    call += 'nsig2='+str(nsig2)+' '
    call += 'niter2='+str(niter2)+' '
    popn = 'n'
    if (popnans): popn = 'y'
    call += 'popnans='+popn+ ' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPDETREND started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPDETREND: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)

# fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table: after rot90(...,3) row i of work1 is
# [err[i], data[i], time[i]] in original cadence order, so the row mask
# maps directly back onto the FITS table rows
    if status == 0:
        work1 = numpy.array([table.field('time'), table.field(datacol),
                             table.field(errcol)])
        work1 = numpy.rot90(work1,3)
        finite = ~numpy.isnan(work1).any(1)
        # remember which table rows survive the NaN filter; needed when
        # popnans writes corrected values back into the original table
        good_data = numpy.where(finite)[0]
        bad_data = numpy.where(~finite)[0]
        work1 = work1[finite]

# read table columns
    if status == 0:
        intime = work1[:,2] + bjdref
        indata = work1[:,1]
        inerr = work1[:,0]

# time ranges for region 1 (region to be corrected)
    if status == 0:
        time1 = []; data1 = []; err1 = []
        t1start, t1stop, status = kepio.timeranges(ranges1,logfile,verbose)
    if status == 0:
        cadencelis1, status = kepstat.filterOnRange(intime,t1start,t1stop)
    if status == 0:
        for i in range(len(cadencelis1)):
            time1.append(intime[cadencelis1[i]])
            data1.append(indata[cadencelis1[i]])
            if errcol.lower() != 'none':
                err1.append(inerr[cadencelis1[i]])
        t0 = time1[0]
        time1 = array(time1,dtype='float64') - t0
        data1 = array(data1,dtype='float32')
        if errcol.lower() != 'none':
            err1 = array(err1,dtype='float32')
        else:
            err1 = None

# fit function to range 1
    if status == 0:
        functype = 'poly' + str(npoly1)
        pinit = [data1.mean()]
        if npoly1 > 0:
            for i in range(npoly1):
                pinit.append(0)
        pinit = array(pinit,dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx1, ploty1, status = \
            kepfit.lsqclip(functype,pinit,time1,data1,err1,nsig1,nsig1,niter1,
                           logfile,verbose)
        fit1 = indata * 0.0
        for i in range(len(coeffs)):
            fit1 += coeffs[i] * (intime - t0)**i
        # only apply the correction inside region 1
        for i in range(len(intime)):
            if i not in cadencelis1:
                fit1[i] = 0.0
        plotx1 += t0

# time ranges for region 2 (region that is correct)
    if status == 0:
        time2 = []; data2 = []; err2 = []
        t2start, t2stop, status = kepio.timeranges(ranges2,logfile,verbose)
        cadencelis2, status = kepstat.filterOnRange(intime,t2start,t2stop)
        for i in range(len(cadencelis2)):
            time2.append(intime[cadencelis2[i]])
            data2.append(indata[cadencelis2[i]])
            if errcol.lower() != 'none':
                err2.append(inerr[cadencelis2[i]])
        t0 = time2[0]
        time2 = array(time2,dtype='float64') - t0
        data2 = array(data2,dtype='float32')
        if errcol.lower() != 'none':
            err2 = array(err2,dtype='float32')
        else:
            err2 = None

# fit function to range 2
    if status == 0:
        functype = 'poly' + str(npoly2)
        pinit = [data2.mean()]
        if npoly2 > 0:
            for i in range(npoly2):
                pinit.append(0)
        pinit = array(pinit,dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx2, ploty2, status = \
            kepfit.lsqclip(functype,pinit,time2,data2,err2,nsig2,nsig2,niter2,
                           logfile,verbose)
        fit2 = indata * 0.0
        for i in range(len(coeffs)):
            fit2 += coeffs[i] * (intime - t0)**i
        # fit2 is the replacement trend, also applied inside region 1 only
        for i in range(len(intime)):
            if i not in cadencelis1:
                fit2[i] = 0.0
        plotx2 += t0

# normalize data: swap the bad trend for the reference trend in region 1
    if status == 0:
        outdata = indata - fit1 + fit2
        if errcol.lower() != 'none':
            outerr = inerr * 1.0

# comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        plotx1 = plotx1 - intime0
        plotx2 = plotx2 - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units
    if status == 0:
        pout = outdata
        nrm = len(str(int(numpy.nanmax(indata))))-1
        indata = indata / 10**nrm
        pout = pout / 10**nrm
        ploty1 = ploty1 / 10**nrm
        ploty2 = ploty2 / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits (arrays padded so fill() closes the polygon at zero)
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = indata.min()
        ymax = indata.max()
        omin = pout.min()
        omax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        oo = omax - omin
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        indata = insert(indata,[0],[0.0])
        indata = append(indata,[0.0])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            pass
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot original data
        ax = pylab.axes([0.06,0.523,0.93,0.45])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,indata,color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,indata,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.plot(plotx1,ploty1,color='r',linestyle='-',linewidth=2.0)
        pylab.plot(plotx2,ploty2,color='g',linestyle='-',linewidth=2.0)
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin > 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot detrended data
        ax = pylab.axes([0.06,0.073,0.93,0.45])
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin > 0.0:
            pylab.ylim(omin-oo*0.01,omax+oo*0.01)
        else:
            pylab.ylim(1.0e-10,omax+oo*0.01)
        pylab.xlabel(xlab, {'color' : 'k'})
        try:
            pylab.ylabel(ylab, {'color' : 'k'})
        except:
            ylab = '10**%d e-/s' % nrm
            pylab.ylabel(ylab, {'color' : 'k'})

# render plot
    if status == 0:
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file; with popnans the NaN rows are blanked in place
# NOTE(review): assigning None into a float FITS column relies on legacy
# numpy coercing None to NaN -- confirm on the numpy version in use
    if status == 0 and popnans:
        instr[1].data.field(datacol)[good_data] = outdata
        instr[1].data.field(errcol)[good_data] = outerr
        instr[1].data.field(datacol)[bad_data] = None
        instr[1].data.field(errcol)[bad_data] = None
        instr.writeto(outfile)
    elif status == 0 and not popnans:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
            if errcol.lower() != 'none':
                instr[1].data.field(errcol)[i] = outerr[i]
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time
    if (status == 0):
        message = 'KEPDETREND completed at'
    else:
        message = '\nKEPDETREND aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepconvert(infile, outfile, conversion, columns, baddata, clobber,
               verbose, logfile, status):
    """Convert a Kepler light curve between FITS and ASCII formats.

    Parameters
    ----------
    infile, outfile : str
        Input and output file names.
    conversion : str
        'fits2asc' or 'asc2fits'.
    columns : str
        Comma-separated list of column names to convert.
    baddata : bool
        If False, rows with non-zero SAP_QUALITY are dropped (fits2asc).
    clobber, verbose : bool
        Overwrite output / log chatter switches.
    logfile : str
        Log file name.
    status : int
        PyKE-style status flag; 0 means "keep going".

    Fix relative to the previous revision: the ASCII line counter used the
    no-op expression `nline + 1`, so column-count error messages always
    reported line 0; it now increments correctly.
    """

# startup parameters
    status = 0

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPCONVERT -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'conversion=' + conversion + ' '
    call += 'columns=' + columns + ' '
    writebad = 'n'
    if (baddata): writebad = 'y'
    call += 'baddata=' + writebad + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

# start time
    kepmsg.clock('KEPCONVERT started at', logfile, verbose)

# test log file
    logfile = kepmsg.test(logfile)

# data columns
    if status == 0:
        colnames = columns.strip().split(',')
        ncol = len(colnames)
        if ncol < 1:
            message = 'ERROR -- KEPCONVERT: no data columns specified'
            status = kepmsg.err(logfile, message, verbose)

# input file exists
    if status == 0 and not kepio.fileexists(infile):
        message = 'ERROR -- KEPCONVERT: input file ' + infile + ' does not exist'
        status = kepmsg.err(logfile, message, verbose)

# clobber output file
    if status == 0:
        if clobber: status = kepio.clobber(outfile, logfile, verbose)
        if kepio.fileexists(outfile):
            message = 'ERROR -- KEPCONVERT: ' + outfile + ' exists. Use clobber=yes'
            status = kepmsg.err(logfile, message, verbose)

# open FITS input file
    if status == 0 and conversion == 'fits2asc':
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)

# read FITS table data
    if status == 0 and conversion == 'fits2asc':
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# check columns exist in FITS file
    if not baddata and status == 0 and conversion == 'fits2asc':
        try:
            qualcol = table.field('SAP_QUALITY') == 0
        except:
            message = 'No SAP_QUALITY column in data, are you using an old FITS file?'
            status = kepmsg.err(logfile, message, verbose)
    if status == 0 and conversion == 'fits2asc':
        work = []
        for colname in colnames:
            try:
                # time is exported as full BJD rather than BJD - BJDREF
                if colname.lower() == 'time':
                    work.append(table.field(colname) + bjdref)
                else:
                    work.append(table.field(colname))
            except:
                message = 'ERROR -- KEPCONVERT: no column ' + colname + ' in ' + infile
                status = kepmsg.err(logfile, message, verbose)
        if not baddata:
            # keep only good-quality cadences
            for i in range(len(work)):
                work[i] = work[i][qualcol]

# close input file
    if status == 0 and conversion == 'fits2asc':
        status = kepio.closefits(instr, logfile, verbose)

## write output file
    if status == 0 and conversion == 'fits2asc':
        savetxt(outfile, array(work).T)

## open and read ASCII input file
    if status == 0 and conversion == 'asc2fits':
        table, status = kepio.openascii(infile, 'r', logfile, verbose)

## organize ASCII table into arrays
    if status == 0 and conversion == 'asc2fits':
        work = []
        for i in range(ncol):
            work.append([])
        nline = 0
        for line in table:
            # normalize whitespace / '|' / ';' separators to commas
            line = line.strip()
            line = re.sub('\s+', ',', line)
            line = re.sub('\|', ',', line)
            line = re.sub(';', ',', line)
            if '#' not in line:
                nline += 1
                line = line.split(',')
                if len(line) == ncol:
                    for i in range(len(line)):
                        try:
                            work[i].append(float(line[i]))
                        except:
                            message = 'ERROR --KEPCONVERT: ' + str(
                                line[i]) + ' is not float'
                            status = kepmsg.err(logfile, message, verbose)
                            break
                else:
                    message = 'ERROR --KEPCONVERT: ' + str(
                        ncol) + ' columns required but '
                    message += str(
                        len(line)) + ' columns supplied by ' + infile
                    message += ' at line' + str(nline)
                    status = kepmsg.err(logfile, message, verbose)
                    break
        for i in range(ncol):
            work[i] = numpy.array(work[i], dtype='float64')

## timing keywords for output file
    if status == 0 and conversion == 'asc2fits':
        for i in range(ncol):
            if 'time' in colnames[i].lower():
                # MJD-looking times are promoted to full BJD
                if work[i][1] > 54000.0 and work[i][1] < 60000.0:
                    work[i] += 2.4e6
                tstart = work[i].min()
                tstop = work[i].max()
                lc_start = tstart
                lc_end = tstop
                if lc_start > 2.4e6: lc_start -= 2.4e6
                if lc_end > 2.4e6: lc_end -= 2.4e6
                # infer the observing mode from the median cadence length
                dts = []
                for j in range(1, len(work[i])):
                    dts.append(work[i][j] - work[i][j - 1])
                dts = numpy.array(dts, dtype='float32')
                cadence = numpy.median(dts)
                if cadence * 86400.0 > 58.0 and cadence * 86400.0 < 61.0:
                    obsmode = 'short cadence'
                elif cadence * 86400.0 > 1600.0 and cadence * 86400.0 < 2000.0:
                    obsmode = 'long cadence'
                else:
                    obsmode = 'unknown'

## Create the outfile primary extension
    if status == 0 and conversion == 'asc2fits':
        hdu0 = PrimaryHDU()
        try:
            hdu0.header.update('EXTNAME', 'PRIMARY', 'name of extension')
            hdu0.header.update('EXTVER', 1.0, 'extension version number')
            hdu0.header.update('ORIGIN', 'NASA/Ames', 'organization that generated this file')
            hdu0.header.update('DATE', time.asctime(time.localtime()), 'file creation date')
            hdu0.header.update('CREATOR', 'kepconvert', 'SW version used to create this file')
            hdu0.header.update('PROCVER', 'None', 'processing script version')
            hdu0.header.update('FILEVER', '2.0', 'file format version')
            hdu0.header.update('TIMVERSN', 'OGIP/93-003', 'OGIP memo number for file format')
            hdu0.header.update('TELESCOP', 'Kepler', 'telescope')
            hdu0.header.update('INSTRUME', 'Kepler photometer', 'detector type')
            hdu0.header.update('OBJECT', 'Unknown', 'string version of kepID')
            hdu0.header.update('KEPLERID', 'Unknown', 'unique Kepler target identifier')
            hdu0.header.update('CHANNEL', 'Unknown', 'CCD channel')
            hdu0.header.update('SKYGROUP', 'Unknown', 'roll-independent location of channel')
            hdu0.header.update('MODULE', 'Unknown', 'CCD module')
            hdu0.header.update('OUTPUT', 'Unknown', 'CCD output')
            hdu0.header.update('QUARTER', 'Unknown', 'mission quarter during which data was collected')
            hdu0.header.update('SEASON', 'Unknown', 'mission season during which data was collected')
            hdu0.header.update('DATA_REL', 'Unknown', 'version of data release notes describing data')
            hdu0.header.update('OBSMODE', obsmode, 'observing mode')
            hdu0.header.update('RADESYS', 'Unknown', 'reference frame of celestial coordinates')
            hdu0.header.update('RA_OBJ', 'Unknown', '[deg] right ascension from KIC')
            hdu0.header.update('DEC_OBJ', 'Unknown', '[deg] declination from KIC')
            hdu0.header.update('EQUINOX', 2000.0, 'equinox of celestial coordinate system')
            hdu0.header.update('PMRA', 'Unknown', '[arcsec/yr] RA proper motion')
            hdu0.header.update('PMDEC', 'Unknown', '[arcsec/yr] Dec proper motion')
            hdu0.header.update('PMTOTAL', 'Unknown', '[arcsec/yr] total proper motion')
            hdu0.header.update('PARALLAX', 'Unknown', '[arcsec] parallax')
            hdu0.header.update('GLON', 'Unknown', '[deg] galactic longitude')
            hdu0.header.update('GLAT', 'Unknown', '[deg] galactic latitude')
            hdu0.header.update('GMAG', 'Unknown', '[mag] SDSS g band magnitude from KIC')
            hdu0.header.update('RMAG', 'Unknown', '[mag] SDSS r band magnitude from KIC')
            hdu0.header.update('IMAG', 'Unknown', '[mag] SDSS i band magnitude from KIC')
            hdu0.header.update('ZMAG', 'Unknown', '[mag] SDSS z band magnitude from KIC')
            hdu0.header.update('D51MAG', 'Unknown', '[mag] D51 magnitude, from KIC')
            hdu0.header.update('JMAG', 'Unknown', '[mag] J band magnitude from 2MASS')
            hdu0.header.update('HMAG', 'Unknown', '[mag] H band magnitude from 2MASS')
            hdu0.header.update('KMAG', 'Unknown', '[mag] K band magnitude from 2MASS')
            hdu0.header.update('KEPMAG', 'Unknown', '[mag] Kepler magnitude (Kp) from KIC')
            hdu0.header.update('GRCOLOR', 'Unknown', '[mag] (g-r) color, SDSS bands')
            hdu0.header.update('JKCOLOR', 'Unknown', '[mag] (J-K) color, 2MASS bands')
            hdu0.header.update('GKCOLOR', 'Unknown', '[mag] (g-K) color, SDSS g - 2MASS K')
            hdu0.header.update('TEFF', 'Unknown', '[K] effective temperature from KIC')
            hdu0.header.update('LOGG', 'Unknown', '[cm/s2] log10 surface gravity from KIC')
            hdu0.header.update('FEH', 'Unknown', '[log10([Fe/H])] metallicity from KIC')
            hdu0.header.update('EBMINUSV', 'Unknown', '[mag] E(B-V) redenning from KIC')
            hdu0.header.update('AV', 'Unknown', '[mag] A_v extinction from KIC')
            hdu0.header.update('RADIUS', 'Unknown', '[solar radii] stellar radius from KIC')
            hdu0.header.update('TMINDEX', 'Unknown', 'unique 2MASS catalog ID from KIC')
            hdu0.header.update('SCPID', 'Unknown', 'unique SCP processing ID from KIC')
            hdulist = HDUList(hdu0)
        except:
            message = 'ERROR -- KEPCONVERT: cannot create primary extension in ' + outfile
            status = kepmsg.err(logfile, message, verbose)

## create the outfile HDU 1 extension
    if status == 0 and conversion == 'asc2fits':
        try:
            fitscol = []
            for i in range(ncol):
                fitscol.append(
                    Column(name=colnames[i], format='D', array=work[i]))
            fitscols = ColDefs(fitscol)
            hdu1 = new_table(fitscols)
            hdulist.append(hdu1)
            hdu1.header.update('INHERIT', True, 'inherit primary keywords')
            hdu1.header.update('EXTNAME', 'LIGHTCURVE', 'name of extension')
            hdu1.header.update('EXTVER', 1, 'extension version number')
            hdu1.header.update('TELESCOP', 'Kepler', 'telescope')
            hdu1.header.update('INSTRUME', 'Kepler photometer', 'detector type')
            hdu1.header.update('OBJECT', 'Unknown', 'string version of kepID')
            hdu1.header.update('KEPLERID', 'Unknown', 'unique Kepler target identifier')
            hdu1.header.update('RADESYS', 'Unknown', 'reference frame of celestial coordinates')
            hdu1.header.update('RA_OBJ', 'Unknown', '[deg] right ascension from KIC')
            hdu1.header.update('DEC_OBJ', 'Unknown', '[deg] declination from KIC')
            hdu1.header.update('EQUINOX', 2000.0, 'equinox of celestial coordinate system')
            hdu1.header.update('TIMEREF', 'Unknown', 'barycentric correction applied to times')
            hdu1.header.update('TASSIGN', 'Unknown', 'where time is assigned')
            hdu1.header.update('TIMESYS', 'Unknown', 'time system is barycentric JD')
            hdu1.header.update('BJDREFI', 0.0, 'integer part of BJD reference date')
            hdu1.header.update('BJDREFF', 0.0, 'fraction of day in BJD reference date')
            hdu1.header.update('TIMEUNIT', 'Unknown', 'time unit for TIME, TSTART and TSTOP')
            hdu1.header.update('TSTART', tstart, 'observation start time in JD - BJDREF')
            hdu1.header.update('TSTOP', tstop, 'observation stop time in JD - BJDREF')
            hdu1.header.update('LC_START', lc_start, 'observation start time in MJD')
            hdu1.header.update('LC_END', lc_end, 'observation stop time in MJD')
            hdu1.header.update('TELAPSE', tstop - tstart, '[d] TSTOP - TSTART')
            hdu1.header.update('LIVETIME', 'Unknown', '[d] TELAPSE multiplied by DEADC')
            hdu1.header.update('EXPOSURE', 'Unknown', '[d] time on source')
            hdu1.header.update('DEADC', 'Unknown', 'deadtime correction')
            hdu1.header.update('TIMEPIXR', 'Unknown', 'bin time beginning=0 middle=0.5 end=1')
            hdu1.header.update('TIERRELA', 'Unknown', '[d] relative time error')
            hdu1.header.update('TIERABSO', 'Unknown', '[d] absolute time error')
            hdu1.header.update('INT_TIME', 'Unknown', '[s] photon accumulation time per frame')
            hdu1.header.update('READTIME', 'Unknown', '[s] readout time per frame')
            hdu1.header.update('FRAMETIM', 'Unknown', '[s] frame time (INT_TIME + READTIME)')
            hdu1.header.update('NUM_FRM', 'Unknown', 'number of frames per time stamp')
            hdu1.header.update('TIMEDEL', 'Unknown', '[d] time resolution of data')
            hdu1.header.update('DATE-OBS', 'Unknown', 'TSTART as UT calendar date')
            hdu1.header.update('DATE-END', 'Unknown', 'TSTOP as UT calendar date')
            hdu1.header.update('BACKAPP', 'Unknown', 'background is subtracted')
            hdu1.header.update('DEADAPP', 'Unknown', 'deadtime applied')
            hdu1.header.update('VIGNAPP', 'Unknown', 'vignetting or collimator correction applied')
            hdu1.header.update('GAIN', 'Unknown', 'channel gain [electrons/count]')
            hdu1.header.update('READNOIS', 'Unknown', 'read noise [electrons]')
            hdu1.header.update('NREADOUT', 'Unknown', 'number of reads per cadence')
            hdu1.header.update('TIMSLICE', 'Unknown', 'time-slice readout sequence section')
            hdu1.header.update('MEANBLCK', 'Unknown', 'FSW mean black level [count]')
            hdu1.header.update('PDCSAPFL', 'Unknown', 'SAP PDC processing flags (bit code)')
            hdu1.header.update('PDCDIAFL', 'Unknown', 'DIA PDC processing flags (bit code)')
            hdu1.header.update('MISPXSAP', 'Unknown', 'no of optimal aperture pixels missing from SAP')
            hdu1.header.update('MISPXDIA', 'Unknown', 'no of optimal aperture pixels missing from DIA')
            hdu1.header.update('CROWDSAP', 'Unknown', 'crowding metric evaluated over SAP opt. ap.')
            hdu1.header.update('CROWDDIA', 'Unknown', 'crowding metric evaluated over DIA aperture')
        except:
            message = 'ERROR -- KEPCONVERT: cannot create light curve extension in ' + outfile
            status = kepmsg.err(logfile, message, verbose)

## history keyword in output file
    if status == 0 and conversion == 'asc2fits':
        status = kepkey.history(call, hdu0, outfile, logfile, verbose)

## filter data table
    if status == 0 and conversion == 'asc2fits':
        instr, status = kepio.filterNaN(
            hdulist,
            colnames[min(array([1, len(colnames) - 1], dtype='int'))],
            outfile, logfile, verbose)

## write output FITS file
    if status == 0 and conversion == 'asc2fits':
        hdulist.writeto(outfile, checksum=True)

## end time
    if (status == 0):
        message = 'KEPCONVERT completed at'
    else:
        message = '\nKEPCONVERT aborted at'
    kepmsg.clock(message, logfile, verbose)
def kepflatten(infile,outfile,datacol,errcol,nsig,stepsize,winsize,npoly,niter,ranges,
               plot,clobber,verbose,logfile,status,cmdLine=False):
    """Remove low-frequency variability from a Kepler light curve.

    Polynomials of order npoly are fit, with niter iterations of
    nsig-sigma clipping, to overlapping time windows of width winsize
    spaced stepsize days apart.  The per-cadence mean of the overlapping
    fits forms a master trend; the flux and its error are divided by that
    trend and written to new columns DETSAP_FLUX and DETSAP_FLUX_ERR in
    outfile.

    infile/outfile -- input and output FITS light-curve file names
    datacol/errcol -- names of the flux and flux-error table columns
    ranges         -- time ranges (kepio.timeranges format) within which
                      outliers are flagged on the plot
    plot           -- if true, render a two-panel before/after plot
    clobber        -- overwrite an existing outfile
    status         -- inherited error flag; each stage runs only while it is 0

    Returns nothing; results go to outfile, the log file and the screen plot.
    """

# startup parameters
    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 10
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPFLATTEN -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errcol='+str(errcol)+' '
    call += 'nsig='+str(nsig)+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'winsize='+str(winsize)+' '
    call += 'npoly='+str(npoly)+' '
    call += 'niter='+str(niter)+' '
    call += 'ranges='+str(ranges)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPFLATTEN started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# test winsize > stepsize
# NOTE(review): the test uses '<', so winsize == stepsize is accepted even
# though the message says "greater than" -- confirm intended boundary.
    if winsize < stepsize:
        message = 'ERROR -- KEPFLATTEN: winsize must be greater than stepsize'
        status = kepmsg.err(logfile,message,verbose)

# clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPFLATTEN: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            # FILEVER present implies data already normalized per cadence
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): the builtin 'file' is passed as the filename argument;
        # looks like it should be 'infile' -- confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table
    if status == 0:
        try:
            datac = table.field(datacol)
        except:
            message = 'ERROR -- KEPFLATTEN: cannot find or read data column ' + datacol
            status = kepmsg.err(logfile,message,verbose)
    if status == 0:
        try:
            err = table.field(errcol)
        except:
            # NOTE(review): this warning message is built but never logged
            message = 'WARNING -- KEPFLATTEN: cannot find or read error column ' + errcol
            errcol = 'None'
    if status == 0:
        if errcol.lower() == 'none' or errcol == 'PSF_FLUX_ERR':
            # no usable error column: estimate shot noise as
            # sqrt(|flux * cadence|) / cadence
            err = datac * cadence
            err = numpy.sqrt(numpy.abs(err)) / cadence
            work1 = numpy.array([table.field('time'), datac, err])
        else:
            work1 = numpy.array([table.field('time'), datac, err])
        # rotate so rows are cadences, then drop any row containing a NaN
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns
    if status == 0:
        # after rot90 the column order is reversed: [err, data, time]
        intime = work1[:,2] + bjdref
        indata = work1[:,1]
        inerr = work1[:,0]
        if len(intime) == 0:
            message = 'ERROR -- KEPFLATTEN: one of the input arrays is all NaN'
            status = kepmsg.err(logfile,message,verbose)

# time ranges for region to be corrected
    if status == 0:
        t1, t2, status = kepio.timeranges(ranges,logfile,verbose)
        cadencelis, status = kepstat.filterOnRange(intime,t1,t2)

# find limits of each time step
    if status == 0:
        tstep1 = []; tstep2 = []
        work = intime[0]
        while work <= intime[-1]:
            tstep1.append(work)
            # window end is clipped to the final timestamp
            tstep2.append(array([work+winsize,intime[-1]],dtype='float64').min())
            work += stepsize

# find cadence limits of each time step
    if status == 0:
        cstep1 = []; cstep2 = []
        for n in range(len(tstep1)):
            for i in range(len(intime)-1):
                if intime[i] <= tstep1[n] and intime[i+1] > tstep1[n]:
                    for j in range(i,len(intime)-1):
                        if intime[j] < tstep2[n] and intime[j+1] >= tstep2[n]:
                            cstep1.append(i)
                            cstep2.append(j+1)

# comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units
    if status == 0:
        pout = copy(indata)
        # nrm = number of digits above the leading one, used as a 10**nrm scale
        nrm = len(str(int(pout.max())))-1
        pout = pout / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the plotted series so fill() closes the polygon at y=0
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve
    if status == 0 and plot:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot data
        ax = pylab.axes([0.06,0.54,0.93,0.43])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90)
        # top panel shares the x axis with the bottom panel; hide its ticks
        pylab.setp(pylab.gca(),xticklabels=[])
        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        if not plotLatex:
            ylab = '10**%d electrons/sec' % nrm
        ylabel(ylab, {'color' : 'k'})
        grid()

# loop over each time step, fit data, determine rms
    if status == 0:
        fitarray = numpy.zeros((len(indata),len(cstep1)),dtype='float32')
        sigarray = numpy.zeros((len(indata),len(cstep1)),dtype='float32')
        fitarray[:,:] = numpy.nan
        sigarray[:,:] = numpy.nan
        masterfit = indata * 0.0
        mastersigma = numpy.zeros(len(masterfit))
        functype = 'poly' + str(npoly)
        for i in range(len(cstep1)):
            # fit each window in a local time frame anchored at its first cadence
            timeSeries = intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]]
            dataSeries = indata[cstep1[i]:cstep2[i]+1]
            fitTimeSeries = numpy.array([],dtype='float32')
            fitDataSeries = numpy.array([],dtype='float32')
            pinit = [dataSeries.mean()]
            if npoly > 0:
                for j in range(npoly):
                    pinit.append(0.0)
            pinit = array(pinit,dtype='float32')
            try:
                # only fit when the window has more points than coefficients
                if len(fitarray[cstep1[i]:cstep2[i]+1,i]) > len(pinit):
                    coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                        kepfit.lsqclip(functype,pinit,timeSeries,dataSeries,None,nsig,nsig,niter,
                                       logfile,verbose)
                    fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
                    sigarray[cstep1[i]:cstep2[i]+1,i] = sigma
                    # evaluate the polynomial over the window
                    for j in range(len(coeffs)):
                        fitarray[cstep1[i]:cstep2[i]+1,i] += coeffs[j] * timeSeries**j
            except:
                for j in range(cstep1[i],cstep2[i]+1):
                    fitarray[cstep1[i]:cstep2[i]+1,i] = 0.0
                    sigarray[cstep1[i]:cstep2[i]+1,i] = 1.0e-10
                message  = 'WARNING -- KEPFLATTEN: could not fit range '
                message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]])
                kepmsg.warn(None,message)

# find mean fit for each timestamp
    if status == 0:
        # average the overlapping window fits per cadence, ignoring NaNs
        for i in range(len(indata)):
            masterfit[i] = scipy.stats.nanmean(fitarray[i,:])
            mastersigma[i] = scipy.stats.nanmean(sigarray[i,:])
        masterfit[-1] = masterfit[-4] #fudge
        masterfit[-2] = masterfit[-4] #fudge
        masterfit[-3] = masterfit[-4] #fudge
        pylab.plot(intime-intime0, masterfit / 10**nrm,'g',lw='3')

# reject outliers
    if status == 0:
        rejtime = []; rejdata = []; naxis2 = 0
        for i in range(len(masterfit)):
            if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis:
                rejtime.append(intime[i])
                rejdata.append(indata[i])
        rejtime = array(rejtime,dtype='float64')
        rejdata = array(rejdata,dtype='float32')
        if plot:
            pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro')

# new data for output file
    if status == 0:
        # detrend: divide out the master fit
        outdata = indata / masterfit
        outerr = inerr / masterfit

# plot ranges
    if status == 0 and plot:
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# plot residual data
    if status == 0 and plot:
        ax = pylab.axes([0.06,0.09,0.93,0.43])

# force tick labels to be absolute rather than relative
    if status == 0 and plot:
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90)

# clean up y-axis units
    if status == 0:
        pout = copy(outdata)
        ylab = 'Normalized Flux'

# data limits
    if status == 0 and plot:
        ymin = pout.min()
        ymax = pout.max()
        yr = ymax - ymin
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pylab.plot(ptime[1:-1],pout[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        pylab.fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        pylab.grid()

# plot ranges
    if status == 0 and plot:
        pylab.xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            pylab.ylim(1.0e-10,ymax+yr*0.01)

# render plot
    if status == 0 and plot:
        pylab.savefig(re.sub('.fits','.png',outfile))
        if cmdLine:
            pylab.show(block=True)
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# add NaNs back into data
    if status == 0:
        # re-read the unfiltered table and re-insert NaN placeholders so the
        # output columns line up row-for-row with the original table
        n = 0
        work1 = array([],dtype='float32')
        work2 = array([],dtype='float32')
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)
        tn = table.field('time')
        dn = table.field(datacol)
        for i in range(len(table.field(0))):
            if numpy.isfinite(tn[i]) and numpy.isfinite(dn[i]) and numpy.isfinite(err[i]):
                try:
                    work1 = numpy.append(work1,outdata[n])
                    work2 = numpy.append(work2,outerr[n])
                    n += 1
                except:
                    pass
            else:
                work1 = numpy.append(work1,numpy.nan)
                work2 = numpy.append(work2,numpy.nan)

# history keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# write output file
        try:
            col1 = pyfits.Column(name='DETSAP_FLUX',format='E13.7',array=work1)
            col2 = pyfits.Column(name='DETSAP_FLUX_ERR',format='E13.7',array=work2)
            cols = instr[1].data.columns + col1 + col2
            instr[1] = pyfits.new_table(cols,header=instr[1].header)
            instr.writeto(outfile)
        except ValueError:
            # columns already exist: overwrite their contents instead
            try:
                instr[1].data.field('DETSAP_FLUX')[:] = work1
                instr[1].data.field('DETSAP_FLUX_ERR')[:] = work2
                instr.writeto(outfile)
            except:
                message = 'ERROR -- KEPFLATTEN: cannot add DETSAP_FLUX data to FITS file'
                status = kepmsg.err(logfile,message,verbose)

# close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

## end time
    if (status == 0):
        message = 'KEPFLATTEN completed at'
    else:
        message = '\nKEPFLATTEN aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepsmooth( infile, outfile, datacol, function, fscale, plot, plotlab, clobber, verbose, logfile, status, cmdLine=False ):
    """Smooth a Kepler light-curve column and write the result to a new file.

    The flux in column ``datacol`` is convolved with a smoothing kernel
    (dispatched by kepfunc.smooth, selected by ``function``) whose width is
    ``fscale`` converted from days to cadences.  The smoothed values replace
    ``datacol`` in the copy of the input written to ``outfile``; optionally
    the raw and smoothed series are plotted together.

    infile/outfile -- input and output FITS light-curve file names
    datacol        -- table column to smooth
    function       -- smoothing-function name passed to kepfunc.smooth
    fscale         -- smoothing timescale in days
    plotlab        -- y-axis label for the plot
    status         -- inherited error flag; each stage runs only while it is 0

    Returns nothing; results go to outfile, the log file and the screen plot.
    """

## startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 18
    ysize = 6
    lcolor = "#0000ff"
    lwidth = 1.0
    fcolor = "#ffff00"
    falpha = 0.2

## log the call
    hashline = "----------------------------------------------------------------------------"
    kepmsg.log(logfile, hashline, verbose)
    call = "KEPSMOOTH -- "
    call += "infile=" + infile + " "
    call += "outfile=" + outfile + " "
    call += "datacol=" + str(datacol) + " "
    call += "function=" + str(function) + " "
    call += "fscale=" + str(fscale) + " "
    plotit = "n"
    if plot:
        plotit = "y"
    call += "plot=" + plotit + " "
    call += "plotlab=" + str(plotlab) + " "
    overwrite = "n"
    if clobber:
        overwrite = "y"
    call += "clobber=" + overwrite + " "
    chatter = "n"
    if verbose:
        chatter = "y"
    call += "verbose=" + chatter + " "
    call += "logfile=" + logfile
    kepmsg.log(logfile, call + "\n", verbose)

## start time
    kepmsg.clock("KEPSMOOTH started at", logfile, verbose)

## test log file
    logfile = kepmsg.test(logfile)

## clobber output file
    if clobber:
        status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = "ERROR -- KEPSMOOTH: " + outfile + " exists. Use clobber=yes"
        status = kepmsg.err(logfile, message, verbose)

## open input file
    if status == 0:
        instr, status = kepio.openfits(infile, "readonly", logfile, verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr, infile, logfile, verbose, status)
        if cadence == 0.0:
            # header had no cadence; derive it from the time column instead
            tstart, tstop, ncad, cadence, status = kepio.cadence(instr, infile, logfile, verbose, status)
    if status == 0:
        try:
            # FILEVER present implies data already normalized per cadence
            work = instr[0].header["FILEVER"]
            cadenom = 1.0
        except:
            cadenom = cadence

## fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): the builtin 'file' is passed as the filename argument;
        # looks like it should be 'infile' -- confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile, table, logfile, verbose)
    if status == 0:
        flux, status = kepio.readfitscol(infile, instr[1].data, datacol, logfile, verbose)

# filter input data table
    if status == 0:
        try:
            # NANCLEAN keyword means a previous task already filtered the table
            nanclean = instr[1].header["NANCLEAN"]
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0:
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = "NaN cadences removed from data"
            status = kepkey.new("NANCLEAN", True, comment, instr[1], outfile, logfile, verbose)

## read table columns
    if status == 0:
        try:
            intime = instr[1].data.field("barytime")
        except:
            intime, status = kepio.readfitscol(infile, instr[1].data, "time", logfile, verbose)
        indata, status = kepio.readfitscol(infile, instr[1].data, datacol, logfile, verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

## smooth data
    if status == 0:
        # fscale [days] / (cadence [sec] / 86400) = kernel width in cadences
        outdata = kepfunc.smooth(indata, fscale / (cadence / 86400), function)

## comment keyword in output file
    if status == 0:
        status = kepkey.history(call, instr[0], outfile, logfile, verbose)

## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        # presumably converts a truncated epoch to a full Julian date -- confirm
        if intime0 < 2.4e6:
            intime0 += 2.4e6
        ptime = intime - intime0
        xlab = "BJD $-$ %d" % intime0

## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        # nrm = number of digits above the leading one, used as a 10**nrm scale
        nrm = len(str(int(numpy.nanmax(pout)))) - 1
        pout = pout / 10 ** nrm
        pout2 = pout2 / 10 ** nrm
        ylab = "10$^%d$ %s" % (nrm, re.sub("_", "-", plotlab))

## data limits
        xmin = numpy.nanmin(ptime)
        xmax = numpy.nanmax(ptime)
        ymin = numpy.min(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the plotted series so fill() closes the polygon at y=0
        ptime = insert(ptime, [0], [ptime[0]])
        ptime = append(ptime, [ptime[-1]])
        pout = insert(pout, [0], [0.0])
        pout = append(pout, 0.0)
        pout2 = insert(pout2, [0], [0.0])
        pout2 = append(pout2, 0.0)

## plot light curve
    if status == 0 and plot:
        try:
            params = {
                "backend": "png",
                "axes.linewidth": 2.5,
                "axes.labelsize": labelsize,
                "axes.font": "sans-serif",
                "axes.fontweight": "bold",
                "text.fontsize": 12,
                "legend.fontsize": 12,
                "xtick.labelsize": ticksize,
                "ytick.labelsize": ticksize,
            }
            rcParams.update(params)
        except:
            print "ERROR -- KEPSMOOTH: install latex for scientific plotting"
            status = 1
    if status == 0 and plot:
        pylab.figure(1, figsize=[xsize, ysize])

# delete any fossil plots in the matplotlib window
        pylab.clf()

# position axes inside the plotting window
        ax = pylab.subplot(111)
        pylab.subplots_adjust(0.06, 0.1, 0.93, 0.88)

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, "rotation", 90)
        # raw data in orange, smoothed curve over it in thick blue
        pylab.plot(ptime[1:-1], pout[1:-1], color="#ff9900", linestyle="-", linewidth=lwidth)
        fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        pylab.plot(ptime, pout2, color=lcolor, linestyle="-", linewidth=lwidth * 4.0)
        pylab.xlabel(xlab, {"color": "k"})
        pylab.ylabel(ylab, {"color": "k"})
        xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin >= 0.0:
            ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            ylim(1.0e-10, ymax + yr * 0.01)
        pylab.grid()

# render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

## close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

## end time
    if status == 0:
        message = "KEPSMOOTH completed at"
    else:
        message = "\nKEPSMOOTH aborted at"
    kepmsg.clock(message, logfile, verbose)
def kepbinary(infile,outfile,datacol,m1,m2,r1,r2,period,bjd0,eccn,omega,inclination,
              c1,c2,c3,c4,albedo,depth,contamination,gamma,fitparams,eclipses,dopboost,
              tides,job,clobber,verbose,logfile,status):
    """Model, overlay or fit an eclipsing-binary light curve.

    A binary model is computed with kepsim.transitModel from the physical
    parameters (masses m1/m2, radii r1/r2, period, epoch bjd0, eccentricity,
    argument of periastron omega, inclination, limb-darkening c1..c4, albedo,
    depth, contamination, gamma), optionally including eclipses, Doppler
    boosting and tidal distortion.  Behaviour depends on ``job``:

      'model'   -- plot the model alone
      'overlay' -- renormalize the model to the data median and overplot it
      'fit'     -- additionally fit the parameters named in ``fitparams``
                   with a downhill-simplex (scipy.optimize.fmin) minimization
                   of the external objective function ``fitModel``

    status -- inherited error flag; each stage runs only while it is 0.
    Returns nothing; results go to the log file, stdout and the screen plot.
    """

# startup parameters
    status = 0
    labelsize = 24; ticksize = 16; xsize = 17; ysize = 7
    lcolor = '#0000ff'; lwidth = 1.0; fcolor = '#ffff00'; falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPBINARY -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    call += 'm1='+str(m1)+' '
    call += 'm2='+str(m2)+' '
    call += 'r1='+str(r1)+' '
    call += 'r2='+str(r2)+' '
    call += 'period='+str(period)+' '
    call += 'bjd0='+str(bjd0)+' '
    call += 'eccn='+str(eccn)+' '
    call += 'omega='+str(omega)+' '
    call += 'inclination='+str(inclination)+' '
    call += 'c1='+str(c1)+' '
    call += 'c2='+str(c2)+' '
    call += 'c3='+str(c3)+' '
    call += 'c4='+str(c4)+' '
    call += 'albedo='+str(albedo)+' '
    call += 'depth='+str(depth)+' '
    call += 'contamination='+str(contamination)+' '
    call += 'gamma='+str(gamma)+' '
    call += 'fitparams='+str(fitparams)+' '
    eclp = 'n'
    if (eclipses): eclp = 'y'
    call += 'eclipses='+eclp+ ' '
    boost = 'n'
    if (dopboost): boost = 'y'
    call += 'dopboost='+boost+ ' '
    distort = 'n'
    if (tides): distort = 'y'
    call += 'tides='+distort+ ' '
    call += 'job='+str(job)+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPBINARY started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# check and format the list of fit parameters
    if status == 0 and job == 'fit':
        allParams = [m1,m2,r1,r2,period,bjd0,eccn,omega,inclination]
        allNames = ['m1','m2','r1','r2','period','bjd0','eccn','omega','inclination']
        # normalize any separator (|, ., ;, :, whitespace) to commas.
        # NOTE(review): substituting '.' prevents dotted names in the list --
        # confirm '.' is really meant as a separator here.
        fitparams = re.sub('\|',',',fitparams.strip())
        fitparams = re.sub('\.',',',fitparams.strip())
        fitparams = re.sub(';',',',fitparams.strip())
        fitparams = re.sub(':',',',fitparams.strip())
        fitparams = re.sub('\s+',',',fitparams.strip())
        fitparams, status = kepio.parselist(fitparams,logfile,verbose)
        for fitparam in fitparams:
            if fitparam.strip() not in allNames:
                message = 'ERROR -- KEPBINARY: unknown field in list of fit parameters'
                status = kepmsg.err(logfile,message,verbose)

# clobber output file
    if status == 0:
        if clobber: status = kepio.clobber(outfile,logfile,verbose)
        if kepio.fileexists(outfile):
            message = 'ERROR -- KEPBINARY: ' + outfile + ' exists. Use --clobber'
            status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            # FILEVER present implies data already normalized per cadence
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# check the data column exists
    if status == 0:
        try:
            instr[1].data.field(datacol)
        except:
            message = 'ERROR -- KEPBINARY: ' + datacol + ' column does not exist in ' + infile + '[1]'
            status = kepmsg.err(logfile,message,verbose)

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): the builtin 'file' is passed as the filename argument;
        # looks like it should be 'infile' -- confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table
    if status == 0:
        try:
            # NANCLEAN keyword means a previous task already filtered the table
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            # old-format files use a 'barytime' column; fall back to 'time'
            try:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('barytime')[i]) and \
                    numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('time')[i]) and \
                    numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

# read table columns
    if status == 0:
        try:
            time = instr[1].data.field('barytime')
        except:
            time, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        time = time + bjdref
        indata = indata / cadenom

# limb-darkening cofficients
    if status == 0:
        limbdark = numpy.array([c1,c2,c3,c4],dtype='float32')

# time details for model
    if status == 0:
        npt = len(time)
        exptime = numpy.zeros((npt),dtype='float64')
        dtype = numpy.zeros((npt),dtype='int')
        for i in range(npt):
            # exposure time ~ gap to the next cadence; last point uses the
            # previous gap (IndexError on time[i+1] falls into except)
            try:
                exptime[i] = time[i+1] - time[i]
            except:
                exptime[i] = time[i] - time[i-1]

# calculate binary model
    if status == 0:
        tmodel = kepsim.transitModel(1.0,m1,m2,r1,r2,period,inclination,bjd0,eccn,omega,depth,
                                     albedo,c1,c2,c3,c4,gamma,contamination,npt,time,exptime,
                                     dtype,eclipses,dopboost,tides)

# re-normalize binary model to data
    if status == 0 and (job == 'overlay' or job == 'fit'):
        dmedian = numpy.median(indata)
        tmodel = tmodel / numpy.median(tmodel) * dmedian

# define arrays of floating and frozen parameters
    if status == 0 and job =='fit':
        params = []; paramNames = []; arguments = []; argNames = []
        for i in range(len(allNames)):
            if allNames[i] in fitparams:
                params.append(allParams[i])
                paramNames.append(allNames[i])
            else:
                arguments.append(allParams[i])
                argNames.append(allNames[i])
        # the out-of-eclipse flux level is always the last free parameter
        params.append(dmedian)
        params = numpy.array(params,dtype='float32')

# subtract model from data
    if status == 0 and job == 'fit':
        deltam = numpy.abs(indata - tmodel)

# fit statistics
    if status == 0 and job == 'fit':
        aveDelta = numpy.sum(deltam) / npt
        chi2 = math.sqrt(numpy.sum((indata - tmodel) * (indata - tmodel) / (npt - len(params))))

# fit model to data using downhill simplex
    if status == 0 and job == 'fit':
        print ''
        print '%4s %11s %11s' % ('iter', 'delta', 'chi^2')
        print '----------------------------'
        print '%4d %.5E %.5E' % (0,aveDelta,chi2)
        bestFit = scipy.optimize.fmin(fitModel,params,args=(paramNames,dmedian,m1,m2,r1,r2,period,bjd0,eccn,
                                                            omega,inclination,depth,albedo,c1,c2,c3,c4,
                                                            gamma,contamination,npt,time,exptime,indata,
                                                            dtype,eclipses,dopboost,tides),maxiter=1e4)

# calculate best fit binary model
    if status == 0 and job == 'fit':
        print ''
        # copy each fitted value back over the corresponding input parameter
        for i in range(len(paramNames)):
            if 'm1' in paramNames[i].lower():
                m1 = bestFit[i]
                print ' M1 = %.3f Msun' % bestFit[i]
            elif 'm2' in paramNames[i].lower():
                m2 = bestFit[i]
                print ' M2 = %.3f Msun' % bestFit[i]
            elif 'r1' in paramNames[i].lower():
                r1 = bestFit[i]
                print ' R1 = %.4f Rsun' % bestFit[i]
            elif 'r2' in paramNames[i].lower():
                r2 = bestFit[i]
                print ' R2 = %.4f Rsun' % bestFit[i]
            elif 'period' in paramNames[i].lower():
                period = bestFit[i]
            elif 'bjd0' in paramNames[i].lower():
                bjd0 = bestFit[i]
                print 'BJD0 = %.8f' % bestFit[i]
            elif 'eccn' in paramNames[i].lower():
                eccn = bestFit[i]
                print ' e = %.3f' % bestFit[i]
            elif 'omega' in paramNames[i].lower():
                omega = bestFit[i]
                print ' w = %.3f deg' % bestFit[i]
            elif 'inclination' in paramNames[i].lower():
                inclination = bestFit[i]
                print ' i = %.3f deg' % bestFit[i]
        flux = bestFit[-1]
        print ''
        # recompute the model from the best-fit parameters
        tmodel = kepsim.transitModel(flux,m1,m2,r1,r2,period,inclination,bjd0,eccn,omega,depth,
                                     albedo,c1,c2,c3,c4,gamma,contamination,npt,time,exptime,
                                     dtype,eclipses,dopboost,tides)

# subtract model from data
    if status == 0:
        deltaMod = indata - tmodel

# standard deviation of model
    if status == 0:
        stdDev = math.sqrt(numpy.sum((indata - tmodel) * (indata - tmodel)) / npt)

# clean up x-axis unit
    if status == 0:
        time0 = float(int(tstart / 100) * 100.0)
        ptime = time - time0
        xlab = 'BJD $-$ %d' % time0

# clean up y-axis units
    if status == 0:
        # nrm = number of digits above the leading one, used as a 10**nrm scale
        nrm = len(str(int(indata.max())))-1
        pout = indata / 10**nrm
        pmod = tmodel / 10**nrm
        pres = deltaMod / stdDev
        if job == 'fit' or job == 'overlay':
            try:
                ylab1 = 'Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm
                ylab2 = 'Residual ($\sigma$)'
            except:
                ylab1 = 'Flux (10**%d e-/s)' % nrm
                ylab2 = 'Residual (sigma)'
        else:
            ylab1 = 'Normalized Flux'

# dynamic range of model plot
    if status == 0 and job == 'model':
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = tmodel.min()
        ymax = tmodel.max()

# dynamic range of model/data overlay or fit
    if status == 0 and (job == 'overlay' or job == 'fit'):
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        tmin = pmod.min()
        tmax = pmod.max()
        # y range must accommodate both the data and the model
        ymin = numpy.array([ymin,tmin]).min()
        ymax = numpy.array([ymax,tmax]).max()
        rmin = pres.min()
        rmax = pres.max()

# pad the dynamic range
    if status == 0:
        xr = (xmax - xmin) / 80
        yr = (ymax - ymin) / 40
        if job == 'overlay' or job == 'fit':
            rr = (rmax - rmin) / 40

# set up plot style
    if status == 0:
        labelsize = 24; ticksize = 16; xsize = 17; ysize = 7
        lcolor = '#0000ff'; lwidth = 1.0; fcolor = '#ffff00'; falpha = 0.2
        params = {'backend': 'png',
                  'axes.linewidth': 2.5,
                  'axes.labelsize': 24,
                  'axes.font': 'sans-serif',
                  'axes.fontweight' : 'bold',
                  'text.fontsize': 12,
                  'legend.fontsize': 12,
                  'xtick.labelsize': 16,
                  'ytick.labelsize': 16}
        pylab.rcParams.update(params)
        pylab.figure(figsize=[14,10])
        pylab.clf()

# main plot window
        ax = pylab.axes([0.05,0.3,0.94,0.68])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)

# plot model time series
    if status == 0 and job == 'model':
        pylab.plot(ptime,tmodel,color='#0000ff',linestyle='-',linewidth=1.0)
        # pad the series so fill() closes the polygon at y=0
        ptime = numpy.insert(ptime,[0.0],ptime[0])
        ptime = numpy.append(ptime,ptime[-1])
        tmodel = numpy.insert(tmodel,[0.0],0.0)
        tmodel = numpy.append(tmodel,0.0)
        pylab.fill(ptime,tmodel,fc='#ffff00',linewidth=0.0,alpha=0.2)

# plot data time series and best fit
    if status == 0 and (job == 'overlay' or job == 'fit'):
        pylab.plot(ptime,pout,color='#0000ff',linestyle='-',linewidth=1.0)
        ptime = numpy.insert(ptime,[0.0],ptime[0])
        ptime = numpy.append(ptime,ptime[-1])
        pout = numpy.insert(pout,[0],0.0)
        pout = numpy.append(pout,0.0)
        pylab.fill(ptime,pout,fc='#ffff00',linewidth=0.0,alpha=0.2)
        pylab.plot(ptime[1:-1],pmod,color='r',linestyle='-',linewidth=2.0)

# ranges and labels
    if status == 0:
        pylab.xlim(xmin-xr,xmax+xr)
        pylab.ylim(ymin-yr,ymax+yr)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab1, {'color' : 'k'})

# residual plot window
    if status == 0 and (job == 'overlay' or job == 'fit'):
        ax = pylab.axes([0.05,0.07,0.94,0.23])
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)

# plot residual time series
    if status == 0 and (job == 'overlay' or job == 'fit'):
        # dashed guide lines at 0 and +/- 1 sigma
        pylab.plot([ptime[0],ptime[-1]],[0.0,0.0],color='r',linestyle='--',linewidth=1.0)
        pylab.plot([ptime[0],ptime[-1]],[-1.0,-1.0],color='r',linestyle='--',linewidth=1.0)
        pylab.plot([ptime[0],ptime[-1]],[1.0,1.0],color='r',linestyle='--',linewidth=1.0)
        pylab.plot(ptime[1:-1],pres,color='#0000ff',linestyle='-',linewidth=1.0)
        pres = numpy.insert(pres,[0],rmin)
        pres = numpy.append(pres,rmin)
        pylab.fill(ptime,pres,fc='#ffff00',linewidth=0.0,alpha=0.2)

# ranges and labels of residual time series
    if status == 0 and (job == 'overlay' or job == 'fit'):
        pylab.xlim(xmin-xr,xmax+xr)
        pylab.ylim(rmin-rr,rmax+rr)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab2, {'color' : 'k'})

# display the plot
    if status == 0:
        pylab.draw()
def kepoutlier(infile,outfile,datacol,nsig,stepsize,npoly,niter,
               operation,ranges,plot,plotfit,clobber,verbose,logfile,status,
               cmdLine=False):
    """Identify and remove or replace outliers in a Kepler light curve.

    Polynomials of order ``npoly`` are fit, with ``niter`` iterations of
    ``nsig``-sigma clipping, to consecutive time windows of width
    ``stepsize`` days.  Cadences (restricted to ``ranges``) deviating from
    the fit by more than nsig times the local fit sigma are treated according
    to ``operation``: 'replace' substitutes a random value drawn about the
    fit (kepstat.randarray); any other value drops the row.  The filtered
    table is written to ``outfile``.

    infile/outfile -- input and output FITS light-curve file names
    datacol        -- table column to examine
    plot/plotfit   -- plot the light curve / overplot the window fits
    status         -- inherited error flag; each stage runs only while it is 0

    Returns nothing; results go to outfile, the log file and the screen plot.
    """

# startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPOUTLIER -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'nsig='+str(nsig)+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'npoly='+str(npoly)+' '
    call += 'niter='+str(niter)+' '
    call += 'operation='+str(operation)+' '
    call += 'ranges='+str(ranges)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    plotf = 'n'
    if (plotfit): plotf = 'y'
    call += 'plotfit='+plotf+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPOUTLIER started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPOUTLIER: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            # FILEVER present implies data already normalized per cadence
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): the builtin 'file' is passed as the filename argument;
        # looks like it should be 'infile' -- confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table
    if status == 0:
        try:
            # NANCLEAN keyword means a previous task already filtered the table
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            # old-format files use a 'barytime' column; fall back to 'time'
            try:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('barytime')[i]) and \
                    numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('time')[i]) and \
                    numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

# read table columns
    if status == 0:
        try:
            # presumably converts truncated barytime to a full Julian date -- confirm
            intime = instr[1].data.field('barytime') + 2.4e6
        except:
            intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

# time ranges for region to be corrected
    if status == 0:
        t1, t2, status = kepio.timeranges(ranges,logfile,verbose)
        cadencelis, status = kepstat.filterOnRange(intime,t1,t2)

# find limits of each time step
    if status == 0:
        tstep1 = []; tstep2 = []
        work = intime[0]
        while work < intime[-1]:
            tstep1.append(work)
            # step end is clipped to the final timestamp
            tstep2.append(array([work+stepsize,intime[-1]],dtype='float64').min())
            work += stepsize

# find cadence limits of each time step
    if status == 0:
        cstep1 = []; cstep2 = []
        work1 = 0; work2 = 0
        for i in range(len(intime)):
            if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
                work2 = i
            else:
                cstep1.append(work1)
                cstep2.append(work2)
                work1 = i; work2 = i
        # close the final (possibly partial) step
        cstep1.append(work1)
        cstep2.append(work2)
        outdata = indata * 1.0

# comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        # nrm = number of digits above the leading one, used as a 10**nrm scale
        nrm = len(str(int(pout.max())))-1
        pout = pout / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the plotted series so fill() closes the polygon at y=0
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot light curve
    if status == 0 and plot:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

# plot data
        ax = pylab.axes([0.06,0.1,0.93,0.87])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        xlabel(xlab, {'color' : 'k'})
        if not plotLatex:
            ylab = '10**%d electrons/sec' % nrm
        ylabel(ylab, {'color' : 'k'})
        grid()

# loop over each time step, fit data, determine rms
    if status == 0:
        masterfit = indata * 0.0
        mastersigma = zeros(len(masterfit))
        functype = 'poly' + str(npoly)
        for i in range(len(cstep1)):
            pinit = [indata[cstep1[i]:cstep2[i]+1].mean()]
            if npoly > 0:
                for j in range(npoly):
                    pinit.append(0.0)
            pinit = array(pinit,dtype='float32')
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]],
                                   indata[cstep1[i]:cstep2[i]+1],None,nsig,nsig,niter,logfile,
                                   verbose)
                # evaluate the polynomial over the step in its local time frame
                for j in range(len(coeffs)):
                    masterfit[cstep1[i]:cstep2[i]+1] += coeffs[j] * \
                        (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]])**j
                for j in range(cstep1[i],cstep2[i]+1):
                    mastersigma[j] = sigma
                if plotfit:
                    pylab.plot(plotx+intime[cstep1[i]]-intime0,ploty / 10**nrm,
                               'g',lw='3')
            except:
                # fit failed: keep the data and make the step effectively
                # immune to rejection via a huge sigma
                for j in range(cstep1[i],cstep2[i]+1):
                    masterfit[j] = indata[j]
                    mastersigma[j] = 1.0e10
                message  = 'WARNING -- KEPOUTLIER: could not fit range '
                message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]])
                kepmsg.warn(None,message)

# reject outliers
    if status == 0:
        rejtime = []; rejdata = []; naxis2 = 0
        for i in range(len(masterfit)):
            if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis:
                rejtime.append(intime[i])
                rejdata.append(indata[i])
                if operation == 'replace':
                    # substitute a random deviate about the local fit;
                    # other operations simply drop the row
                    [rnd] = kepstat.randarray([masterfit[i]],[mastersigma[i]])
                    table[naxis2] = table[i]
                    table.field(datacol)[naxis2] = rnd
                    naxis2 += 1
            else:
                table[naxis2] = table[i]
                naxis2 += 1
        instr[1].data = table[:naxis2]
        rejtime = array(rejtime,dtype='float64')
        rejdata = array(rejdata,dtype='float32')
        pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro')

# plot ranges
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)

# render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file
    if status == 0:
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

# end time
    if (status == 0):
        message = 'KEPOUTLIER completed at'
    else:
        message = '\nKEPOUTLIER aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepfilter(infile,outfile,datacol,function,cutoff,passband,plot,plotlab,
              clobber,verbose,logfile,status,cmdLine=False):
    """Bandpass-filter a Kepler light curve.

    Convolves the time series in column `datacol` of `infile` with a
    low-pass kernel ('boxcar', 'gauss' or 'sinc') whose width is derived
    from `cutoff` (cut-off frequency, 1/days).  passband='low' keeps the
    smoothed series; passband='high' subtracts it (restoring the median)
    so high frequencies remain.  Result is written to `outfile` and
    optionally plotted.  Follows the PyKE convention: every step is
    guarded by ``status == 0`` and errors are routed through kepmsg.
    """

    ## startup parameters
    status = 0
    numpy.seterr(all="ignore")
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPFILTER -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'function='+str(function)+' '
    call += 'cutoff='+str(cutoff)+' '
    call += 'passband='+str(passband)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    ## start time
    kepmsg.clock('KEPFILTER started at',logfile,verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPFILTER: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        # was the builtin `file` (a NameError on Python 3); the filename is intended
        instr = kepkey.emptykeys(instr,infile,logfile,verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
        flux, status = kepio.readsapcol(infile,table,logfile,verbose)

    # filter input data table: drop NaN/zero-flux cadences once, then
    # record the NANCLEAN keyword so the step is skipped on reprocessing
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    ## read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,instr[1].data,logfile,verbose)
    if status == 0:
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## define data sampling
    if status == 0:
        tr = 1.0 / (cadence / 86400)       # sampling rate in cadences per day
        timescale = 1.0 / (cutoff / tr)    # kernel width in cadences

    ## define convolution function
    if status == 0:
        if function == 'boxcar':
            # array shape must be an integer (numpy.ceil returns a float)
            filtfunc = numpy.ones(int(numpy.ceil(timescale)))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            # linspace sample count must be an int under modern numpy
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,int(dx)))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,int(dx))
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)    # normalize kernel to unit area

    ## pad time series at both ends with a noise model so the convolution
    ## does not roll off at the edges of the real data
    if status == 0:
        ave, sigma = kepstat.stdev(indata[:len(filtfunc)])
        padded = append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave,
                                          numpy.ones(len(filtfunc)) * sigma), indata)
        ave, sigma = kepstat.stdev(indata[-len(filtfunc):])
        padded = append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave,
                                                  numpy.ones(len(filtfunc)) * sigma))

    ## convolve data
    if status == 0:
        convolved = convolve(padded,filtfunc,'same')

    ## remove padding from the output array
    if status == 0:
        # original code branched on function == 'boxcar' but both branches
        # were identical; strip one kernel length from each end unconditionally
        outdata = convolved[len(filtfunc):-len(filtfunc)]

    ## subtract low frequencies
    if status == 0 and passband == 'high':
        outmedian = median(outdata)
        outdata = indata - outdata + outmedian

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, plotlab)

        ## data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = numpy.nanmin(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # bracket the series with zero-flux points so fill() closes cleanly
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pout2 = insert(pout2,[0],[0.0])
        pout2 = append(pout2,0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            # was a Python-2 print statement; rest of the file uses print()
            print('ERROR -- KEPFILTER: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

        ## plot filtered data
        ax = pylab.axes([0.06,0.1,0.93,0.87])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,pout,color='#ff9900',linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        if passband == 'low':
            pylab.plot(ptime[1:-1],pout2[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        else:
            pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth)
            fill(ptime,pout2,color=lcolor,linewidth=0.0,alpha=falpha)
        xlabel(xlab, {'color' : 'k'})
        ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    ## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPFILTER completed at'
    else:
        message = '\nKEPFILTER aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepsmooth(infile,outfile,datacol,function,fscale,plot,plotlab,
              clobber,verbose,logfile,status, cmdLine=False):
    """Smooth a Kepler light curve with a running function.

    The series in column `datacol` of `infile` is smoothed by
    kepfunc.smooth using kernel `function` over `fscale` days (converted
    to cadences from the file's cadence).  The smoothed series replaces
    `datacol` in `outfile`; input and smoothed curves are optionally
    plotted.  Steps are guarded by ``status == 0`` per PyKE convention.
    """

    ## startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 18
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSMOOTH -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'function='+str(function)+' '
    call += 'fscale='+str(fscale)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    ## start time
    kepmsg.clock('KEPSMOOTH started at',logfile,verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSMOOTH: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
        if cadence == 0.0:
            # no cadence keywords: derive cadence from the time column instead
            tstart, tstop, ncad, cadence, status = kepio.cadence(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        # was the builtin `file` (a NameError on Python 3); the filename is intended
        instr = kepkey.emptykeys(instr,infile,logfile,verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
    if status == 0:
        flux, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)

    # filter input data table: drop NaN/zero-flux cadences once, then
    # record the NANCLEAN keyword so the step is skipped on reprocessing
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    ## read table columns
    if status == 0:
        try:
            intime = instr[1].data.field('barytime')
        except:
            intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## smooth data over fscale days expressed in cadences
    if status == 0:
        outdata = kepfunc.smooth(indata,fscale/(cadence/86400),function)

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, re.sub('_','-',plotlab))

        ## data limits
        xmin = numpy.nanmin(ptime)
        xmax = numpy.nanmax(ptime)
        # was numpy.min, which returns NaN if any cadence is NaN and
        # breaks the y-axis limits; use nanmin to match nanmax above
        ymin = numpy.nanmin(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # bracket the series with zero-flux points so fill() closes cleanly
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pout2 = insert(pout2,[0],[0.0])
        pout2 = append(pout2,0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            print('ERROR -- KEPSMOOTH: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(1,figsize=[xsize,ysize])

        # delete any fossil plots in the matplotlib window
        pylab.clf()

        # position axes inside the plotting window
        ax = pylab.subplot(111)
        pylab.subplots_adjust(0.06,0.1,0.93,0.88)

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90)

        pylab.plot(ptime[1:-1],pout[1:-1],color='#ff9900',linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth*4.0)
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    ## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPSMOOTH completed at'
    else:
        message = '\nKEPSMOOTH aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepregr(infile, outfile, datacol, kmethod, kneighb, plot, plotlab,
            clobber, verbose, logfile, status):
    """Perform a k-nearest neighbor regression analysis.

    The series in column `datacol` of `infile` is replaced by the
    prediction of a k-NN regression (method `kmethod`, `kneighb`
    neighbors, via the module-level ``knn_predict``) and written to
    `outfile`; data and regression are optionally plotted and saved as
    a PNG.  Steps are guarded by ``status == 0`` per PyKE convention.
    """

    ## startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#47AE10'
    lwidth = 1.0
    fcolor = '#9AFF9A'
    falpha = 0.3

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPREGR -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + str(datacol) + ' '
    call += 'kmethod=' + str(kmethod) + ' '
    call += 'kneighb=' + str(kneighb) + ' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot=' + plotit + ' '
    call += 'plotlab=' + str(plotlab) + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    ## start time
    kepmsg.clock('KEPREGR started at', logfile, verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPREGR: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)
        if cadence == 0.0:
            # no cadence keywords: derive cadence from the time column instead
            tstart, tstop, ncad, cadence, status = kepio.cadence(
                instr, infile, logfile, verbose, status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        # was the builtin `file` (a NameError on Python 3); the filename is intended
        instr = kepkey.emptykeys(instr, infile, logfile, verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile, table, logfile, verbose)
    if status == 0:
        flux, status = kepio.readfitscol(infile, instr[1].data, datacol,
                                         logfile, verbose)

    # filter input data table: drop NaN/zero-flux cadences once, then
    # record the NANCLEAN keyword so the step is skipped on reprocessing
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i])
                        and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN', True, comment, instr[1], outfile,
                                logfile, verbose)

    ## read table columns
    if status == 0:
        try:
            intime = instr[1].data.field('barytime')
        except:
            intime, status = kepio.readfitscol(infile, instr[1].data, 'time',
                                               logfile, verbose)
        indata, status = kepio.readfitscol(infile, instr[1].data, datacol,
                                           logfile, verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## k-nearest neighbor regression
    if status == 0:
        outdata = knn_predict(intime, indata, kmethod, kneighb)

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call, instr[0], outfile, logfile, verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        nrm = len(str(int(numpy.nanmax(pout)))) - 1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, plotlab)

        ## data limits
        xmin = numpy.nanmin(ptime)
        xmax = numpy.nanmax(ptime)
        # was numpy.min, which returns NaN if any cadence is NaN and
        # breaks the y-axis limits; use nanmin to match nanmax above
        ymin = numpy.nanmin(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        # bracket the series with zero-flux points so fill() closes cleanly
        ptime = insert(ptime, [0], [ptime[0]])
        ptime = append(ptime, [ptime[-1]])
        pout = insert(pout, [0], [0.0])
        pout = append(pout, 0.0)
        pout2 = insert(pout2, [0], [0.0])
        pout2 = append(pout2, 0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {
                'backend': 'png',
                'axes.linewidth': 2.5,
                'axes.labelsize': labelsize,
                'axes.font': 'sans-serif',
                'axes.fontweight': 'bold',
                'text.fontsize': 12,
                'legend.fontsize': 12,
                'xtick.labelsize': ticksize,
                'ytick.labelsize': ticksize
            }
            rcParams.update(params)
        except:
            print('ERROR -- KEPREGR: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(1, figsize=[xsize, ysize])

        ## plot regression data
        ax = pylab.axes([0.06, 0.1, 0.93, 0.87])
        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.scatter(ptime, pout, color='#214CAE', s=5)
        fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        # trim kneighb cadences from each end where the prediction is edge-biased
        pylab.plot(ptime[kneighb:-kneighb], pout2[kneighb:-kneighb],
                   color=lcolor, linestyle='-', linewidth=lwidth * 2.0)
        xlabel(xlab, {'color': 'k'})
        ylabel(ylab, {'color': 'k'})
        xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin >= 0.0:
            ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            ylim(1.0e-10, ymax + yr * 0.01)
        pylab.grid()
        pylab.draw()
        # raw string: '\.' and '\S' are regex escapes, not string escapes
        pylab.savefig(re.sub(r'\.\S+', '.png', outfile), dpi=100)

    ## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

    ## end time
    if (status == 0):
        message = 'KEPREGR completed at'
    else:
        message = '\nKEPREGR aborted at'
    kepmsg.clock(message, logfile, verbose)
def keptransitmodel(inputfile, datacol, errorcol, period_d, rprs, T0, Ecc,
                    ars, inc, omega, LDparams, sec, norm=False, verbose=0,
                    logfile='logfile.dat', status=0, cmdLine=False):
    """Overlay a transit model on a Kepler light curve.

    Reads flux (`datacol`) and errors (`errorcol`) from `inputfile`,
    optionally sigma-clips and median-normalizes the flux, evaluates a
    tmod.lightcurve transit model for the given orbital parameters
    (period in days, Rp/R*, epoch T0, eccentricity, a/R*, inclination in
    degrees, argument of periastron, limb-darkening coefficients, and
    secondary-eclipse flag), then folds and plots data against model.
    """

    # write to a logfile
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPTRANSIT -- '
    call += 'inputfile=' + inputfile + ' '
    call += 'datacol=' + str(datacol) + ' '
    call += 'errorcol=' + str(errorcol) + ' '
    call += 'period_d=' + str(period_d) + ' '
    call += 'rprs=' + str(rprs) + ' '
    call += 'T0=' + str(T0) + ' '
    call += 'Ecc=' + str(Ecc) + ' '
    call += 'ars=' + str(ars) + ' '
    call += 'inc=' + str(inc) + ' '
    call += 'omega=' + str(omega) + ' '
    call += 'LDparams=' + str(LDparams) + ' '
    call += 'sec=' + str(sec) + ' '
    # the call string was assembled but never logged ("#to finish");
    # log it like every other task in this module does
    kepmsg.log(logfile, call + '\n', verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(inputfile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, inputfile, logfile, verbose, status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    if status == 0:
        # was the builtin `file` (a NameError on Python 3); the filename is intended
        instr = kepkey.emptykeys(instr, inputfile, logfile, verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(inputfile, instr[1], logfile,
                                          verbose)

    # filter input data table: keep only cadences with finite time and flux;
    # old files use a 'barytime' column, newer ones 'time'
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            try:
                for i in range(len(table.field(0))):
                    if np.isfinite(table.field('barytime')[i]) and \
                            np.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                for i in range(len(table.field(0))):
                    if np.isfinite(table.field('time')[i]) and \
                            np.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
#            comment = 'NaN cadences removed from data'
#            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    # read table columns
    if status == 0:
        try:
            intime = instr[1].data.field('barytime') + 2.4e6
        except:
            intime, status = kepio.readfitscol(inputfile, instr[1].data,
                                               'time', logfile, verbose)
        indata, status = kepio.readfitscol(inputfile, instr[1].data, datacol,
                                           logfile, verbose)
        inerr, status = kepio.readfitscol(inputfile, instr[1].data, errorcol,
                                          logfile, verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom
        inerr = inerr / cadenom

    if status == 0 and norm:
        # first remove outliers before normalizing
        threesig = 3. * np.std(indata)
        med = np.median(indata)
        # the original mask compared each point with itself
        # (indata < indata + threesig), which is always true and made the
        # clip a no-op; compare against the median instead
        mask = np.logical_and(indata < med + threesig,
                              indata > med - threesig)
        # now normalize
        indata = indata / np.median(indata[mask])

    if status == 0:
        # need to check if LD params are sensible and in right format
        LDparams = [float(i) for i in LDparams.split()]
        # model expects inclination in radians
        inc = inc * np.pi / 180.

    if status == 0:
        modelfit = tmod.lightcurve(intime, period_d, rprs, T0, Ecc, ars, inc,
                                   omega, LDparams, sec)

    if status == 0:
        phi, fluxfold, modelfold, errorfold, phiNotFold = fold_data(
            intime, modelfit, indata, inerr, period_d, T0)

    if status == 0:
        do_plot(intime, modelfit, indata, inerr, period_d, T0, cmdLine)
def kepstddev(infile,outfile,datacol,timescale,clobber,verbose,logfile,status,cmdLine=False):
    """Compute and plot the running CDPP of a Kepler light curve.

    Calculates the running fractional standard deviation of `datacol`
    over `timescale` hours, scales it to CDPP in ppm, prints the global
    standard deviation plus median and RMS CDPP, plots the CDPP time
    series, and writes `outfile` with MCDPP/RCDPP header keywords and a
    new CDPP table column (NaN cadences restored in place).
    """

    # startup parameters
    status = 0
    labelsize = 44
    ticksize = 36
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSTDDEV -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'timescale='+str(timescale)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPSTDDEV started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSTDDEV: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    if status == 0:
        # was the builtin `file` (a NameError on Python 3); the filename is intended
        instr = kepkey.emptykeys(instr,infile,logfile,verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # filter input data table: drop rows where time or flux is NaN
    if status == 0:
        work1 = numpy.array([table.field('time'), table.field(datacol)])
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]

    # read table columns (rot90 leaves time in column 1, flux in column 0)
    if status == 0:
        intime = work1[:,1] + bjdref
        indata = work1[:,0]

    # calculate STDDEV in units of ppm
    if status == 0:
        stddev = running_frac_std(intime,indata,timescale/24) * 1.0e6
        astddev = numpy.std(indata) * 1.0e6
        cdpp = stddev / sqrt(timescale * 3600.0 / cadence)

    # filter cdpp: replace spikes > 10x the median with the previous value
    # (note: median(cdpp) is re-evaluated as values are replaced)
    if status == 0:
        for i in range(len(cdpp)):
            if cdpp[i] > median(cdpp) * 10.0: cdpp[i] = cdpp[i-1]

    # calculate median STDDEV (original code computed medcdpp twice; once is enough)
    if status == 0:
        medcdpp = ones((len(cdpp)),dtype='float32') * median(cdpp[:])
#        print '\nMedian %.1fhr standard deviation = %d ppm' % (timescale, median(stddev[:]))
        print('\nStandard deviation = %d ppm' % astddev)
        print('Median %.1fhr CDPP = %d ppm' % (timescale, median(cdpp[:])))

    # calculate RMS STDDEV
    if status == 0:
        rms, status = kepstat.rms(cdpp,zeros(len(stddev)),logfile,verbose)
        rmscdpp = ones((len(cdpp)),dtype='float32') * rms
        print('   RMS %.1fhr CDPP = %d ppm\n' % (timescale, rms))

    # clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    # clean up y-axis units
    if status == 0:
        pout = copy(cdpp)
        nrm = math.ceil(math.log10(median(cdpp))) - 1.0
#        pout = pout / 10**nrm
#        ylab = '%.1fhr $\sigma$ (10$^%d$ ppm)' % (timescale,nrm)
        # raw string: '\s' is a LaTeX escape, not a Python one
        ylab = r'%.1fhr $\sigma$ (ppm)' % timescale

        # data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # bracket the series with zero points so fill() closes cleanly
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

    # plot style
    if status == 0:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': 36,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': 32,
                      'ytick.labelsize': 36}
            pylab.rcParams.update(params)
        except:
            pass

        # define size of plot on monitor screen
        pylab.figure(figsize=[xsize,ysize])

        # delete any fossil plots in the matplotlib window
        pylab.clf()

        # position first axes inside the plotting window
        ax = pylab.axes([0.07,0.15,0.92,0.83])

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        ax.yaxis.set_major_locator(MaxNLocator(5))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90,fontsize=36)

        # plot flux vs time, breaking the line across cadence gaps
        ltime = array([],dtype='float64')
        ldata = array([],dtype='float32')
        dt = 0
        work1 = 2.0 * cadence / 86400
        for i in range(1,len(ptime)-1):
            dt = ptime[i] - ptime[i-1]
            if dt < work1:
                ltime = append(ltime,ptime[i])
                ldata = append(ldata,pout[i])
            else:
                pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0)
                ltime = array([],dtype='float64')
                ldata = array([],dtype='float32')
        pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0)

        # plot the fill color below data time series, with no data gaps
        pylab.fill(ptime,pout,fc='#ffff00',linewidth=0.0,alpha=0.2)

        # plot median CDPP
#        pylab.plot(intime - intime0,medcdpp / 10**nrm,color='r',linestyle='-',linewidth=2.0)
#        pylab.plot(intime - intime0,medcdpp,color='r',linestyle='-',linewidth=2.0)

        # plot RMS CDPP
#        pylab.plot(intime - intime0,rmscdpp / 10**nrm,color='r',linestyle='--',linewidth=2.0)

        # define plot x and y limits
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin - yr * 0.01 <= 0.0:
            pylab.ylim(1.0e-10, ymax + yr * 0.01)
        else:
            pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01)

        # plot labels
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})

        # make grid on plot
        pylab.grid()

    # render plot
    if status == 0:
        if cmdLine:
            pylab.show(block=True)
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    # add NaNs back into data so the new column aligns with the full table
    if status == 0:
        n = 0
        work1 = array([],dtype='float32')
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)
        for i in range(len(table.field(0))):
            if isfinite(table.field('time')[i]) and isfinite(table.field(datacol)[i]):
                work1 = append(work1,cdpp[n])
                n += 1
            else:
                work1 = append(work1,nan)

    # write output file: CDPP summary keywords plus a new CDPP column
    if status == 0:
        status = kepkey.new('MCDPP%d' % (timescale * 10.0),medcdpp[0],
                            'Median %.1fhr CDPP (ppm)' % timescale,
                            instr[1],outfile,logfile,verbose)
        status = kepkey.new('RCDPP%d' % (timescale * 10.0),rmscdpp[0],
                            'RMS %.1fhr CDPP (ppm)' % timescale,
                            instr[1],outfile,logfile,verbose)
        colname = 'CDPP_%d' % (timescale * 10)
        col1 = pyfits.Column(name=colname,format='E13.7',array=work1)
        cols = instr[1].data.columns + col1
        instr[1] = pyfits.new_table(cols,header=instr[1].header)
        instr.writeto(outfile)

    # comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    # close FITS
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    # end time
    if (status == 0):
        message = 'KEPSTDDEV completed at'
    else:
        message = '\nKEPSTDDEV aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepsff(infile, outfile, datacol, cenmethod, stepsize, npoly_cxcy,
           sigma_cxcy, npoly_ardx, npoly_dsdt, sigma_dsdt, npoly_arfl,
           sigma_arfl, plotres, clobber, verbose, logfile, status,
           cmdLine=False):
    """Correct K2 light-curve systematics by self-flat-fielding (SFF).

    The time series is cut into windows of ``stepsize`` days.  Within each
    window the source centroid track is fitted, rotated onto its principal
    axes, converted to an arclength coordinate, and the arclength--flux
    correlation is divided out of the flux columns of the output FITS file.
    Cadences consistent with thruster firings (outliers in d(arclength)/dt)
    are flagged in SAP_QUALITY with bit 131072.

    Parameters
    ----------
    infile / outfile : str -- input and output FITS light-curve file names.
    datacol : str -- name of the flux column to detrend.
    cenmethod : str -- 'moments' selects MOM_CENTR1/2 centroids, anything
        else selects PSF_CENTR1/2.
    stepsize : float -- window length in days.
    npoly_* / sigma_* : int / float -- polynomial orders and sigma-clip
        thresholds for the successive fits (centroid-centroid, arclength
        vs. rotated x, ds/dt vs. time, arclength vs. flux).
    plotres : bool -- render and save a diagnostic PNG per window.
    clobber, verbose, logfile, status, cmdLine -- standard PyKE plumbing;
        ``status`` is overwritten to 0 immediately (the parameter value is
        ignored).

    Returns
    -------
    None.  Side effects: writes ``outfile``, log messages, optional plots.
    """

    # startup parameters (hard-wired plot cosmetics)
    status = 0
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")  # silence numpy warnings (NaN math is expected below)

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPSFF -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + datacol + ' '
    call += 'cenmethod=' + cenmethod + ' '
    call += 'stepsize=' + str(stepsize) + ' '
    call += 'npoly_cxcy=' + str(npoly_cxcy) + ' '
    call += 'sigma_cxcy=' + str(sigma_cxcy) + ' '
    call += 'npoly_ardx=' + str(npoly_ardx) + ' '
    call += 'npoly_dsdt=' + str(npoly_dsdt) + ' '
    call += 'sigma_dsdt=' + str(sigma_dsdt) + ' '
    call += 'npoly_arfl=' + str(npoly_arfl) + ' '
    call += 'sigma_arfl=' + str(sigma_arfl) + ' '
    savep = 'n'
    if (plotres): savep = 'y'
    call += 'plotres=' + savep + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    # start time
    kepmsg.clock('KEPSFF started at', logfile, verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)
    if status == 0:
        try:
            # presence of FILEVER marks a format-compliant file; cadence
            # normalization is then unity
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    # NOTE(review): the second argument is the *builtin* `file`, not `infile`
    # -- looks like a bug (NameError on Python 3); confirm against kepkey API.
    if status == 0:
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

    # determine sequence of windows in time: winedge holds the row indices
    # bounding each detrending window of ~stepsize days
    if status == 0:
        frametim = instr[1].header['FRAMETIM']
        num_frm = instr[1].header['NUM_FRM']
        exptime = frametim * num_frm / 86400  # cadence length in days
        tstart = table.field('TIME')[0]
        tstop = table.field('TIME')[-1]
        winedge = arange(tstart, tstop, stepsize)
        if tstop > winedge[-1] + stepsize / 2:
            winedge = append(winedge, tstop)
        else:
            winedge[-1] = tstop
        winedge = (winedge - tstart) / exptime  # days -> row offsets
        winedge = winedge.astype(int)
        if len(table.field('TIME')) > winedge[-1] + 1:
            winedge = append(winedge, len(table.field('TIME')))
        elif len(table.field('TIME')) < winedge[-1]:
            winedge[-1] = len(table.field('TIME'))

    # step through the time windows
    if status == 0:
        for iw in range(1, len(winedge)):
            t1 = winedge[iw - 1]
            t2 = winedge[iw]

            # filter input data table: stack the columns of this window,
            # rotate so rows are cadences (column order is REVERSED by
            # rot90: col 7 = TIME ... col 0 = SAP_QUALITY)
            work1 = numpy.array([table.field('TIME')[t1:t2],
                                 table.field('CADENCENO')[t1:t2],
                                 table.field(datacol)[t1:t2],
                                 table.field('MOM_CENTR1')[t1:t2],
                                 table.field('MOM_CENTR2')[t1:t2],
                                 table.field('PSF_CENTR1')[t1:t2],
                                 table.field('PSF_CENTR2')[t1:t2],
                                 table.field('SAP_QUALITY')[t1:t2]], 'float64')
            work1 = numpy.rot90(work1, 3)
            work2 = work1[~numpy.isnan(work1).any(1)]
            # keep quality==0 cadences; the >1e5 clause also keeps rows --
            # presumably to retain a specific quality bit; verify intent
            work2 = work2[(work2[:, 0] == 0.0) | (work2[:, 0] > 1e5)]

            # assign table columns (reversed order, see above)
            intime = work2[:, 7] + bjdref
            cadenceno = work2[:, 6].astype(int)
            indata = work2[:, 5]
            mom_centr1 = work2[:, 4]
            mom_centr2 = work2[:, 3]
            psf_centr1 = work2[:, 2]
            psf_centr2 = work2[:, 1]
            sap_quality = work2[:, 0]
            if cenmethod == 'moments':
                centr1 = copy(mom_centr1)
                centr2 = copy(mom_centr2)
            else:
                centr1 = copy(psf_centr1)
                centr2 = copy(psf_centr2)

            # fit centroid data with low-order polynomial (row vs column)
            cfit = zeros((len(centr2)))
            csig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_cxcy)
            pinit = array([nanmean(centr2)])
            if npoly_cxcy > 0:
                for j in range(npoly_cxcy):
                    pinit = append(pinit, 0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
                for j in range(len(coeffs)):
                    cfit += coeffs[j] * numpy.power(centr1, j)
                csig[:] = sigma
            except:
                # NOTE(review): bare except + hard process kill; any failure
                # mode in lsqclip lands here, not just "no data in window"
                message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (
                    t1, t2)
                status = kepmsg.err(logfile, message, verbose)
                # sys.exit('')
                os._exit(1)

            # reject outliers from the centroid fit
            time_good = array([], 'float64')
            centr1_good = array([], 'float32')
            centr2_good = array([], 'float32')
            flux_good = array([], 'float32')
            cad_good = array([], 'int')
            for i in range(len(cfit)):
                if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
                    time_good = append(time_good, intime[i])
                    centr1_good = append(centr1_good, centr1[i])
                    centr2_good = append(centr2_good, centr2[i])
                    flux_good = append(flux_good, indata[i])
                    cad_good = append(cad_good, cadenceno[i])

            # covariance matrix for mean-subtracted centroid time series
            centr = concatenate([[centr1_good] - mean(centr1_good),
                                 [centr2_good] - mean(centr2_good)])
            covar = cov(centr)

            # eigenvector eigenvalues of covariance matrix
            # NOTE(review): `eval` shadows the Python builtin
            [eval, evec] = numpy.linalg.eigh(covar)
            ex = arange(-10.0, 10.0, 0.1)
            epar = evec[1, 1] / evec[0, 1] * ex  # principal axis line
            enor = evec[1, 0] / evec[0, 0] * ex  # normal axis line
            ex = ex + mean(centr1)
            epar = epar + mean(centr2_good)
            enor = enor + mean(centr2_good)

            # rotate centroid data into the eigenbasis
            centr_rot = dot(evec.T, centr)

            # fit polynomial to rotated centroids
            rfit = zeros((len(centr2)))
            rsig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            # NOTE(review): first pinit is immediately discarded by the
            # second assignment -- presumably deliberate, verify
            pinit = array([nanmean(centr_rot[0, :])])
            pinit = array([1.0])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit, 0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile, message, verbose)
            rx = linspace(nanmin(centr_rot[1, :]), nanmax(centr_rot[1, :]), 100)
            ry = zeros((len(rx)))
            for i in range(len(coeffs)):
                ry = ry + coeffs[i] * numpy.power(rx, i)

            # calculate arclength of centroids by trapezoidal integration of
            # sqrt(1 + (dy/dx)^2) along the fitted track
            s = zeros((len(rx)))
            for i in range(1, len(s)):
                work3 = ((ry[i] - ry[i - 1]) / (rx[i] - rx[i - 1]))**2
                s[i] = s[i - 1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i - 1])

            # fit arclength as a function of strongest eigenvector
            sfit = zeros((len(centr2)))
            ssig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(s)])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit, 0.0)
            try:
                acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile, message, verbose)

            # correlate arclength with detrended flux
            t = copy(time_good)
            c = copy(cad_good)
            y = copy(flux_good)
            z = centr_rot[1, :]
            x = zeros((len(z)))
            for i in range(len(acoeffs)):
                x = x + acoeffs[i] * numpy.power(z, i)

            # calculate time derivative of arclength s (finite differences)
            dx = zeros((len(x)))
            for i in range(1, len(x)):
                dx[i] = (x[i] - x[i - 1]) / (t[i] - t[i - 1])
            dx[0] = dx[1]

            # fit polynomial to derivative and flag outliers (thruster firings)
            dfit = zeros((len(dx)))
            dsig = zeros((len(dx)))
            functype = 'poly' + str(npoly_dsdt)
            pinit = array([nanmean(dx)])
            if npoly_dsdt > 0:
                for j in range(npoly_dsdt):
                    pinit = append(pinit, 0.0)
            try:
                dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
                    kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile, message, verbose)
            for i in range(len(dcoeffs)):
                dfit = dfit + dcoeffs[i] * numpy.power(t, i)

            # split cadences: *_pnt within sigma_dsdt of the ds/dt fit (kept),
            # *_thr outside it (suspected thruster firings)
            centr1_pnt = array([], 'float32')
            centr2_pnt = array([], 'float32')
            time_pnt = array([], 'float64')
            flux_pnt = array([], 'float32')
            dx_pnt = array([], 'float32')
            s_pnt = array([], 'float32')
            time_thr = array([], 'float64')
            flux_thr = array([], 'float32')
            dx_thr = array([], 'float32')
            thr_cadence = []
            for i in range(len(t)):
                if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
                    time_pnt = append(time_pnt, time_good[i])
                    flux_pnt = append(flux_pnt, flux_good[i])
                    dx_pnt = append(dx_pnt, dx[i])
                    s_pnt = append(s_pnt, x[i])
                    centr1_pnt = append(centr1_pnt, centr1_good[i])
                    centr2_pnt = append(centr2_pnt, centr2_good[i])
                else:
                    time_thr = append(time_thr, time_good[i])
                    flux_thr = append(flux_thr, flux_good[i])
                    dx_thr = append(dx_thr, dx[i])
                    thr_cadence.append(cad_good[i])

            # fit arclength-flux correlation
            cfit = zeros((len(time_pnt)))
            csig = zeros((len(time_pnt)))
            functype = 'poly' + str(npoly_arfl)
            pinit = array([nanmean(flux_pnt)])
            if npoly_arfl > 0:
                for j in range(npoly_arfl):
                    pinit = append(pinit, 0.0)
            try:
                ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
                    kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile, message, verbose)

            # correction factors for unfiltered data: evaluate the
            # arclength-flux polynomial at every (good-quality) cadence
            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T, centr)
            yy = copy(indata)
            zz = centr_rot[1, :]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz, i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx, i)

            # apply correction to flux time-series
            out_detsap = indata / cfac

            # split time-series data for plotting (good vs thruster cadences)
            tim_gd = array([], 'float32')
            flx_gd = array([], 'float32')
            tim_bd = array([], 'float32')
            flx_bd = array([], 'float32')
            for i in range(len(indata)):
                if intime[i] in time_pnt:
                    tim_gd = append(tim_gd, intime[i])
                    flx_gd = append(flx_gd, out_detsap[i])
                else:
                    tim_bd = append(tim_bd, intime[i])
                    flx_bd = append(flx_bd, out_detsap[i])

            # plot style and size
            status = kepplot.define(labelsize, ticksize, logfile, verbose)
            pylab.figure(figsize=[xsize, ysize])
            pylab.clf()

            # plot x-centroid vs y-centroid
            ax = kepplot.location([0.04, 0.57, 0.16, 0.41])     # plot location
            px = copy(centr1)                                   # clean-up x-axis units
            py = copy(centr2)                                   # clean-up y-axis units
            pxmin = px.min()
            pxmax = px.max()
            pymin = py.min()
            pymax = py.max()
            pxr = pxmax - pxmin
            pyr = pymax - pymin
            pad = 0.05
            # pad the shorter axis so the panel keeps a square aspect
            if pxr > pyr:
                dely = (pxr - pyr) / 2
                xlim(pxmin - pxr * pad, pxmax + pxr * pad)
                ylim(pymin - dely - pyr * pad, pymax + dely + pyr * pad)
            else:
                delx = (pyr - pxr) / 2
                ylim(pymin - pyr * pad, pymax + pyr * pad)
                xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad)
            pylab.plot(px, py, color='#980000', markersize=5,
                       marker='D', ls='')                       # plot data
            pylab.plot(centr1_good, centr2_good, color='#009900',
                       markersize=5, marker='D', ls='')         # plot data
            pylab.plot(ex, epar, color='k', ls='-')
            pylab.plot(ex, enor, color='k', ls='-')
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('CCD Column', 'CCD Row', 'k', 16)    # labels
            pylab.grid()                                        # grid lines

            # plot arclength fits vs drift along strongest eigenvector
            ax = kepplot.location([0.24, 0.57, 0.16, 0.41])     # plot location
            px = rx - rx[0]
            py = s - rx - (s[0] - rx[0])                        # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
            kepplot.RangeOfPlot(px, py, 0.05, False)            # data limits
            pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='')
            px = plotx - rx[0]                                  # clean-up x-axis units
            py = ploty - plotx - (s[0] - rx[0])                 # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
            pylab.plot(px, py, color='r', ls='-', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            ylab = re.sub(' e\S+', ' pixels)', ylab)
            ylab = re.sub(' s\S+', '', ylab)
            ylab = re.sub('Flux', 's $-$ x\'', ylab)
            kepplot.labels('Linear Drift [x\'] (pixels)', ylab, 'k', 16)
            pylab.grid()                                        # grid lines

            # plot time derivative of arclength s
            ax = kepplot.location([0.04, 0.08, 0.16, 0.41])     # plot location
            px = copy(time_pnt)
            py = copy(dx_pnt)
            px, xlab, status = kepplot.cleanx(px, logfile, verbose)
            kepplot.RangeOfPlot(px, dx, 0.05, False)            # data limits
            pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='')
            try:
                # thruster-firing cadences may be empty; plot best-effort
                px = copy(time_thr)
                py = copy(dx_thr)
                px, xlab, status = kepplot.cleanx(px, logfile, verbose)
                pylab.plot(px, py, color='#980000', markersize=5, marker='D', ls='')
            except:
                pass
            px = copy(t)
            py = copy(dfit)
            px, xlab, status = kepplot.cleanx(px, logfile, verbose)
            pylab.plot(px, py, color='r', ls='-', lw=3)
            py = copy(dfit + sigma_dsdt * dsigma)
            pylab.plot(px, py, color='r', ls='--', lw=3)
            py = copy(dfit - sigma_dsdt * dsigma)
            pylab.plot(px, py, color='r', ls='--', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels(xlab, 'ds/dt (pixels day$^{-1}$)', 'k', 16)
            pylab.grid()                                        # grid lines

            # plot relation of arclength vs detrended flux
            ax = kepplot.location([0.24, 0.08, 0.16, 0.41])     # plot location
            px = copy(s_pnt)
            py = copy(flux_pnt)
            py, ylab, status = kepplot.cleany(py, 1.0, logfile, verbose)
            kepplot.RangeOfPlot(px, py, 0.05, False)            # data limits
            pylab.plot(px, py, color='#009900', markersize=5, marker='D', ls='')
            pylab.plot(plx, ply, color='r', ls='-', lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('Arclength [s] (pixels)', ylab, 'k', 16)
            pylab.grid()                                        # grid lines

            # plot aperture photometry
            kepplot.location([0.44, 0.53, 0.55, 0.45])          # plot location
            px, xlab, status = kepplot.cleanx(intime, logfile, verbose)
            py, ylab, status = kepplot.cleany(indata, 1.0, logfile, verbose)
            kepplot.RangeOfPlot(px, py, 0.01, True)             # data limits
            kepplot.plot1d(px, py, cadence, lcolor, lwidth,
                           fcolor, falpha, True)                # plot data
            kepplot.labels(' ', ylab, 'k', 16)                  # labels
            pylab.setp(pylab.gca(), xticklabels=[])             # remove x-tick labels
            kepplot.labels(xlab, re.sub('Flux', 'Aperture Flux', ylab), 'k', 16)
            pylab.grid()                                        # grid lines

            # Plot corrected photometry
            kepplot.location([0.44, 0.08, 0.55, 0.45])          # plot location
            kepplot.RangeOfPlot(px, py, 0.01, True)             # data limits
            px, xlab, status = kepplot.cleanx(tim_gd, logfile, verbose)
            py, ylab, status = kepplot.cleany(flx_gd, 1.0, logfile, verbose)
            kepplot.plot1d(px, py, cadence, lcolor, lwidth,
                           fcolor, falpha, True)                # plot data
            try:
                # rejected cadences may be empty; plot best-effort
                px, xlab, status = kepplot.cleanx(tim_bd, logfile, verbose)
                py = copy(flx_bd)
                pylab.plot(px, py, color='#980000', markersize=5, marker='D', ls='')
            except:
                pass
            kepplot.labels(xlab, re.sub('Flux', 'Corrected Flux', ylab), 'k', 16)
            pylab.grid()                                        # grid lines

            # render plot
            if plotres:
                kepplot.render(cmdLine)

            # save plot to file (one PNG per window)
            if plotres:
                pylab.savefig(re.sub('.fits', '_%d.png' % (iw + 1), outfile))

            # correct fluxes within the output file: re-derive the correction
            # from the UNfiltered window (work1, NaNs included) so every row
            # in [t1:t2] gets a factor; note centroids here are always the
            # moment centroids regardless of cenmethod
            intime = work1[:, 7] + bjdref
            cadenceno = work1[:, 6].astype(int)
            indata = work1[:, 5]
            mom_centr1 = work1[:, 4]
            mom_centr2 = work1[:, 3]
            psf_centr1 = work1[:, 2]
            psf_centr2 = work1[:, 1]
            centr1 = copy(mom_centr1)
            centr2 = copy(mom_centr2)
            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T, centr)
            yy = copy(indata)
            zz = centr_rot[1, :]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz, i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx, i)
            out_detsap = yy / cfac
            instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac
            instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac
            try:
                # DETSAP_FLUX only exists in some products; skip otherwise
                instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac
            except:
                pass

            # add quality flag to output file for thruster firings
            # (bit 2^17 = 131072)
            for i in range(len(intime)):
                if cadenceno[i] in thr_cadence:
                    instr[1].data.field('SAP_QUALITY')[t1 + i] += 131072

    # write output file
    if status == 0:
        instr.writeto(outfile)

    # close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

    # end time
    if (status == 0):
        message = 'KEPSFF completed at'
    else:
        message = '\nKEPSFF aborted at'
    kepmsg.clock(message, logfile, verbose)
def kepsff(infile,outfile,datacol,cenmethod,stepsize,npoly_cxcy,sigma_cxcy,npoly_ardx,
           npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,plotres,clobber,verbose,logfile,
           status,cmdLine=False):
    """Correct K2 light-curve systematics by self-flat-fielding (SFF).

    NOTE(review): this is a second, token-identical definition of ``kepsff``;
    because Python binds names at definition time, this copy SHADOWS the
    earlier one in this module.  One of the two should probably be removed.

    The time series is cut into windows of ``stepsize`` days.  Within each
    window the source centroid track is fitted, rotated onto its principal
    axes, converted to an arclength coordinate, and the arclength--flux
    correlation is divided out of the flux columns of the output FITS file.
    Cadences consistent with thruster firings (outliers in d(arclength)/dt)
    are flagged in SAP_QUALITY with bit 131072.

    Parameters
    ----------
    infile / outfile : str -- input and output FITS light-curve file names.
    datacol : str -- name of the flux column to detrend.
    cenmethod : str -- 'moments' selects MOM_CENTR1/2 centroids, anything
        else selects PSF_CENTR1/2.
    stepsize : float -- window length in days.
    npoly_* / sigma_* : int / float -- polynomial orders and sigma-clip
        thresholds for the successive fits.
    plotres : bool -- render and save a diagnostic PNG per window.
    clobber, verbose, logfile, status, cmdLine -- standard PyKE plumbing;
        ``status`` is overwritten to 0 immediately.

    Returns
    -------
    None.  Side effects: writes ``outfile``, log messages, optional plots.
    """

    # startup parameters (hard-wired plot cosmetics)
    status = 0
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")  # silence numpy warnings (NaN math is expected below)

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSFF -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    call += 'cenmethod='+cenmethod+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'npoly_cxcy='+str(npoly_cxcy)+' '
    call += 'sigma_cxcy='+str(sigma_cxcy)+' '
    call += 'npoly_ardx='+str(npoly_ardx)+' '
    call += 'npoly_dsdt='+str(npoly_dsdt)+' '
    call += 'sigma_dsdt='+str(sigma_dsdt)+' '
    call += 'npoly_arfl='+str(npoly_arfl)+' '
    call += 'sigma_arfl='+str(sigma_arfl)+' '
    savep = 'n'
    if (plotres): savep = 'y'
    call += 'plotres='+savep+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPSFF started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSFF: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            # presence of FILEVER marks a format-compliant file; cadence
            # normalization is then unity
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    # NOTE(review): the second argument is the *builtin* `file`, not `infile`
    # -- looks like a bug (NameError on Python 3); confirm against kepkey API.
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # determine sequence of windows in time: winedge holds the row indices
    # bounding each detrending window of ~stepsize days
    if status == 0:
        frametim = instr[1].header['FRAMETIM']
        num_frm = instr[1].header['NUM_FRM']
        exptime = frametim * num_frm / 86400  # cadence length in days
        tstart = table.field('TIME')[0]
        tstop = table.field('TIME')[-1]
        winedge = arange(tstart,tstop,stepsize)
        if tstop > winedge[-1] + stepsize / 2:
            winedge = append(winedge,tstop)
        else:
            winedge[-1] = tstop
        winedge = (winedge - tstart) / exptime  # days -> row offsets
        winedge = winedge.astype(int)
        if len(table.field('TIME')) > winedge[-1] + 1:
            winedge = append(winedge,len(table.field('TIME')))
        elif len(table.field('TIME')) < winedge[-1]:
            winedge[-1] = len(table.field('TIME'))

    # step through the time windows
    if status == 0:
        for iw in range(1,len(winedge)):
            t1 = winedge[iw-1]
            t2 = winedge[iw]

            # filter input data table: stack the columns of this window,
            # rotate so rows are cadences (column order is REVERSED by
            # rot90: col 7 = TIME ... col 0 = SAP_QUALITY)
            work1 = numpy.array([table.field('TIME')[t1:t2],
                                 table.field('CADENCENO')[t1:t2],
                                 table.field(datacol)[t1:t2],
                                 table.field('MOM_CENTR1')[t1:t2],
                                 table.field('MOM_CENTR2')[t1:t2],
                                 table.field('PSF_CENTR1')[t1:t2],
                                 table.field('PSF_CENTR2')[t1:t2],
                                 table.field('SAP_QUALITY')[t1:t2]],'float64')
            work1 = numpy.rot90(work1,3)
            work2 = work1[~numpy.isnan(work1).any(1)]
            # keep quality==0 cadences; the >1e5 clause also keeps rows --
            # presumably to retain a specific quality bit; verify intent
            work2 = work2[(work2[:,0] == 0.0) | (work2[:,0] > 1e5)]

            # assign table columns (reversed order, see above)
            intime = work2[:,7] + bjdref
            cadenceno = work2[:,6].astype(int)
            indata = work2[:,5]
            mom_centr1 = work2[:,4]
            mom_centr2 = work2[:,3]
            psf_centr1 = work2[:,2]
            psf_centr2 = work2[:,1]
            sap_quality = work2[:,0]
            if cenmethod == 'moments':
                centr1 = copy(mom_centr1)
                centr2 = copy(mom_centr2)
            else:
                centr1 = copy(psf_centr1)
                centr2 = copy(psf_centr2)

            # fit centroid data with low-order polynomial (row vs column)
            cfit = zeros((len(centr2)))
            csig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_cxcy)
            pinit = array([nanmean(centr2)])
            if npoly_cxcy > 0:
                for j in range(npoly_cxcy):
                    pinit = append(pinit,0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
                for j in range(len(coeffs)):
                    cfit += coeffs[j] * numpy.power(centr1,j)
                csig[:] = sigma
            except:
                # NOTE(review): bare except + hard process kill; any failure
                # mode in lsqclip lands here, not just "no data in window"
                message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2)
                status = kepmsg.err(logfile,message,verbose)
                # sys.exit('')
                os._exit(1)

            # reject outliers from the centroid fit
            time_good = array([],'float64')
            centr1_good = array([],'float32')
            centr2_good = array([],'float32')
            flux_good = array([],'float32')
            cad_good = array([],'int')
            for i in range(len(cfit)):
                if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
                    time_good = append(time_good,intime[i])
                    centr1_good = append(centr1_good,centr1[i])
                    centr2_good = append(centr2_good,centr2[i])
                    flux_good = append(flux_good,indata[i])
                    cad_good = append(cad_good,cadenceno[i])

            # covariance matrix for mean-subtracted centroid time series
            centr = concatenate([[centr1_good] - mean(centr1_good),
                                 [centr2_good] - mean(centr2_good)])
            covar = cov(centr)

            # eigenvector eigenvalues of covariance matrix
            # NOTE(review): `eval` shadows the Python builtin
            [eval, evec] = numpy.linalg.eigh(covar)
            ex = arange(-10.0,10.0,0.1)
            epar = evec[1,1] / evec[0,1] * ex  # principal axis line
            enor = evec[1,0] / evec[0,0] * ex  # normal axis line
            ex = ex + mean(centr1)
            epar = epar + mean(centr2_good)
            enor = enor + mean(centr2_good)

            # rotate centroid data into the eigenbasis
            centr_rot = dot(evec.T,centr)

            # fit polynomial to rotated centroids
            rfit = zeros((len(centr2)))
            rsig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            # NOTE(review): first pinit is immediately discarded by the
            # second assignment -- presumably deliberate, verify
            pinit = array([nanmean(centr_rot[0,:])])
            pinit = array([1.0])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit,0.0)
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1, logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile,message,verbose)
            rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
            ry = zeros((len(rx)))
            for i in range(len(coeffs)):
                ry = ry + coeffs[i] * numpy.power(rx,i)

            # calculate arclength of centroids by trapezoidal integration of
            # sqrt(1 + (dy/dx)^2) along the fitted track
            s = zeros((len(rx)))
            for i in range(1,len(s)):
                work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2
                s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])

            # fit arclength as a function of strongest eigenvector
            sfit = zeros((len(centr2)))
            ssig = zeros((len(centr2)))
            functype = 'poly' + str(npoly_ardx)
            pinit = array([nanmean(s)])
            if npoly_ardx > 0:
                for j in range(npoly_ardx):
                    pinit = append(pinit,0.0)
            try:
                acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile,message,verbose)

            # correlate arclength with detrended flux
            t = copy(time_good)
            c = copy(cad_good)
            y = copy(flux_good)
            z = centr_rot[1,:]
            x = zeros((len(z)))
            for i in range(len(acoeffs)):
                x = x + acoeffs[i] * numpy.power(z,i)

            # calculate time derivative of arclength s (finite differences)
            dx = zeros((len(x)))
            for i in range(1,len(x)):
                dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
            dx[0] = dx[1]

            # fit polynomial to derivative and flag outliers (thruster firings)
            dfit = zeros((len(dx)))
            dsig = zeros((len(dx)))
            functype = 'poly' + str(npoly_dsdt)
            pinit = array([nanmean(dx)])
            if npoly_dsdt > 0:
                for j in range(npoly_dsdt):
                    pinit = append(pinit,0.0)
            try:
                dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
                    kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile,message,verbose)
            for i in range(len(dcoeffs)):
                dfit = dfit + dcoeffs[i] * numpy.power(t,i)

            # split cadences: *_pnt within sigma_dsdt of the ds/dt fit (kept),
            # *_thr outside it (suspected thruster firings)
            centr1_pnt = array([],'float32')
            centr2_pnt = array([],'float32')
            time_pnt = array([],'float64')
            flux_pnt = array([],'float32')
            dx_pnt = array([],'float32')
            s_pnt = array([],'float32')
            time_thr = array([],'float64')
            flux_thr = array([],'float32')
            dx_thr = array([],'float32')
            thr_cadence = []
            for i in range(len(t)):
                if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
                    time_pnt = append(time_pnt,time_good[i])
                    flux_pnt = append(flux_pnt,flux_good[i])
                    dx_pnt = append(dx_pnt,dx[i])
                    s_pnt = append(s_pnt,x[i])
                    centr1_pnt = append(centr1_pnt,centr1_good[i])
                    centr2_pnt = append(centr2_pnt,centr2_good[i])
                else:
                    time_thr = append(time_thr,time_good[i])
                    flux_thr = append(flux_thr,flux_good[i])
                    dx_thr = append(dx_thr,dx[i])
                    thr_cadence.append(cad_good[i])

            # fit arclength-flux correlation
            cfit = zeros((len(time_pnt)))
            csig = zeros((len(time_pnt)))
            functype = 'poly' + str(npoly_arfl)
            pinit = array([nanmean(flux_pnt)])
            if npoly_arfl > 0:
                for j in range(npoly_arfl):
                    pinit = append(pinit,0.0)
            try:
                ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
                    kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
            except:
                message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
                status = kepmsg.err(logfile,message,verbose)

            # correction factors for unfiltered data: evaluate the
            # arclength-flux polynomial at every (good-quality) cadence
            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T,centr)
            yy = copy(indata)
            zz = centr_rot[1,:]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz,i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx,i)

            # apply correction to flux time-series
            out_detsap = indata / cfac

            # split time-series data for plotting (good vs thruster cadences)
            tim_gd = array([],'float32')
            flx_gd = array([],'float32')
            tim_bd = array([],'float32')
            flx_bd = array([],'float32')
            for i in range(len(indata)):
                if intime[i] in time_pnt:
                    tim_gd = append(tim_gd,intime[i])
                    flx_gd = append(flx_gd,out_detsap[i])
                else:
                    tim_bd = append(tim_bd,intime[i])
                    flx_bd = append(flx_bd,out_detsap[i])

            # plot style and size
            status = kepplot.define(labelsize,ticksize,logfile,verbose)
            pylab.figure(figsize=[xsize,ysize])
            pylab.clf()

            # plot x-centroid vs y-centroid
            ax = kepplot.location([0.04,0.57,0.16,0.41])        # plot location
            px = copy(centr1)                                   # clean-up x-axis units
            py = copy(centr2)                                   # clean-up y-axis units
            pxmin = px.min()
            pxmax = px.max()
            pymin = py.min()
            pymax = py.max()
            pxr = pxmax - pxmin
            pyr = pymax - pymin
            pad = 0.05
            # pad the shorter axis so the panel keeps a square aspect
            if pxr > pyr:
                dely = (pxr - pyr) / 2
                xlim(pxmin - pxr * pad, pxmax + pxr * pad)
                ylim(pymin - dely - pyr * pad, pymax + dely + pyr * pad)
            else:
                delx = (pyr - pxr) / 2
                ylim(pymin - pyr * pad, pymax + pyr * pad)
                xlim(pxmin - delx - pxr * pad, pxmax + delx + pxr * pad)
            pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')  # plot data
            pylab.plot(centr1_good,centr2_good,color='#009900',markersize=5,marker='D',ls='')  # plot data
            pylab.plot(ex,epar,color='k',ls='-')
            pylab.plot(ex,enor,color='k',ls='-')
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('CCD Column','CCD Row','k',16)       # labels
            pylab.grid()                                        # grid lines

            # plot arclength fits vs drift along strongest eigenvector
            ax = kepplot.location([0.24,0.57,0.16,0.41])        # plot location
            px = rx - rx[0]
            py = s - rx - (s[0] - rx[0])                        # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
            kepplot.RangeOfPlot(px,py,0.05,False)               # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            px = plotx - rx[0]                                  # clean-up x-axis units
            py = ploty-plotx - (s[0] - rx[0])                   # clean-up y-axis units
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
            pylab.plot(px,py,color='r',ls='-',lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            ylab = re.sub(' e\S+',' pixels)',ylab)
            ylab = re.sub(' s\S+','',ylab)
            ylab = re.sub('Flux','s $-$ x\'',ylab)
            kepplot.labels('Linear Drift [x\'] (pixels)',ylab,'k',16)  # labels
            pylab.grid()                                        # grid lines

            # plot time derivative of arclength s
            ax = kepplot.location([0.04,0.08,0.16,0.41])        # plot location
            px = copy(time_pnt)
            py = copy(dx_pnt)
            px, xlab, status = kepplot.cleanx(px,logfile,verbose)
            kepplot.RangeOfPlot(px,dx,0.05,False)               # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            try:
                # thruster-firing cadences may be empty; plot best-effort
                px = copy(time_thr)
                py = copy(dx_thr)
                px, xlab, status = kepplot.cleanx(px,logfile,verbose)
                pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')
            except:
                pass
            px = copy(t)
            py = copy(dfit)
            px, xlab, status = kepplot.cleanx(px,logfile,verbose)
            pylab.plot(px,py,color='r',ls='-',lw=3)
            py = copy(dfit+sigma_dsdt*dsigma)
            pylab.plot(px,py,color='r',ls='--',lw=3)
            py = copy(dfit-sigma_dsdt*dsigma)
            pylab.plot(px,py,color='r',ls='--',lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels(xlab,'ds/dt (pixels day$^{-1}$)','k',16)  # labels
            pylab.grid()                                        # grid lines

            # plot relation of arclength vs detrended flux
            ax = kepplot.location([0.24,0.08,0.16,0.41])        # plot location
            px = copy(s_pnt)
            py = copy(flux_pnt)
            py, ylab, status = kepplot.cleany(py,1.0,logfile,verbose)
            kepplot.RangeOfPlot(px,py,0.05,False)               # data limits
            pylab.plot(px,py,color='#009900',markersize=5,marker='D',ls='')
            pylab.plot(plx,ply,color='r',ls='-',lw=3)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(14)
            kepplot.labels('Arclength [s] (pixels)',ylab,'k',16)  # labels
            pylab.grid()                                        # grid lines

            # plot aperture photometry
            kepplot.location([0.44,0.53,0.55,0.45])             # plot location
            px, xlab, status = kepplot.cleanx(intime,logfile,verbose)
            py, ylab, status = kepplot.cleany(indata,1.0,logfile,verbose)
            kepplot.RangeOfPlot(px,py,0.01,True)                # data limits
            kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)  # plot data
            kepplot.labels(' ',ylab,'k',16)                     # labels
            pylab.setp(pylab.gca(),xticklabels=[])              # remove x-tick labels
            kepplot.labels(xlab,re.sub('Flux','Aperture Flux',ylab),'k',16)  # labels
            pylab.grid()                                        # grid lines

            # Plot corrected photometry
            kepplot.location([0.44,0.08,0.55,0.45])             # plot location
            kepplot.RangeOfPlot(px,py,0.01,True)                # data limits
            px, xlab, status = kepplot.cleanx(tim_gd,logfile,verbose)
            py, ylab, status = kepplot.cleany(flx_gd,1.0,logfile,verbose)
            kepplot.plot1d(px,py,cadence,lcolor,lwidth,fcolor,falpha,True)  # plot data
            try:
                # rejected cadences may be empty; plot best-effort
                px, xlab, status = kepplot.cleanx(tim_bd,logfile,verbose)
                py = copy(flx_bd)
                pylab.plot(px,py,color='#980000',markersize=5,marker='D',ls='')
            except:
                pass
            kepplot.labels(xlab,re.sub('Flux','Corrected Flux',ylab),'k',16)  # labels
            pylab.grid()                                        # grid lines

            # render plot
            if plotres:
                kepplot.render(cmdLine)

            # save plot to file (one PNG per window)
            if plotres:
                pylab.savefig(re.sub('.fits','_%d.png' % (iw + 1),outfile))

            # correct fluxes within the output file: re-derive the correction
            # from the UNfiltered window (work1, NaNs included) so every row
            # in [t1:t2] gets a factor; note centroids here are always the
            # moment centroids regardless of cenmethod
            intime = work1[:,7] + bjdref
            cadenceno = work1[:,6].astype(int)
            indata = work1[:,5]
            mom_centr1 = work1[:,4]
            mom_centr2 = work1[:,3]
            psf_centr1 = work1[:,2]
            psf_centr2 = work1[:,1]
            centr1 = copy(mom_centr1)
            centr2 = copy(mom_centr2)
            centr = concatenate([[centr1] - mean(centr1_good),
                                 [centr2] - mean(centr2_good)])
            centr_rot = dot(evec.T,centr)
            yy = copy(indata)
            zz = centr_rot[1,:]
            xx = zeros((len(zz)))
            cfac = zeros((len(zz)))
            for i in range(len(acoeffs)):
                xx = xx + acoeffs[i] * numpy.power(zz,i)
            for i in range(len(ccoeffs)):
                cfac = cfac + ccoeffs[i] * numpy.power(xx,i)
            out_detsap = yy / cfac
            instr[1].data.field('SAP_FLUX')[t1:t2] /= cfac
            instr[1].data.field('PDCSAP_FLUX')[t1:t2] /= cfac
            try:
                # DETSAP_FLUX only exists in some products; skip otherwise
                instr[1].data.field('DETSAP_FLUX')[t1:t2] /= cfac
            except:
                pass

            # add quality flag to output file for thruster firings
            # (bit 2^17 = 131072)
            for i in range(len(intime)):
                if cadenceno[i] in thr_cadence:
                    instr[1].data.field('SAP_QUALITY')[t1+i] += 131072

    # write output file
    if status == 0:
        instr.writeto(outfile)

    # close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    # end time
    if (status == 0):
        message = 'KEPSFF completed at'
    else:
        message = '\nKEPSFF aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepconvert(infile,outfile,conversion,columns,baddata,clobber,verbose,logfile,status):
    """Convert a Kepler light-curve FITS file to ASCII, or an ASCII table to FITS.

    infile/outfile -- input and output file paths
    conversion     -- 'fits2asc' or 'asc2fits'; each branch below is gated on this
    columns        -- comma-separated list of column names to transfer
    baddata        -- if False, rows with SAP_QUALITY != 0 are dropped (fits2asc only)
    clobber        -- overwrite an existing output file
    verbose/logfile/status -- standard PyKE logging/status plumbing

    Fixes relative to the previous revision:
    * `nline + 1` was a no-op expression, so the line counter never advanced and
      column-count error messages always reported line 0; now `nline += 1`.
    * Added the missing space in the 'at line' error message.
    """

# startup parameters
    status = 0

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPCONVERT -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'conversion='+conversion+' '
    call += 'columns='+columns+ ' '
    writebad = 'n'
    if (baddata): writebad = 'y'
    call += 'baddata='+writebad+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPCONVERT started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# data columns
    if status == 0:
        colnames = columns.strip().split(',')
        ncol = len(colnames)
        if ncol < 1:
            message = 'ERROR -- KEPCONVERT: no data columns specified'
            status = kepmsg.err(logfile,message,verbose)

# input file exists
    if status == 0 and not kepio.fileexists(infile):
        message = 'ERROR -- KEPCONVERT: input file '+infile+' does not exist'
        status = kepmsg.err(logfile,message,verbose)

# clobber output file
    if status == 0:
        if clobber: status = kepio.clobber(outfile,logfile,verbose)
        if kepio.fileexists(outfile):
            message = 'ERROR -- KEPCONVERT: ' + outfile + ' exists. Use clobber=yes'
            status = kepmsg.err(logfile,message,verbose)

# open FITS input file
    if status == 0 and conversion == 'fits2asc':
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)

# read FITS table data
    if status == 0 and conversion == 'fits2asc':
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# check columns exist in FITS file
    if not baddata and status == 0 and conversion == 'fits2asc':
        try:
            # boolean mask of good cadences; only needed when filtering bad data
            qualcol = table.field('SAP_QUALITY') == 0
        except:
            message = 'No SAP_QUALITY column in data, are you using an old FITS file?'
            status = kepmsg.err(logfile,message,verbose)

    if status == 0 and conversion == 'fits2asc':
        work = []
        for colname in colnames:
            try:
                if colname.lower() == 'time':
                    # TIME column is stored relative to BJDREF; export absolute BJD
                    work.append(table.field(colname) + bjdref)
                else:
                    work.append(table.field(colname))
            except:
                message = 'ERROR -- KEPCONVERT: no column ' + colname + ' in ' + infile
                status = kepmsg.err(logfile,message,verbose)
        if not baddata:
            # drop rows flagged by SAP_QUALITY
            for i in range(len(work)):
                work[i] = work[i][qualcol]

# close input file
    if status == 0 and conversion == 'fits2asc':
        status = kepio.closefits(instr,logfile,verbose)

## write output file
    if status == 0 and conversion == 'fits2asc':
#        table, status = kepio.openascii(outfile,'w',logfile,verbose)
#        for i in range(len(work[0])):
#            txt = ''
#            for j in range(len(work)):
#                if numpy.isfinite(work[j][i]):
#                    txt += str(work[j][i]) + ' '
#            txt = txt.strip()
#            if len(re.sub('\s+',',',txt).split(',')) == ncol:
#                table.write(txt + '\n')
#        status = kepio.closeascii(table,logfile,verbose)
        savetxt(outfile,array(work).T)

## open and read ASCII input file
    if status == 0 and conversion == 'asc2fits':
        table, status = kepio.openascii(infile,'r',logfile,verbose)

## organize ASCII table into arrays
    if status == 0 and conversion == 'asc2fits':
        work = []
        for i in range(ncol):
            work.append([])
        nline = 0
        for line in table:
            # normalize whitespace / '|' / ';' delimiters to commas
            line = line.strip()
            line = re.sub('\s+',',',line)
            line = re.sub('\|',',',line)
            line = re.sub(';',',',line)
            if '#' not in line:
                nline += 1  # FIX: was the no-op `nline + 1`; counter feeds the error message below
                line = line.split(',')
                if len(line) == ncol:
                    for i in range(len(line)):
                        try:
                            work[i].append(float(line[i]))
                        except:
                            message = 'ERROR --KEPCONVERT: ' + str(line[i]) + ' is not float'
                            status = kepmsg.err(logfile,message,verbose)
                            break
                else:
                    message = 'ERROR --KEPCONVERT: ' + str(ncol) + ' columns required but '
                    message += str(len(line)) + ' columns supplied by ' + infile
                    message += ' at line ' + str(nline)
                    status = kepmsg.err(logfile,message,verbose)
                    break
        for i in range(ncol):
            work[i] = numpy.array(work[i],dtype='float64')

## timing keywords for output file
    if status == 0 and conversion == 'asc2fits':
        for i in range(ncol):
            if 'time' in colnames[i].lower():
                # heuristically promote truncated JD (MJD-like) values to full JD
                if work[i][1] > 54000.0 and work[i][1] < 60000.0:
                    work[i] += 2.4e6
#                    work[i] += 2.4553e6
                tstart = work[i].min()
                tstop = work[i].max()
                lc_start = tstart
                lc_end = tstop
                if lc_start > 2.4e6: lc_start -= 2.4e6
                if lc_end > 2.4e6: lc_end -= 2.4e6
                # median time step classifies the cadence mode
                dts = []
                for j in range(1,len(work[i])):
                    dts.append(work[i][j] - work[i][j-1])
                dts = numpy.array(dts,dtype='float32')
                cadence = numpy.median(dts)
                if cadence * 86400.0 > 58.0 and cadence * 86400.0 < 61.0:
                    obsmode = 'short cadence'
                elif cadence * 86400.0 > 1600.0 and cadence * 86400.0 < 2000.0:
                    obsmode = 'long cadence'
                else:
                    obsmode = 'unknown'

## Create the outfile primary extension
    if status == 0 and conversion == 'asc2fits':
        hdu0 = PrimaryHDU()
        try:
            hdu0.header.update('EXTNAME','PRIMARY','name of extension')
            hdu0.header.update('EXTVER',1.0,'extension version number')
            hdu0.header.update('ORIGIN','NASA/Ames','organization that generated this file')
            hdu0.header.update('DATE',time.asctime(time.localtime()),'file creation date')
            hdu0.header.update('CREATOR','kepconvert','SW version used to create this file')
            hdu0.header.update('PROCVER','None','processing script version')
            hdu0.header.update('FILEVER','2.0','file format version')
            hdu0.header.update('TIMVERSN','OGIP/93-003','OGIP memo number for file format')
            hdu0.header.update('TELESCOP','Kepler','telescope')
            hdu0.header.update('INSTRUME','Kepler photometer','detector type')
            hdu0.header.update('OBJECT','Unknown','string version of kepID')
            hdu0.header.update('KEPLERID','Unknown','unique Kepler target identifier')
            hdu0.header.update('CHANNEL','Unknown','CCD channel')
            hdu0.header.update('SKYGROUP','Unknown','roll-independent location of channel')
            hdu0.header.update('MODULE','Unknown','CCD module')
            hdu0.header.update('OUTPUT','Unknown','CCD output')
            hdu0.header.update('QUARTER','Unknown','mission quarter during which data was collected')
            hdu0.header.update('SEASON','Unknown','mission season during which data was collected')
            hdu0.header.update('DATA_REL','Unknown','version of data release notes describing data')
            hdu0.header.update('OBSMODE',obsmode,'observing mode')
            hdu0.header.update('RADESYS','Unknown','reference frame of celestial coordinates')
            hdu0.header.update('RA_OBJ','Unknown','[deg] right ascension from KIC')
            hdu0.header.update('DEC_OBJ','Unknown','[deg] declination from KIC')
            hdu0.header.update('EQUINOX',2000.0,'equinox of celestial coordinate system')
            hdu0.header.update('PMRA','Unknown','[arcsec/yr] RA proper motion')
            hdu0.header.update('PMDEC','Unknown','[arcsec/yr] Dec proper motion')
            hdu0.header.update('PMTOTAL','Unknown','[arcsec/yr] total proper motion')
            hdu0.header.update('PARALLAX','Unknown','[arcsec] parallax')
            hdu0.header.update('GLON','Unknown','[deg] galactic longitude')
            hdu0.header.update('GLAT','Unknown','[deg] galactic latitude')
            hdu0.header.update('GMAG','Unknown','[mag] SDSS g band magnitude from KIC')
            hdu0.header.update('RMAG','Unknown','[mag] SDSS r band magnitude from KIC')
            hdu0.header.update('IMAG','Unknown','[mag] SDSS i band magnitude from KIC')
            hdu0.header.update('ZMAG','Unknown','[mag] SDSS z band magnitude from KIC')
            hdu0.header.update('D51MAG','Unknown','[mag] D51 magnitude, from KIC')
            hdu0.header.update('JMAG','Unknown','[mag] J band magnitude from 2MASS')
            hdu0.header.update('HMAG','Unknown','[mag] H band magnitude from 2MASS')
            hdu0.header.update('KMAG','Unknown','[mag] K band magnitude from 2MASS')
            hdu0.header.update('KEPMAG','Unknown','[mag] Kepler magnitude (Kp) from KIC')
            hdu0.header.update('GRCOLOR','Unknown','[mag] (g-r) color, SDSS bands')
            hdu0.header.update('JKCOLOR','Unknown','[mag] (J-K) color, 2MASS bands')
            hdu0.header.update('GKCOLOR','Unknown','[mag] (g-K) color, SDSS g - 2MASS K')
            hdu0.header.update('TEFF','Unknown','[K] effective temperature from KIC')
            hdu0.header.update('LOGG','Unknown','[cm/s2] log10 surface gravity from KIC')
            hdu0.header.update('FEH','Unknown','[log10([Fe/H])] metallicity from KIC')
            hdu0.header.update('EBMINUSV','Unknown','[mag] E(B-V) redenning from KIC')
            hdu0.header.update('AV','Unknown','[mag] A_v extinction from KIC')
            hdu0.header.update('RADIUS','Unknown','[solar radii] stellar radius from KIC')
            hdu0.header.update('TMINDEX','Unknown','unique 2MASS catalog ID from KIC')
            hdu0.header.update('SCPID','Unknown','unique SCP processing ID from KIC')
            hdulist = HDUList(hdu0)
        except:
            message = 'ERROR -- KEPCONVERT: cannot create primary extension in ' + outfile
            status = kepmsg.err(logfile,message,verbose)

## create the outfile HDU 1 extension
    if status == 0 and conversion == 'asc2fits':
        try:
            fitscol = []
            for i in range(ncol):
                # format 'D' = 64-bit float column
                fitscol.append(Column(name=colnames[i],format='D',array=work[i]))
            fitscols = ColDefs(fitscol)
            hdu1 = new_table(fitscols)
            hdulist.append(hdu1)
            hdu1.header.update('INHERIT',True,'inherit primary keywords')
            hdu1.header.update('EXTNAME','LIGHTCURVE','name of extension')
            hdu1.header.update('EXTVER',1,'extension version number')
            hdu1.header.update('TELESCOP','Kepler','telescope')
            hdu1.header.update('INSTRUME','Kepler photometer','detector type')
            hdu1.header.update('OBJECT','Unknown','string version of kepID')
            hdu1.header.update('KEPLERID','Unknown','unique Kepler target identifier')
            hdu1.header.update('RADESYS','Unknown','reference frame of celestial coordinates')
            hdu1.header.update('RA_OBJ','Unknown','[deg] right ascension from KIC')
            hdu1.header.update('DEC_OBJ','Unknown','[deg] declination from KIC')
            hdu1.header.update('EQUINOX',2000.0,'equinox of celestial coordinate system')
            hdu1.header.update('TIMEREF','Unknown','barycentric correction applied to times')
            hdu1.header.update('TASSIGN','Unknown','where time is assigned')
            hdu1.header.update('TIMESYS','Unknown','time system is barycentric JD')
            hdu1.header.update('BJDREFI',0.0,'integer part of BJD reference date')
            hdu1.header.update('BJDREFF',0.0,'fraction of day in BJD reference date')
            hdu1.header.update('TIMEUNIT','Unknown','time unit for TIME, TSTART and TSTOP')
            hdu1.header.update('TSTART',tstart,'observation start time in JD - BJDREF')
            hdu1.header.update('TSTOP',tstop,'observation stop time in JD - BJDREF')
            hdu1.header.update('LC_START',lc_start,'observation start time in MJD')
            hdu1.header.update('LC_END',lc_end,'observation stop time in MJD')
            hdu1.header.update('TELAPSE',tstop-tstart,'[d] TSTOP - TSTART')
            hdu1.header.update('LIVETIME','Unknown','[d] TELAPSE multiplied by DEADC')
            hdu1.header.update('EXPOSURE','Unknown','[d] time on source')
            hdu1.header.update('DEADC','Unknown','deadtime correction')
            hdu1.header.update('TIMEPIXR','Unknown','bin time beginning=0 middle=0.5 end=1')
            hdu1.header.update('TIERRELA','Unknown','[d] relative time error')
            hdu1.header.update('TIERABSO','Unknown','[d] absolute time error')
            hdu1.header.update('INT_TIME','Unknown','[s] photon accumulation time per frame')
            hdu1.header.update('READTIME','Unknown','[s] readout time per frame')
            hdu1.header.update('FRAMETIM','Unknown','[s] frame time (INT_TIME + READTIME)')
            hdu1.header.update('NUM_FRM','Unknown','number of frames per time stamp')
            hdu1.header.update('TIMEDEL','Unknown','[d] time resolution of data')
            hdu1.header.update('DATE-OBS','Unknown','TSTART as UT calendar date')
            hdu1.header.update('DATE-END','Unknown','TSTOP as UT calendar date')
            hdu1.header.update('BACKAPP','Unknown','background is subtracted')
            hdu1.header.update('DEADAPP','Unknown','deadtime applied')
            hdu1.header.update('VIGNAPP','Unknown','vignetting or collimator correction applied')
            hdu1.header.update('GAIN','Unknown','channel gain [electrons/count]')
            hdu1.header.update('READNOIS','Unknown','read noise [electrons]')
            hdu1.header.update('NREADOUT','Unknown','number of reads per cadence')
            hdu1.header.update('TIMSLICE','Unknown','time-slice readout sequence section')
            hdu1.header.update('MEANBLCK','Unknown','FSW mean black level [count]')
            hdu1.header.update('PDCSAPFL','Unknown','SAP PDC processing flags (bit code)')
            hdu1.header.update('PDCDIAFL','Unknown','DIA PDC processing flags (bit code)')
            hdu1.header.update('MISPXSAP','Unknown','no of optimal aperture pixels missing from SAP')
            hdu1.header.update('MISPXDIA','Unknown','no of optimal aperture pixels missing from DIA')
            hdu1.header.update('CROWDSAP','Unknown','crowding metric evaluated over SAP opt. ap.')
            hdu1.header.update('CROWDDIA','Unknown','crowding metric evaluated over DIA aperture')
        except:
            message = 'ERROR -- KEPCONVERT: cannot create light curve extension in ' + outfile
            status = kepmsg.err(logfile,message,verbose)

## history keyword in output file
    if status == 0 and conversion == 'asc2fits':
        status = kepkey.history(call,hdu0,outfile,logfile,verbose)

## filter data table
    if status == 0 and conversion == 'asc2fits':
        # filter NaNs on the second column (or the last column if only one was given)
        instr, status = kepio.filterNaN(hdulist,colnames[min(array([1,len(colnames)-1],dtype='int'))],
                                        outfile,logfile,verbose)

## write output FITS file
    if status == 0 and conversion == 'asc2fits':
        hdulist.writeto(outfile,checksum=True)

## end time
    if (status == 0):
        message = 'KEPCONVERT completed at'
    else:
        message = '\nKEPCONVERT aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepdetrend(infile, outfile, datacol, errcol, ranges1, npoly1, nsig1, niter1, ranges2, npoly2, nsig2, niter2, popnans, plot, clobber, verbose, logfile, status, cmdLine=False):
    """Detrend a light curve by fitting polynomials to two user-chosen time ranges.

    A polynomial (order npoly1, sigma-clipped at nsig1 over niter1 iterations) is
    fit to region 1 (the data to be corrected) and another (npoly2/nsig2/niter2)
    to region 2 (the reference, assumed-correct data).  The corrected flux is
    indata - fit1 + fit2, written back into datacol of the output FITS file.
    ranges1/ranges2 are time-range strings parsed by kepio.timeranges.
    """

# startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 9
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPDETREND -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + str(datacol) + ' '
    call += 'errcol=' + str(errcol) + ' '
    call += 'ranges1=' + str(ranges1) + ' '
    call += 'npoly1=' + str(npoly1) + ' '
    call += 'nsig1=' + str(nsig1) + ' '
    call += 'niter1=' + str(niter1) + ' '
    call += 'ranges2=' + str(ranges2) + ' '
    call += 'npoly2=' + str(npoly2) + ' '
    call += 'nsig2=' + str(nsig2) + ' '
    call += 'niter2=' + str(niter2) + ' '
    popn = 'n'
    if (popnans): popn = 'y'
    call += 'popnans=' + popn + ' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot=' + plotit + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

# start time
    kepmsg.clock('KEPDETREND started at', logfile, verbose)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPDETREND: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): `file` here is the Python 2 builtin, not a filename —
        # presumably this should be `infile`; confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# filter input data table
    if status == 0:
        # stack time/flux/err, rotate so rows are cadences, drop rows with any NaN
        work1 = numpy.array(
            [table.field('time'), table.field(datacol), table.field(errcol)])
        work1 = numpy.rot90(work1, 3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns
    if status == 0:
        # after rot90(…,3) the column order is reversed: time is column 2
        intime = work1[:, 2] + bjdref
        indata = work1[:, 1]
        inerr = work1[:, 0]
        print intime  # NOTE(review): debug print left in; consider removing

# time ranges for region 1 (region to be corrected)
    if status == 0:
        time1 = []
        data1 = []
        err1 = []
        t1start, t1stop, status = kepio.timeranges(ranges1, logfile, verbose)
    if status == 0:
        cadencelis1, status = kepstat.filterOnRange(intime, t1start, t1stop)
    if status == 0:
        for i in range(len(cadencelis1)):
            time1.append(intime[cadencelis1[i]])
            data1.append(indata[cadencelis1[i]])
            if errcol.lower() != 'none':
                err1.append(inerr[cadencelis1[i]])
        # shift times to a local zero point for numerical stability of the fit
        t0 = time1[0]
        time1 = array(time1, dtype='float64') - t0
        data1 = array(data1, dtype='float32')
        if errcol.lower() != 'none':
            err1 = array(err1, dtype='float32')
        else:
            err1 = None

# fit function to range 1
    if status == 0:
        functype = 'poly' + str(npoly1)
        # initial guess: mean level plus zeroed higher-order coefficients
        pinit = [data1.mean()]
        if npoly1 > 0:
            for i in range(npoly1):
                pinit.append(0)
        pinit = array(pinit, dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx1, ploty1, status = \
            kepfit.lsqclip(functype,pinit,time1,data1,err1,nsig1,nsig1,niter1,
                           logfile,verbose)
        # evaluate the fitted polynomial over the full time series …
        fit1 = indata * 0.0
        for i in range(len(coeffs)):
            fit1 += coeffs[i] * (intime - t0)**i
        # … but zero it outside region 1 so only that region is corrected
        for i in range(len(intime)):
            if i not in cadencelis1:
                fit1[i] = 0.0
        plotx1 += t0
        print coeffs  # NOTE(review): debug print left in; consider removing

# time ranges for region 2 (region that is correct)
    if status == 0:
        time2 = []
        data2 = []
        err2 = []
        t2start, t2stop, status = kepio.timeranges(ranges2, logfile, verbose)
        cadencelis2, status = kepstat.filterOnRange(intime, t2start, t2stop)
        for i in range(len(cadencelis2)):
            time2.append(intime[cadencelis2[i]])
            data2.append(indata[cadencelis2[i]])
            if errcol.lower() != 'none':
                err2.append(inerr[cadencelis2[i]])
        t0 = time2[0]
        time2 = array(time2, dtype='float64') - t0
        data2 = array(data2, dtype='float32')
        if errcol.lower() != 'none':
            err2 = array(err2, dtype='float32')
        else:
            err2 = None

# fit function to range 2
    if status == 0:
        functype = 'poly' + str(npoly2)
        pinit = [data2.mean()]
        if npoly2 > 0:
            for i in range(npoly2):
                pinit.append(0)
        pinit = array(pinit, dtype='float32')
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx2, ploty2, status = \
            kepfit.lsqclip(functype,pinit,time2,data2,err2,nsig2,nsig2,niter2,
                           logfile,verbose)
        fit2 = indata * 0.0
        for i in range(len(coeffs)):
            fit2 += coeffs[i] * (intime - t0)**i
        for i in range(len(intime)):
            # NOTE(review): this tests cadencelis1, not cadencelis2 — by analogy
            # with the region-1 block above this looks like a copy/paste bug;
            # confirm intended behavior before changing.
            if i not in cadencelis1:
                fit2[i] = 0.0
        plotx2 += t0

# normalize data
    if status == 0:
        # corrected flux: remove region-1 trend, restore region-2 baseline
        outdata = indata - fit1 + fit2
        if errcol.lower() != 'none':
            outerr = inerr * 1.0

# comment keyword in output file
    if status == 0:
        status = kepkey.history(call, instr[0], outfile, logfile, verbose)

# clean up x-axis unit
    if status == 0:
        # round the plot epoch down to the nearest 100 days, in full BJD
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        plotx1 = plotx1 - intime0
        plotx2 = plotx2 - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units
    if status == 0:
        pout = outdata
        # NOTE(review): the next two bare expressions are no-ops — they look like
        # remnants of a mangled edit (perhaps copies of ploty1/ploty2 were
        # intended); verify against upstream PyKE source.
        ploty1
        ploty2
        # rescale fluxes by a power of ten so axis labels stay compact
        nrm = len(str(int(numpy.nanmax(indata)))) - 1
        indata = indata / 10**nrm
        pout = pout / 10**nrm
        ploty1 = ploty1 / 10**nrm
        ploty2 = ploty2 / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = indata.min()
        ymax = indata.max()
        omin = pout.min()
        omax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        oo = omax - omin
        # pad the series with sentinel points so the fill polygon closes at y=0
        ptime = insert(ptime, [0], [ptime[0]])
        ptime = append(ptime, [ptime[-1]])
        indata = insert(indata, [0], [0.0])
        indata = append(indata, [0.0])
        pout = insert(pout, [0], [0.0])
        pout = append(pout, 0.0)

# plot light curve
    if status == 0 and plot:
        try:
            params = {
                'backend': 'png',
                'axes.linewidth': 2.5,
                'axes.labelsize': labelsize,
                'axes.font': 'sans-serif',
                'axes.fontweight': 'bold',
                'text.fontsize': 12,
                'legend.fontsize': 12,
                'xtick.labelsize': ticksize,
                'ytick.labelsize': ticksize
            }
            rcParams.update(params)
        except:
            pass
        pylab.figure(figsize=[xsize, ysize])
        pylab.clf()

# plot original data
        ax = pylab.axes([0.06, 0.523, 0.93, 0.45])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime, indata, color=lcolor, linestyle='-', linewidth=lwidth)
        pylab.fill(ptime, indata, color=fcolor, linewidth=0.0, alpha=falpha)
        # overlay the two regional fits: region 1 in red, region 2 in green
        pylab.plot(plotx1, ploty1, color='r', linestyle='-', linewidth=2.0)
        pylab.plot(plotx2, ploty2, color='g', linestyle='-', linewidth=2.0)
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin > 0.0:
            pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            pylab.ylim(1.0e-10, ymax + yr * 0.01)
        pylab.ylabel(ylab, {'color': 'k'})
        pylab.grid()

# plot detrended data
        ax = pylab.axes([0.06, 0.073, 0.93, 0.45])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime, pout, color=lcolor, linestyle='-', linewidth=lwidth)
        pylab.fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin > 0.0:
            pylab.ylim(omin - oo * 0.01, omax + oo * 0.01)
        else:
            pylab.ylim(1.0e-10, omax + oo * 0.01)
        pylab.xlabel(xlab, {'color': 'k'})
        try:
            pylab.ylabel(ylab, {'color': 'k'})
        except:
            # fall back to a plain-text label if TeX rendering fails
            ylab = '10**%d e-/s' % nrm
            pylab.ylabel(ylab, {'color': 'k'})

# render plot
    if status == 0:
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# write output file
    if status == 0 and popnans:
        # NOTE(review): good_data/bad_data are never defined in this function —
        # this branch will raise NameError when popnans=True; verify upstream.
        instr[1].data.field(datacol)[good_data] = outdata
        instr[1].data.field(errcol)[good_data] = outerr
        instr[1].data.field(datacol)[bad_data] = None
        instr[1].data.field(errcol)[bad_data] = None
        instr.writeto(outfile)
    elif status == 0 and not popnans:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
            if errcol.lower() != 'none':
                instr[1].data.field(errcol)[i] = outerr[i]
        instr.writeto(outfile)

# close input file
    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

## end time
    if (status == 0):
        message = 'KEPDETREND completed at'
    else:
        message = '\nKEPDETREND aborted at'
    kepmsg.clock(message, logfile, verbose)
def kepstddev(infile,outfile,datacol,timescale,clobber,verbose,logfile,status,cmdLine=False):
    """Compute and plot running CDPP (noise) for a Kepler light curve.

    The standard deviation of datacol over a sliding window of `timescale`
    hours is converted to ppm and scaled to CDPP; the per-cadence CDPP series
    is plotted, and appended to the output FITS file as a new CDPP_* column
    together with MCDPP*/RCDPP* header keywords (median and RMS CDPP).
    """

# startup parameters
    status = 0
    labelsize = 44
    ticksize = 36
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

# log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPSTDDEV -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'timescale='+str(timescale)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

# start time
    kepmsg.clock('KEPSTDDEV started at',logfile,verbose)

# test log file
    logfile = kepmsg.test(logfile)

# clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPSTDDEV: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

# open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            # modern files carry FILEVER and need no cadence renormalization
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

# fudge non-compliant FITS keywords with no values
    if status == 0:
        # NOTE(review): `file` is the Python 2 builtin, not a filename —
        # presumably `infile` was intended; confirm against kepkey.emptykeys.
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

# read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

# filter input data table
    if status == 0:
        # stack time/flux, rotate so rows are cadences, drop rows with any NaN
        work1 = numpy.array([table.field('time'), table.field(datacol)])
        work1 = numpy.rot90(work1,3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns
    if status == 0:
        # after rot90(…,3) column order is reversed: time is column 1
        intime = work1[:,1] + bjdref
        indata = work1[:,0]

# calculate STDDEV in units of ppm
    if status == 0:
        # NOTE(review): timescale/24 is integer division under Python 2 if
        # timescale is ever passed as an int — presumably it arrives as float.
        stddev = running_frac_std(intime,indata,timescale/24) * 1.0e6
        astddev = numpy.std(indata) * 1.0e6
        # scale window stddev down by sqrt(number of cadences per window)
        cdpp = stddev / sqrt(timescale * 3600.0 / cadence)

# filter cdpp
    if status == 0:
        # clip gross outliers (>10x median) by repeating the previous value
        for i in range(len(cdpp)):
            if cdpp[i] > median(cdpp) * 10.0: cdpp[i] = cdpp[i-1]

# calculate median STDDEV
    if status == 0:
        medcdpp = ones((len(cdpp)),dtype='float32') * median(cdpp[:])
#        print '\nMedian %.1fhr standard deviation = %d ppm' % (timescale, median(stddev[:]))
        print '\nStandard deviation = %d ppm' % astddev

# calculate median STDDEV
    if status == 0:
        # NOTE(review): medcdpp is recomputed identically here — duplicated block
        medcdpp = ones((len(cdpp)),dtype='float32') * median(cdpp[:])
        print 'Median %.1fhr CDPP = %d ppm' % (timescale, median(cdpp[:]))

# calculate RMS STDDEV
    if status == 0:
        rms, status = kepstat.rms(cdpp,zeros(len(stddev)),logfile,verbose)
        rmscdpp = ones((len(cdpp)),dtype='float32') * rms
        print '    RMS %.1fhr CDPP = %d ppm\n' % (timescale, rms)

# clean up x-axis unit
    if status == 0:
        # round the plot epoch down to the nearest 100 days
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

# clean up y-axis units
    if status == 0:
        pout = copy(cdpp)
        nrm = math.ceil(math.log10(median(cdpp))) - 1.0
#        pout = pout / 10**nrm
#        ylab = '%.1fhr $\sigma$ (10$^%d$ ppm)' % (timescale,nrm)
        ylab = '%.1fhr $\sigma$ (ppm)' % timescale

# data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        # pad the series with sentinel points so the fill polygon closes at y=0
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

# plot style
    if status == 0:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': 36,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': 32,
                      'ytick.labelsize': 36}
            pylab.rcParams.update(params)
        except:
            pass

# define size of plot on monitor screen
        pylab.figure(figsize=[xsize,ysize])

# delete any fossil plots in the matplotlib window
        pylab.clf()

# position first axes inside the plotting window
        ax = pylab.axes([0.07,0.15,0.92,0.83])

# force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        ax.yaxis.set_major_locator(MaxNLocator(5))

# rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90,fontsize=36)

# plot flux vs time
        # break the curve at gaps wider than ~2 cadences so gaps are not bridged
        ltime = array([],dtype='float64')
        ldata = array([],dtype='float32')
        dt = 0
        work1 = 2.0 * cadence / 86400
        for i in range(1,len(ptime)-1):
            dt = ptime[i] - ptime[i-1]
            if dt < work1:
                ltime = append(ltime,ptime[i])
                ldata = append(ldata,pout[i])
            else:
                pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0)
                ltime = array([],dtype='float64')
                ldata = array([],dtype='float32')
        pylab.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0)

# plot the fill color below data time series, with no data gaps
        pylab.fill(ptime,pout,fc='#ffff00',linewidth=0.0,alpha=0.2)

# plot median CDPP
#        pylab.plot(intime - intime0,medcdpp / 10**nrm,color='r',linestyle='-',linewidth=2.0)
#        pylab.plot(intime - intime0,medcdpp,color='r',linestyle='-',linewidth=2.0)

# plot RMS CDPP
#        pylab.plot(intime - intime0,rmscdpp / 10**nrm,color='r',linestyle='--',linewidth=2.0)

# define plot x and y limits
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin - yr * 0.01 <= 0.0:
            pylab.ylim(1.0e-10, ymax + yr * 0.01)
        else:
            pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01)

# plot labels
        pylab.xlabel(xlab, {'color' : 'k'})
        pylab.ylabel(ylab, {'color' : 'k'})

# make grid on plot
        pylab.grid()

# render plot
    if status == 0:
        if cmdLine:
            pylab.show(block=True)
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# add NaNs back into data
    if status == 0:
        # rebuild a full-length CDPP column aligned with the unfiltered table,
        # padding NaN where time/flux were not finite
        n = 0
        work1 = array([],dtype='float32')
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)
        for i in range(len(table.field(0))):
            if isfinite(table.field('time')[i]) and isfinite(table.field(datacol)[i]):
                work1 = append(work1,cdpp[n])
                n += 1
            else:
                work1 = append(work1,nan)

# write output file
    if status == 0:
        # keyword names encode the timescale in tenths of an hour, e.g. MCDPP65
        status = kepkey.new('MCDPP%d' % (timescale * 10.0),medcdpp[0],
                            'Median %.1fhr CDPP (ppm)' % timescale,
                            instr[1],outfile,logfile,verbose)
        status = kepkey.new('RCDPP%d' % (timescale * 10.0),rmscdpp[0],
                            'RMS %.1fhr CDPP (ppm)' % timescale,
                            instr[1],outfile,logfile,verbose)
        colname = 'CDPP_%d' % (timescale * 10)
        col1 = pyfits.Column(name=colname,format='E13.7',array=work1)
        cols = instr[1].data.columns + col1
        instr[1] = pyfits.new_table(cols,header=instr[1].header)
        instr.writeto(outfile)

# comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

# close FITS
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

# end time
    if (status == 0):
        message = 'KEPSTDDEV completed at'
    else:
        message = '\nKEPSTDDEV aborted at'
    kepmsg.clock(message,logfile,verbose)
def kepoutlier(infile,outfile,datacol,nsig,stepsize,npoly,niter,
               operation,ranges,plot,plotfit,clobber,verbose,logfile,status,
               cmdLine=False):
    """Identify and remove (or replace) statistical outliers from a Kepler light curve.

    The time series in column *datacol* of *infile* is split into windows of
    *stepsize* days.  Each window is fit by a polynomial of order *npoly*
    using iterative sigma-clipping (*nsig*, *niter*).  Samples deviating from
    the local fit by more than nsig * sigma, and falling inside the time
    *ranges* selected for correction, are either dropped from the output
    table or (operation == 'replace') substituted with a random draw about
    the fit.  The filtered table is written to *outfile*; the light curve,
    fits and rejected points are optionally plotted.

    status is the task-wide error flag threaded through every kep* helper:
    all work is skipped once any step sets it non-zero.
    """

    # startup parameters: fixed plot cosmetics for this task
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    # log the call string (reconstructed command line) to the task log
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPOUTLIER -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'nsig='+str(nsig)+' '
    call += 'stepsize='+str(stepsize)+' '
    call += 'npoly='+str(npoly)+' '
    call += 'niter='+str(niter)+' '
    call += 'operation='+str(operation)+' '
    call += 'ranges='+str(ranges)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+ ' '
    plotf = 'n'
    if (plotfit): plotf = 'y'
    call += 'plotfit='+plotf+ ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+ ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPOUTLIER started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file; abort if it still exists afterwards
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPOUTLIER: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    # open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        # FILEVER present => archive-compliant file, fluxes already per-second;
        # otherwise divide data by the cadence below
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    # NOTE(review): this passes the *builtin* ``file`` rather than ``infile``;
    # presumably only used for log messages inside emptykeys — verify upstream
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # filter input data table: unless NANCLEAN is already set, compact the
    # table in place, keeping only rows with finite time and finite data
    # (older files name the time column 'barytime', newer ones 'time')
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            try:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('barytime')[i]) and \
                       numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                for i in range(len(table.field(0))):
                    if numpy.isfinite(table.field('time')[i]) and \
                       numpy.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    # read table columns ('barytime' is stored as BJD - 2.4e6)
    if status == 0:
        try:
            intime = instr[1].data.field('barytime') + 2.4e6
        except:
            intime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    # time ranges for region to be corrected
    if status == 0:
        t1, t2, status = kepio.timeranges(ranges,logfile,verbose)
        cadencelis, status = kepstat.filterOnRange(intime,t1,t2)

    # find limits of each time step (windows of stepsize days)
    if status == 0:
        tstep1 = []; tstep2 = []
        work = intime[0]
        while work < intime[-1]:
            tstep1.append(work)
            tstep2.append(array([work+stepsize,intime[-1]],dtype='float64').min())
            work += stepsize

    # find cadence (index) limits of each time step
    if status == 0:
        cstep1 = []; cstep2 = []
        work1 = 0; work2 = 0
        for i in range(len(intime)):
            if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
                work2 = i
            else:
                cstep1.append(work1)
                cstep2.append(work2)
                work1 = i; work2 = i
        # close out the final (partial) window
        cstep1.append(work1)
        cstep2.append(work2)
        outdata = indata * 1.0

    # comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    # clean up x-axis unit: plot relative to the nearest century of BJD
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    # clean up y-axis units: scale flux down by its decimal order of magnitude
    if status == 0:
        pout = indata * 1.0
        nrm = len(str(int(pout.max())))-1
        pout = pout / 10**nrm
        ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm

        # data limits, padded endpoints so the fill polygon closes at zero
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)

    # plot light curve; fall back to non-LaTeX labels if rcParams rejects these
    if status == 0 and plot:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

        # plot data
        ax = pylab.axes([0.06,0.1,0.93,0.87])

        # force tick labels to be absolute rather than relative
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))

        # rotate y labels by 90 deg
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,pout,color=lcolor,linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        xlabel(xlab, {'color' : 'k'})
        if not plotLatex:
            ylab = '10**%d electrons/sec' % nrm
        ylabel(ylab, {'color' : 'k'})
        grid()

    # loop over each time step, fit data, determine rms
    if status == 0:
        masterfit = indata * 0.0
        mastersigma = zeros(len(masterfit))
        functype = 'poly' + str(npoly)
        for i in range(len(cstep1)):
            # initial guess: window mean plus zeroed higher-order coefficients
            pinit = [indata[cstep1[i]:cstep2[i]+1].mean()]
            if npoly > 0:
                for j in range(npoly):
                    pinit.append(0.0)
            pinit = array(pinit,dtype='float32')
            try:
                coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.lsqclip(functype,pinit,intime[cstep1[i]:cstep2[i]+1]-intime[cstep1[i]],
                                   indata[cstep1[i]:cstep2[i]+1],None,nsig,nsig,niter,logfile,
                                   verbose)
                # evaluate the polynomial over the window (time zeroed at window start)
                for j in range(len(coeffs)):
                    masterfit[cstep1[i]:cstep2[i]+1] += coeffs[j] * \
                        (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]])**j
                for j in range(cstep1[i],cstep2[i]+1):
                    mastersigma[j] = sigma
                if plotfit:
                    pylab.plot(plotx+intime[cstep1[i]]-intime0,ploty / 10**nrm,
                               'g',lw='3')
            except:
                # fit failed: keep the raw data and set sigma huge so no
                # point in this window can be flagged as an outlier
                for j in range(cstep1[i],cstep2[i]+1):
                    masterfit[j] = indata[j]
                    mastersigma[j] = 1.0e10
                message = 'WARNING -- KEPOUTLIER: could not fit range '
                message += str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]])
                kepmsg.warn(None,message)

    # reject outliers: compact the table, skipping (or replacing) flagged rows
    if status == 0:
        rejtime = []; rejdata = []; naxis2 = 0
        for i in range(len(masterfit)):
            if abs(indata[i] - masterfit[i]) > nsig * mastersigma[i] and i in cadencelis:
                rejtime.append(intime[i])
                rejdata.append(indata[i])
                if operation == 'replace':
                    # substitute a random value drawn about the local fit
                    [rnd] = kepstat.randarray([masterfit[i]],[mastersigma[i]])
                    table[naxis2] = table[i]
                    table.field(datacol)[naxis2] = rnd
                    naxis2 += 1
            else:
                table[naxis2] = table[i]
                naxis2 += 1
        instr[1].data = table[:naxis2]
        # NOTE(review): the rejected points are plotted even when plot=False;
        # other branches guard pylab calls with 'plot' — confirm intent
        rejtime = array(rejtime,dtype='float64')
        rejdata = array(rejdata,dtype='float32')
        pylab.plot(rejtime-intime0,rejdata / 10**nrm,'ro')

        # plot ranges
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    # write output file
    if status == 0:
        instr.writeto(outfile)

    # close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPOUTLIER completed at'
    else:
        message = '\nKEPOUTLIER aborted at'
    kepmsg.clock(message,logfile,verbose)
def keptransitmodel(inputfile,datacol,errorcol,period_d,rprs,T0,
                    Ecc,ars,inc,omega,LDparams,sec,norm=False,
                    verbose=0,logfile='logfile.dat',status=0,cmdLine=False):
    """Overlay a limb-darkened transit model on a Kepler light curve.

    Reads flux (*datacol*) and its error (*errorcol*) from *inputfile*,
    optionally normalizes the flux by its sigma-clipped median (norm=True),
    evaluates a transit model via ``tmod.lightcurve`` with the supplied
    orbital parameters, folds data and model on *period_d* about epoch *T0*
    (``fold_data``) and plots the result (``do_plot``).

    Parameters
    ----------
    inputfile : str      light-curve FITS file
    datacol, errorcol : str   flux and flux-error column names
    period_d : float     orbital period [days]
    rprs : float         planet/star radius ratio
    T0 : float           transit epoch [BJD]
    Ecc, ars, inc, omega : orbital eccentricity, a/R*, inclination [deg],
                           argument of periastron
    LDparams : str       whitespace-separated limb-darkening coefficients
    sec : bool           model the secondary eclipse
    norm : bool          normalize flux by its clipped median before plotting
    status : int         task-wide error flag; all steps skipped once non-zero
    """

    # write the reconstructed task call to the log file
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPTRANSIT -- '
    call += 'inputfile='+inputfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'errorcol='+str(errorcol)+' '
    call += 'period_d='+str(period_d)+' '
    call += 'rprs='+str(rprs)+' '
    call += 'T0='+str(T0)+' '
    call += 'Ecc='+str(Ecc)+' '
    call += 'ars='+str(ars)+' '
    call += 'inc='+str(inc)+' '
    call += 'omega='+str(omega)+' '
    call += 'LDparams='+str(LDparams)+' '
    call += 'sec='+str(sec)+' '
    #to finish

    # open input file
    if status == 0:
        instr, status = kepio.openfits(inputfile,'readonly',logfile,verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr, inputfile,logfile,verbose,status)
    if status == 0:
        # FILEVER present => archive-compliant file, fluxes already per-second;
        # otherwise divide data by the cadence below
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    # BUGFIX: the original passed the *builtin* ``file`` here; pass the
    # actual input filename, matching every other kep* helper call
    if status == 0:
        instr = kepkey.emptykeys(instr,inputfile,logfile,verbose)

    # read table structure
    if status == 0:
        table, status = kepio.readfitstab(inputfile,instr[1],logfile,verbose)

    # filter input data table: unless NANCLEAN is already set, compact the
    # table in place, keeping only rows with finite time and finite data
    # (older files name the time column 'barytime', newer ones 'time')
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            try:
                for i in range(len(table.field(0))):
                    if np.isfinite(table.field('barytime')[i]) and \
                       np.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
            except:
                for i in range(len(table.field(0))):
                    if np.isfinite(table.field('time')[i]) and \
                       np.isfinite(table.field(datacol)[i]):
                        table[naxis2] = table[i]
                        naxis2 += 1
                instr[1].data = table[:naxis2]
#            comment = 'NaN cadences removed from data'
#            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    # read table columns ('barytime' is stored as BJD - 2.4e6)
    if status == 0:
        try:
            intime = instr[1].data.field('barytime') + 2.4e6
        except:
            intime, status = kepio.readfitscol(inputfile,instr[1].data,'time',logfile,verbose)
        indata, status = kepio.readfitscol(inputfile,instr[1].data,datacol,logfile,verbose)
        inerr, status = kepio.readfitscol(inputfile,instr[1].data,errorcol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom
        inerr = inerr / cadenom

    if status == 0 and norm:
        # first remove outliers before normalizing
        threesig = 3. * np.std(indata)
        # BUGFIX: the original mask compared indata against itself
        # (indata < indata + threesig), a tautology that kept every point;
        # clip about the median so the normalization ignores >3-sigma outliers
        med = np.median(indata)
        mask = np.logical_and(indata < med + threesig, indata > med - threesig)
        # now normalize
        indata = indata / np.median(indata[mask])

    if status == 0:
        # need to check if LD params are sensible and in right format
        LDparams = [float(i) for i in LDparams.split()]
        # inclination supplied in degrees; model expects radians
        inc = inc * np.pi / 180.

    # evaluate the transit model at the observed time stamps
    if status == 0:
        modelfit = tmod.lightcurve(intime,period_d,rprs,T0,Ecc,
                                   ars,inc,omega,LDparams,sec)

    # phase-fold data and model on the orbital period about T0
    if status == 0:
        phi, fluxfold, modelfold, errorfold, phiNotFold = fold_data(intime,
            modelfit,indata,inerr,period_d,T0)

    # render the light curve with the model overlaid
    if status == 0:
        do_plot(intime,modelfit,indata,inerr,period_d,T0,cmdLine)