def keppixseries(infile,outfile,plotfile,plottype,filter,function,cutoff,
                 clobber,verbose,logfile,status,cmdLine=False):

    # input arguments
    status = 0
    seterr(all="ignore")

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPPIXSERIES -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'plotfile='+plotfile+' '
    call += 'plottype='+plottype+' '
    filt = 'n'
    if (filter): filt = 'y'
    call += 'filter='+filt+' '
    call += 'function='+function+' '
    call += 'cutoff='+str(cutoff)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPPIXSERIES started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPPIXSERIES: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile,message,verbose)

    # open TPF FITS file
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)

    # read mask definition data from TPF file
    if status == 0:
        maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose)

    # print target data
    if status == 0:
        print('')
        print('      KepID: %s' % kepid)
        print(' RA (J2000): %s' % ra)
        print('Dec (J2000): %s' % dec)
        print('     KepMag: %s' % kepmag)
        print('   SkyGroup: %2s' % skygroup)
        print('     Season: %2s' % str(season))
        print('    Channel: %2s' % channel)
        print('     Module: %2s' % module)
        print('     Output: %1s' % output)
        print('')

    # how many quality = 0 rows?
    if status == 0:
        npts = 0
        nrows = len(fluxpixels)
        for i in range(nrows):
            if qual[i] == 0 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,ydim*xdim/2]):
                npts += 1
        time = empty((npts))
        timecorr = empty((npts))
        cadenceno = empty((npts))
        quality = empty((npts))
        pixseries = empty((ydim,xdim,npts))
        errseries = empty((ydim,xdim,npts))

    # construct output light curves
    if status == 0:
        np = 0
        for i in range(ydim):
            for j in range(xdim):
                npts = 0
                for k in range(nrows):
                    if qual[k] == 0 and \
                            numpy.isfinite(barytime[k]) and \
                            numpy.isfinite(fluxpixels[k,ydim*xdim/2]):
                        time[npts] = barytime[k]
                        timecorr[npts] = tcorr[k]
                        cadenceno[npts] = cadno[k]
                        quality[npts] = qual[k]
                        pixseries[i,j,npts] = fluxpixels[k,np]
                        errseries[i,j,npts] = errpixels[k,np]
                        npts += 1
                np += 1

    # define data sampling
    if status == 0 and filter:
        tpf, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0 and filter:
        cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose)
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

    # define convolution function
    if status == 0 and filter:
        if function == 'boxcar':
            filtfunc = numpy.ones(numpy.ceil(timescale))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)

    # pad time series at both ends with noise model
    if status == 0 and filter:
        for i in range(ydim):
            for j in range(xdim):
                ave, sigma = kepstat.stdev(pixseries[i,j,:len(filtfunc)])
                padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                    numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:])
                ave, sigma = kepstat.stdev(pixseries[i,j,-len(filtfunc):])
                padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                    numpy.ones(len(filtfunc)) * sigma))

                # convolve data
                if status == 0:
                    convolved = convolve(padded,filtfunc,'same')

                # remove padding from the output array
                if status == 0:
                    outdata = convolved[len(filtfunc):-len(filtfunc)]

                # subtract low frequencies
                if status == 0:
                    outmedian = median(outdata)
                    pixseries[i,j,:] = pixseries[i,j,:] - outdata + outmedian

    # construct output file
    if status == 0 and ydim*xdim < 1000:
        instruct, status = kepio.openfits(infile,'readonly',logfile,verbose)
        status = kepkey.history(call,instruct[0],outfile,logfile,verbose)
        hdulist = HDUList(instruct[0])
        cols = []
        cols.append(Column(name='TIME',format='D',unit='BJD - 2454833',disp='D12.7',array=time))
        cols.append(Column(name='TIMECORR',format='E',unit='d',disp='E13.6',array=timecorr))
        cols.append(Column(name='CADENCENO',format='J',disp='I10',array=cadenceno))
        cols.append(Column(name='QUALITY',format='J',array=quality))
        for i in range(ydim):
            for j in range(xdim):
                colname = 'COL%d_ROW%d' % (i+column,j+row)
                cols.append(Column(name=colname,format='E',disp='E13.6',array=pixseries[i,j,:]))
        hdu1 = new_table(ColDefs(cols))
        try:
            hdu1.header.update('INHERIT',True,'inherit the primary header')
        except:
            status = 0
        try:
            hdu1.header.update('EXTNAME','PIXELSERIES','name of extension')
        except:
            status = 0

        # copy the remaining keywords from the input aperture extension;
        # keywords missing from the input are silently skipped
        for keyword, comment in [
                ('EXTVER','extension version number (not format version)'),
                ('TELESCOP','telescope'),
                ('INSTRUME','detector type'),
                ('OBJECT','string version of KEPLERID'),
                ('KEPLERID','unique Kepler target identifier'),
                ('RADESYS','reference frame of celestial coordinates'),
                ('RA_OBJ','[deg] right ascension from KIC'),
                ('DEC_OBJ','[deg] declination from KIC'),
                ('EQUINOX','equinox of celestial coordinate system'),
                ('TIMEREF','barycentric correction applied to times'),
                ('TASSIGN','where time is assigned'),
                ('TIMESYS','time system is barycentric JD'),
                ('BJDREFI','integer part of BJD reference date'),
                ('BJDREFF','fraction of the day in BJD reference date'),
                ('TIMEUNIT','time unit for TIME, TSTART and TSTOP'),
                ('TSTART','observation start time in BJD-BJDREF'),
                ('TSTOP','observation stop time in BJD-BJDREF'),
                ('LC_START','mid point of first cadence in MJD'),
                ('LC_END','mid point of last cadence in MJD'),
                ('TELAPSE','[d] TSTOP - TSTART'),
                ('LIVETIME','[d] TELAPSE multiplied by DEADC'),
                ('EXPOSURE','[d] time on source'),
                ('DEADC','deadtime correction'),
                ('TIMEPIXR','bin time beginning=0 middle=0.5 end=1'),
                ('TIERRELA','[d] relative time error'),
                ('TIERABSO','[d] absolute time error'),
                ('INT_TIME','[s] photon accumulation time per frame'),
                ('READTIME','[s] readout time per frame'),
                ('FRAMETIM','[s] frame time (INT_TIME + READTIME)'),
                ('NUM_FRM','number of frames per time stamp'),
                ('TIMEDEL','[d] time resolution of data'),
                ('DATE-OBS','TSTART as UTC calendar date'),
                ('DATE-END','TSTOP as UTC calendar date'),
                ('BACKAPP','background is subtracted'),
                ('DEADAPP','deadtime applied'),
                ('VIGNAPP','vignetting or collimator correction applied'),
                ('GAIN','[electrons/count] channel gain'),
                ('READNOIS','[electrons] read noise'),
                ('NREADOUT','number of read per cadence'),
                ('TIMSLICE','time-slice readout sequence section'),
                ('MEANBLCK','[count] FSW mean black level')]:
            try:
                hdu1.header.update(keyword,instruct[1].header[keyword],comment)
            except:
                status = 0
        hdulist.append(hdu1)
        hdulist.writeto(outfile)
        status = kepkey.new('EXTNAME','APERTURE','name of extension',instruct[2],outfile,logfile,verbose)
        pyfits.append(outfile,instruct[2].data,instruct[2].header)
        status = kepio.closefits(instruct,logfile,verbose)
    else:
        message = 'WARNING -- KEPPIXSERIES: output FITS file requires > 999 columns. Non-compliant with FITS convention.'
        kepmsg.warn(logfile,message)

    # plot style
    if status == 0:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.0,
                      'axes.labelsize': 32,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 8,
                      'legend.fontsize': 8,
                      'xtick.labelsize': 12,
                      'ytick.labelsize': 12}
            pylab.rcParams.update(params)
        except:
            pass

    # plot pixel array
    fmin = 1.0e33
    fmax = -1.0e33
    if status == 0:
        pylab.figure(num=None,figsize=[12,12])
        pylab.clf()
        dx = 0.93 / xdim
        dy = 0.94 / ydim
        ax = pylab.axes([0.06,0.05,0.93,0.94])
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        pylab.gca().yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.xlim(numpy.min(pixcoord1) - 0.5,numpy.max(pixcoord1) + 0.5)
        pylab.ylim(numpy.min(pixcoord2) - 0.5,numpy.max(pixcoord2) + 0.5)
        pylab.xlabel('time', {'color' : 'k'})
        pylab.ylabel('arbitrary flux', {'color' : 'k'})
        for i in range(ydim):
            for j in range(xdim):
                tmin = amin(time)
                tmax = amax(time)
                try:
                    numpy.isfinite(amin(pixseries[i,j,:]))
                    numpy.isfinite(amax(pixseries[i,j,:]))
                    fmin = amin(pixseries[i,j,:])
                    fmax = amax(pixseries[i,j,:])
                except:
                    ugh = 1
                xmin = tmin - (tmax - tmin) / 40
                xmax = tmax + (tmax - tmin) / 40
                ymin = fmin - (fmax - fmin) / 20
                ymax = fmax + (fmax - fmin) / 20
                if kepstat.bitInBitmap(maskimg[i,j],2):
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='lightslategray')
                elif maskimg[i,j] == 0:
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy],axisbg='black')
                else:
                    pylab.axes([0.06+float(j)*dx,0.05+i*dy,dx,dy])
                if j == int(xdim / 2) and i == 0:
                    pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[])
                elif j == 0 and i == int(ydim / 2):
                    pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[])
                else:
                    pylab.setp(pylab.gca(),xticklabels=[],yticklabels=[])
                ptime = time * 1.0
                ptime = numpy.insert(ptime,[0],ptime[0])
                ptime = numpy.append(ptime,ptime[-1])
                pflux = pixseries[i,j,:] * 1.0
                pflux = numpy.insert(pflux,[0],-1000.0)
                pflux = numpy.append(pflux,-1000.0)
                pylab.plot(time,pixseries[i,j,:],color='#0000ff',linestyle='-',linewidth=0.5)
                if not kepstat.bitInBitmap(maskimg[i,j],2):
                    pylab.fill(ptime,pflux,fc='lightslategray',linewidth=0.0,alpha=1.0)
                pylab.fill(ptime,pflux,fc='#FFF380',linewidth=0.0,alpha=1.0)
                if 'loc' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(ymin,ymax)
                if 'glob' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(1.0e-10,numpy.nanmax(pixseries) * 1.05)
                if 'full' in plottype:
                    pylab.xlim(xmin,xmax)
                    pylab.ylim(1.0e-10,ymax * 1.05)

    # render plot
    if cmdLine:
        pylab.show()
    else:
        pylab.ion()
        pylab.plot([])
        pylab.ioff()
    if plotfile.lower() != 'none':
        pylab.savefig(plotfile)

    # stop time
    if status == 0:
        kepmsg.clock('KEPPIXSERIES ended at',logfile,verbose)

    return

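# Example invocation (a sketch only): the file names below are placeholders
# and the argument values illustrate one plausible choice; they are not
# defaults defined by this module.
#
# keppixseries('target-tpf.fits','keppixseries-out.fits','keppixseries.png',
#              'global',False,'boxcar',1.0,True,True,'keppixseries.log',0)
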
def keptrial(infile,outfile,datacol,errcol,fmin,fmax,nfreq,method,
             ntrials,plot,clobber,verbose,logfile,status,cmdLine=False):

    # startup parameters
    status = 0
    labelsize = 24
    ticksize = 16
    xsize = 18
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPTRIAL -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+datacol+' '
    call += 'errcol='+errcol+' '
    call += 'fmin='+str(fmin)+' '
    call += 'fmax='+str(fmax)+' '
    call += 'nfreq='+str(nfreq)+' '
    call += 'method='+method+' '
    call += 'ntrials='+str(ntrials)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPTRIAL started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPTRIAL: ' + outfile + ' exists. Use clobber=yes'
        kepmsg.err(logfile,message,verbose)
        status = 1

    # open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)

    # fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    # input data
    if status == 0:
        try:
            barytime = instr[1].data.field('barytime')
        except:
            barytime, status = kepio.readfitscol(infile,instr[1].data,'time',logfile,verbose)
    if status == 0:
        signal, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        err, status = kepio.readfitscol(infile,instr[1].data,errcol,logfile,verbose)

    # remove infinite data from time series
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            incols = [barytime, signal, err]
            [barytime, signal, err] = kepstat.removeinfinlc(signal, incols)

    # set up plot
    if status == 0:
        plotLatex = True
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            print('WARNING: install latex for scientific plotting')
            plotLatex = False

    # frequency steps and Monte Carlo iterations
    if status == 0:
        deltaf = (fmax - fmin) / nfreq
        freq = []
        pmax = []
        trial = []
        for i in range(ntrials):
            trial.append(i + 1)

            # adjust data within the error bars
            work1 = kepstat.randarray(signal,err)

            # determine FT power
            fr, power = kepfourier.ft(barytime,work1,fmin,fmax,deltaf,False)

            # determine peak in FT
            pmax.append(-1.0e30)
            for j in range(len(fr)):
                if (power[j] > pmax[-1]):
                    pmax[-1] = power[j]
                    f1 = fr[j]
            freq.append(f1)

            # plot stop-motion histogram
            pylab.ion()
            pylab.figure(1,figsize=[7,10])
            clf()
            pylab.axes([0.08,0.08,0.88,0.89])
            pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
            pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
            n, bins, patches = pylab.hist(freq,bins=nfreq,range=[fmin,fmax],
                                          align='mid',rwidth=1,ec='#0000ff',
                                          fc='#ffff00',lw=2)

            # fit normal distribution to histogram
            x = zeros(len(bins))
            for j in range(1,len(bins)):
                x[j] = (bins[j] + bins[j-1]) / 2
            pinit = numpy.array([float(i),freq[-1],deltaf])
            if i > 3:
                n = array(n,dtype='float32')
                coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty, status = \
                    kepfit.leastsquare('gauss',pinit,x[1:],n,None,logfile,verbose)
                fitfunc = kepfunc.gauss()
                f = arange(fmin,fmax,(fmax-fmin)/100)
                fit = fitfunc(coeffs,f)
                pylab.plot(f,fit,'r-',linewidth=2)
            if plotLatex:
                xlabel(r'Frequency (d$^{-1}$)', {'color' : 'k'})
            else:
                xlabel(r'Frequency (1/d)', {'color' : 'k'})
            ylabel('N', {'color' : 'k'})
            xlim(fmin,fmax)
            grid()

            # render plot
            if plot:
                if cmdLine:
                    pylab.show()
                else:
                    pylab.ion()
                    pylab.plot([])
                    pylab.ioff()

    # period results
    if status == 0:
        p = 1.0 / coeffs[1]
        perr = p * coeffs[2] / coeffs[1]
        f1 = fmin
        f2 = fmax
        gotbin = False
        for i in range(len(n)):
            if n[i] > 0 and not gotbin:
                f1 = bins[i]
                gotbin = True
        gotbin = False
        for i in range(len(n)-1,0,-1):
            if n[i] > 0 and not gotbin:
                f2 = bins[i+1]
                gotbin = True
        powave, powstdev = kepstat.stdev(pmax)

    # print result
    if status == 0:
        print('              best period: %.10f days (%.7f min)' % (p, p * 1440.0))
        print('     1-sigma period error: %.10f days (%.7f min)' % (perr, perr * 1440.0))
        print('             search range: %.10f - %.10f days ' % (1.0 / fmax, 1.0 / fmin))
        print('    100%% confidence range: %.10f - %.10f days ' % (1.0 / f2, 1.0 / f1))
#        print ' detection confidence: %.2f sigma' % (powave / powstdev)
        print('         number of trials: %d' % ntrials)
        print(' number of frequency bins: %d' % nfreq)

    # history keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## write output file
    if status == 0:
        col1 = Column(name='TRIAL',format='J',array=trial)
        col2 = Column(name='FREQUENCY',format='E',unit='1/day',array=freq)
        col3 = Column(name='POWER',format='E',array=pmax)
        cols = ColDefs([col1,col2,col3])
        instr.append(new_table(cols))
        for keyword, value, comment in [
                ('EXTNAME','TRIALS','Extension name'),
                ('SEARCHR1',1.0 / fmax,'Search range lower bound (days)'),
                ('SEARCHR2',1.0 / fmin,'Search range upper bound (days)'),
                ('NFREQ',nfreq,'Number of frequency bins'),
                ('PERIOD',p,'Best period (days)'),
                ('PERIODE',perr,'1-sigma period error (days)'),
#                ('DETNCONF',powave/powstdev,'Detection significance (sigma)'),
                ('CONFIDR1',1.0 / f2,'Trial confidence lower bound (days)'),
                ('CONFIDR2',1.0 / f1,'Trial confidence upper bound (days)'),
                ('NTRIALS',ntrials,'Number of trials')]:
            try:
                instr[-1].header.update(keyword,value,comment)
            except:
                status = 1
        instr.writeto(outfile)

    # close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPTRIAL completed at'
    else:
        message = '\nKEPTRIAL aborted at'
    kepmsg.clock(message,logfile,verbose)

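# The Monte Carlo scheme above in a stand-alone sketch (illustrative only; the
# helper name, the crude DFT power estimate, and all numbers are assumptions
# rather than code used elsewhere in this package). Each trial perturbs the
# fluxes within their errors, the peak frequency of that trial's periodogram
# is recorded, and the scatter of the peaks serves as the period uncertainty.
def _demo_trial_peaks(time, flux, ferr, fmin, fmax, nfreq=100, ntrials=20):
    import numpy
    fgrid = numpy.linspace(fmin, fmax, int(nfreq))
    peaks = numpy.zeros(int(ntrials))
    for i in range(int(ntrials)):
        work = flux + numpy.random.normal(0.0, ferr)          # jiggle within errors
        power = numpy.abs(numpy.dot(numpy.exp(-2j * numpy.pi *
                                              numpy.outer(fgrid, time)), work))
        peaks[i] = fgrid[numpy.argmax(power)]                 # frequency of peak power
    return peaks.mean(), peaks.std()
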
def kepfilter(infile,outfile,datacol,function,cutoff,passband,plot,plotlab,
              clobber,verbose,logfile,status,cmdLine=False):

    ## startup parameters
    status = 0
    numpy.seterr(all="ignore")
    labelsize = 24
    ticksize = 16
    xsize = 16
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    ## log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPFILTER -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'datacol='+str(datacol)+' '
    call += 'function='+str(function)+' '
    call += 'cutoff='+str(cutoff)+' '
    call += 'passband='+str(passband)+' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot='+plotit+' '
    call += 'plotlab='+str(plotlab)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    ## start time
    kepmsg.clock('KEPFILTER started at',logfile,verbose)

    ## test log file
    logfile = kepmsg.test(logfile)

    ## clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPFILTER: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile,message,verbose)

    ## open input file
    if status == 0:
        instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status)
    if status == 0:
        try:
            work = instr[0].header['FILEVER']
            cadenom = 1.0
        except:
            cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    if status == 0:
        instr = kepkey.emptykeys(instr,file,logfile,verbose)

    ## read table structure
    if status == 0:
        table, status = kepio.readfitstab(infile,instr[1],logfile,verbose)

    # read time and flux columns
    if status == 0:
        barytime, status = kepio.readtimecol(infile,table,logfile,verbose)
        flux, status = kepio.readsapcol(infile,table,logfile,verbose)

    # filter input data table
    if status == 0:
        try:
            nanclean = instr[1].header['NANCLEAN']
        except:
            naxis2 = 0
            for i in range(len(table.field(0))):
                if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0):
                    table[naxis2] = table[i]
                    naxis2 += 1
            instr[1].data = table[:naxis2]
            comment = 'NaN cadences removed from data'
            status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose)

    ## read table columns
    if status == 0:
        intime, status = kepio.readtimecol(infile,instr[1].data,logfile,verbose)
    if status == 0:
        indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose)
    if status == 0:
        intime = intime + bjdref
        indata = indata / cadenom

    ## define data sampling
    if status == 0:
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

    ## define convolution function
    if status == 0:
        if function == 'boxcar':
            filtfunc = numpy.ones(numpy.ceil(timescale))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)

    ## pad time series at both ends with noise model
    if status == 0:
        ave, sigma = kepstat.stdev(indata[:len(filtfunc)])
        padded = append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                          np.ones(len(filtfunc)) * sigma), indata)
        ave, sigma = kepstat.stdev(indata[-len(filtfunc):])
        padded = append(padded, kepstat.randarray(np.ones(len(filtfunc)) * ave,
                                                  np.ones(len(filtfunc)) * sigma))

    ## convolve data
    if status == 0:
        convolved = convolve(padded,filtfunc,'same')

    ## remove padding from the output array
    if status == 0:
        outdata = convolved[len(filtfunc):-len(filtfunc)]

    ## subtract low frequencies
    if status == 0 and passband == 'high':
        outmedian = median(outdata)
        outdata = indata - outdata + outmedian

    ## comment keyword in output file
    if status == 0:
        status = kepkey.history(call,instr[0],outfile,logfile,verbose)

    ## clean up x-axis unit
    if status == 0:
        intime0 = float(int(tstart / 100) * 100.0)
        if intime0 < 2.4e6: intime0 += 2.4e6
        ptime = intime - intime0
        xlab = 'BJD $-$ %d' % intime0

    ## clean up y-axis units
    if status == 0:
        pout = indata * 1.0
        pout2 = outdata * 1.0
        nrm = len(str(int(numpy.nanmax(pout))))-1
        pout = pout / 10**nrm
        pout2 = pout2 / 10**nrm
        ylab = '10$^%d$ %s' % (nrm, plotlab)

        ## data limits
        xmin = ptime.min()
        xmax = ptime.max()
        ymin = numpy.nanmin(pout)
        ymax = numpy.nanmax(pout)
        xr = xmax - xmin
        yr = ymax - ymin
        ptime = insert(ptime,[0],[ptime[0]])
        ptime = append(ptime,[ptime[-1]])
        pout = insert(pout,[0],[0.0])
        pout = append(pout,0.0)
        pout2 = insert(pout2,[0],[0.0])
        pout2 = append(pout2,0.0)

    ## plot light curve
    if status == 0 and plot:
        try:
            params = {'backend': 'png',
                      'axes.linewidth': 2.5,
                      'axes.labelsize': labelsize,
                      'axes.font': 'sans-serif',
                      'axes.fontweight' : 'bold',
                      'text.fontsize': 12,
                      'legend.fontsize': 12,
                      'xtick.labelsize': ticksize,
                      'ytick.labelsize': ticksize}
            rcParams.update(params)
        except:
            print('ERROR -- KEPFILTER: install latex for scientific plotting')
            status = 1
    if status == 0 and plot:
        pylab.figure(figsize=[xsize,ysize])
        pylab.clf()

        ## plot filtered data
        ax = pylab.axes([0.06,0.1,0.93,0.87])
        pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        setp(labels, 'rotation', 90, fontsize=12)
        pylab.plot(ptime,pout,color='#ff9900',linestyle='-',linewidth=lwidth)
        fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha)
        if passband == 'low':
            pylab.plot(ptime[1:-1],pout2[1:-1],color=lcolor,linestyle='-',linewidth=lwidth)
        else:
            pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth)
            fill(ptime,pout2,color=lcolor,linewidth=0.0,alpha=falpha)
        xlabel(xlab, {'color' : 'k'})
        ylabel(ylab, {'color' : 'k'})
        xlim(xmin-xr*0.01,xmax+xr*0.01)
        if ymin >= 0.0:
            ylim(ymin-yr*0.01,ymax+yr*0.01)
        else:
            ylim(1.0e-10,ymax+yr*0.01)
        pylab.grid()

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

    ## write output file
    if status == 0:
        for i in range(len(outdata)):
            instr[1].data.field(datacol)[i] = outdata[i]
        instr.writeto(outfile)

    ## close input file
    if status == 0:
        status = kepio.closefits(instr,logfile,verbose)

    ## end time
    if (status == 0):
        message = 'KEPFILTER completed at'
    else:
        message = '\nKEPFILTER aborted at'
    kepmsg.clock(message,logfile,verbose)

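# Stand-alone sketch of the filtering used above (illustrative only; written
# against plain numpy, with an arbitrary kernel width rather than one derived
# from the cadence and cutoff frequency). A unit-sum boxcar kernel estimates
# the low-frequency trend; subtracting that trend and restoring its median
# mimics the passband == 'high' branch of kepfilter.
def _demo_boxcar_filter(flux, kernel_width=25):
    import numpy
    flux = numpy.asarray(flux, dtype=float)
    kernel = numpy.ones(int(kernel_width)) / float(kernel_width)   # boxcar, sums to 1
    pad = numpy.ones(len(kernel))
    padded = numpy.concatenate([pad * numpy.mean(flux[:len(kernel)]), flux,
                                pad * numpy.mean(flux[-len(kernel):])])
    lowpass = numpy.convolve(padded, kernel, 'same')[len(kernel):-len(kernel)]
    highpass = flux - lowpass + numpy.median(lowpass)
    return lowpass, highpass
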
def leastsquare(functype,pinit,xdata,ydata,yerr,logfile,verbose):

    status = 0
    coeffs = []

    # functional form
    if (functype == 'poly0'): fitfunc = kepfunc.poly0()
    if (functype == 'poly1'): fitfunc = kepfunc.poly1()
    if (functype == 'poly2'): fitfunc = kepfunc.poly2()
    if (functype == 'poly3'): fitfunc = kepfunc.poly3()
    if (functype == 'poly4'): fitfunc = kepfunc.poly4()
    if (functype == 'poly5'): fitfunc = kepfunc.poly5()
    if (functype == 'poly6'): fitfunc = kepfunc.poly6()
    if (functype == 'poly7'): fitfunc = kepfunc.poly7()
    if (functype == 'poly8'): fitfunc = kepfunc.poly8()
    if (functype == 'poly1con'): fitfunc = kepfunc.poly1con()
    if (functype == 'gauss'): fitfunc = kepfunc.gauss()
    if (functype == 'gauss0'): fitfunc = kepfunc.gauss0()
    if (functype == 'congauss'): fitfunc = kepfunc.congauss()
    if (functype == 'sine'): fitfunc = kepfunc.sine()
    if (functype == 'moffat0'): fitfunc = kepfunc.moffat0()
    if (functype == 'conmoffat'): fitfunc = kepfunc.conmoffat()

    # define error coefficient calculation
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

    # if no data errors, substitute rms of fit
    if (yerr is None):
        yerr = []
        rerr = []
        for i in range(len(ydata)):
            rerr.append(1.e10)
        try:
            out = optimize.leastsq(errfunc,pinit,args=(xdata,ydata,rerr),full_output=1)
        except:
            message = 'ERROR -- KEPFIT.LEASTSQUARE: failed to fit data'
            status = kepmsg.err(logfile,message,verbose)
            if functype == 'poly0':
                out = [numpy.mean(ydata),sqrt(numpy.mean(ydata))]
        if (functype == 'poly0' or functype == 'sineCompareBinPSF'):
            coeffs.append(out[0])
        else:
            coeffs = out[0]
        if (len(coeffs) > 1):
            fit = fitfunc(coeffs,xdata)
        else:
            fit = numpy.zeros(len(xdata))
            for i in range(len(fit)):
                fit[i] = coeffs[0]
        sigma, status = kepstat.rms(ydata,fit,logfile,verbose)
        for i in range(len(ydata)):
            yerr.append(sigma)

    # fit data
    try:
        out = optimize.leastsq(errfunc, pinit, args=(xdata, ydata, yerr), full_output=1)
    except:
        message = 'ERROR -- KEPFIT.LEASTSQUARE: failed to fit data'
        status = kepmsg.err(logfile,message,verbose)
        if functype == 'poly0':
            out = [numpy.mean(ydata),sqrt(numpy.mean(ydata))]

    # define coefficients
    coeffs = []
    covar = []
    if (functype == 'poly0' or functype == 'poly1con' or functype == 'sineCompareBinPSF'):
        coeffs.append(out[0])
        covar.append(out[1])
    else:
        coeffs = out[0]
        covar = out[1]

    # calculate 1-sigma error on coefficients
    errors = []
    if (covar is None):
        message = 'WARNING -- KEPFIT.leastsquare: NULL covariance matrix'
#        kepmsg.log(logfile,message,verbose)
    for i in range(len(coeffs)):
        if (covar is not None and len(coeffs) > 1):
            errors.append(sqrt(covar[i][i]))
        else:
            errors.append(coeffs[i])

    # generate fit points for rms calculation
    if (len(coeffs) > 1):
        fit = fitfunc(coeffs,xdata)
    else:
        fit = numpy.zeros(len(xdata))
        for i in range(len(fit)):
            fit[i] = coeffs[0]
    sigma, status = kepstat.rms(ydata,fit,logfile,verbose)

    # generate fit points for plotting
    dx = xdata[len(xdata)-1] - xdata[0]
    plotx = linspace(xdata.min(),xdata.max(),10000)
    ploty = fitfunc(coeffs,plotx)
    if (len(coeffs) == 1):
        ploty = []
        for i in range(len(plotx)):
            ploty.append(coeffs[0])
        ploty = numpy.array(ploty)

    # reduced chi^2 calculation
    chi2 = 0
    dof = len(ydata) - len(coeffs)
    for i in range(len(ydata)):
        chi2 += (ydata[i] - fit[i])**2 / yerr[i]
    chi2 /= dof

    return coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty, status

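# Minimal usage sketch for leastsquare (illustrative only; the synthetic data,
# initial guess, and log file name below are made up). Judging from the calls
# elsewhere in this module, the 'gauss' model takes parameters in the order
# [amplitude, centre, width], so the returned coeffs follow the same order.
#
# import numpy
# x = numpy.linspace(-5.0, 5.0, 101)
# y = 10.0 * numpy.exp(-0.5 * (x / 1.5)**2) + numpy.random.normal(0.0, 0.2, len(x))
# e = numpy.ones(len(x)) * 0.2
# coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty, status = \
#     leastsquare('gauss', [8.0, 0.0, 1.0], x, y, e, 'kepfit.log', False)
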
def kephalophot(infile,outfile,plotfile,plottype,filter,function,cutoff,
                clobber,verbose,logfile,status,cmdLine=False):

    # input arguments
    status = 0
    seterr(all="ignore")

    # log the call
    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile,hashline,verbose)
    call = 'KEPHALOPHOT -- '
    call += 'infile='+infile+' '
    call += 'outfile='+outfile+' '
    call += 'plotfile='+plotfile+' '
    call += 'plottype='+plottype+' '
    filt = 'n'
    if (filter): filt = 'y'
    call += 'filter='+filt+' '
    call += 'function='+function+' '
    call += 'cutoff='+str(cutoff)+' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber='+overwrite+' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose='+chatter+' '
    call += 'logfile='+logfile
    kepmsg.log(logfile,call+'\n',verbose)

    # start time
    kepmsg.clock('KEPHALOPHOT started at',logfile,verbose)

    # test log file
    logfile = kepmsg.test(logfile)

    # clobber output file
    if clobber: status = kepio.clobber(outfile,logfile,verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPHALOPHOT: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile,message,verbose)

    # open TPF FITS file
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
            kepio.readTPF(infile,'TIME',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)

    # read mask definition data from TPF file
    if status == 0:
        maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(infile,logfile,verbose)

    # print target data
    if status == 0:
        print('')
        print('      KepID: %s' % kepid)
        print(' RA (J2000): %s' % ra)
        print('Dec (J2000): %s' % dec)
        print('     KepMag: %s' % kepmag)
        print('   SkyGroup: %2s' % skygroup)
        print('     Season: %2s' % str(season))
        print('    Channel: %2s' % channel)
        print('     Module: %2s' % module)
        print('     Output: %1s' % output)
        print('')

    # how many quality = 0 rows? how many pixels?
    if status == 0:
        np = ydim * xdim
        nrows = len(fluxpixels)
        npts = 0
        for i in range(nrows):
            if qual[i] < 1e4 and \
                    numpy.isfinite(barytime[i]) and \
                    numpy.isfinite(fluxpixels[i,ydim*xdim/2]):
                npts += 1
        time = empty((npts))
        timecorr = empty((npts))
        cadenceno = empty((npts))
        quality = empty((npts))
        pixseries = zeros((npts,np))
        errseries = zeros((npts,np))
#        pixseries = empty((ydim,xdim,npts))
#        errseries = empty((ydim,xdim,npts))

    # construct output light curves
    if status == 0:
        for i in range(np):
            npts = 0
            for j in range(nrows):
                if qual[j] < 1e4 and \
                        numpy.isfinite(barytime[j]) and \
                        numpy.isfinite(fluxpixels[j,i]):
                    time[npts] = barytime[j]
                    timecorr[npts] = tcorr[j]
                    cadenceno[npts] = cadno[j]
                    quality[npts] = qual[j]
                    pixseries[npts,i] = fluxpixels[j,i]
                    errseries[npts,i] = errpixels[j,i]
                    npts += 1

    # define data sampling
    if status == 0 and filter:
        tpf, status = kepio.openfits(infile,'readonly',logfile,verbose)
    if status == 0 and filter:
        cadence, status = kepkey.cadence(tpf[1],infile,logfile,verbose)
        tr = 1.0 / (cadence / 86400)
        timescale = 1.0 / (cutoff / tr)

    # define convolution function
    if status == 0 and filter:
        if function == 'boxcar':
            filtfunc = numpy.ones(numpy.ceil(timescale))
        elif function == 'gauss':
            timescale /= 2
            dx = numpy.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss()
            filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx))
        elif function == 'sinc':
            dx = numpy.ceil(timescale * 12 + 1)
            fx = linspace(0,dx-1,dx)
            fx = fx - dx / 2 + 0.5
            fx /= timescale
            filtfunc = numpy.sinc(fx)
        filtfunc /= numpy.sum(filtfunc)

    # pad time series at both ends with noise model
    if status == 0 and filter:
        for i in range(ydim):
            for j in range(xdim):
                ave, sigma = kepstat.stdev(pixseries[i,j,:len(filtfunc)])
                padded = numpy.append(kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                    numpy.ones(len(filtfunc)) * sigma), pixseries[i,j,:])
                ave, sigma = kepstat.stdev(pixseries[i,j,-len(filtfunc):])
                padded = numpy.append(padded, kepstat.randarray(numpy.ones(len(filtfunc)) * ave, \
                    numpy.ones(len(filtfunc)) * sigma))

                # convolve data
                if status == 0:
                    convolved = convolve(padded,filtfunc,'same')

                # remove padding from the output array
                if status == 0:
                    outdata = convolved[len(filtfunc):-len(filtfunc)]

                # subtract low frequencies
                if status == 0:
                    outmedian = median(outdata)
                    pixseries[i,j,:] = pixseries[i,j,:] - outdata + outmedian

    # construct weighted time series
    if status == 0:
        wgt = numpy.ones((np,3))
        twgt = numpy.ones((np,3))
        wgt /= sum(wgt,axis=0)
        satlvl = 0.8 * numpy.max(numpy.max(pixseries,axis=1))
        brk1 = 9.7257203
        brk2 = 45.
        ind1 = where(time-time[0] < brk1)
        ind2 = where((time-time[0] >= brk1) & (time-time[0] < brk2))
        ind3 = where(time-time[0] >= brk2)
        z = numpy.array([0.0,0.0,0.0])
        for i in range(np):
            if max(pixseries[ind1,i].flatten()) > satlvl or max(pixseries[ind1,i].flatten()) <= 100:
                wgt[i,0] = 0
                z[0] += 1
            if max(pixseries[ind2,i].flatten()) > satlvl or max(pixseries[ind2,i].flatten()) <= 100:
                wgt[i,1] = 0
                z[1] += 1
            if max(pixseries[ind3,i].flatten()) > satlvl or max(pixseries[ind3,i].flatten()) <= 100:
                wgt[i,2] = 0
                z[2] += 1
        print(z)
        print(np - z)
        sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten()
        sf2 = numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten()
        sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten()
        sf1 /= numpy.median(sf1)
        sf2 /= numpy.median(sf2)
        sf3 /= numpy.median(sf3)
        originalflux = numpy.concatenate([sf1,sf2,sf3])

#        a = numpy.array([0.0,0.0,0.0])
#        t = 0
#        ca = numpy.array([0.0,0.0,0.0])
#        ct = 0
#        sig1 = numpy.std(sf1)
#        sig2 = numpy.std(sf2)
#        sig3 = numpy.std(sf3)
#        while 1:
#            j = int(numpy.floor(numpy.random.random()*np))
#            if sum(wgt[j,:]) == 0: continue
#            if ct == 1000:
#                print(ca)
#                if ca[0] < 333 and ca[1] < 333 and ca[2] < 333: break
#                ca = numpy.array([0.0,0.0,0.0])
#                ct = 0
#            t += 1
#            ct += 1
#            wgt /= sum(wgt,axis=0)
#            twgt = copy(wgt)
#            twgt[j,:] *= numpy.random.normal(1.0,0.05,3)
#            twgt /= sum(twgt,axis=0)
#            tsf1 = numpy.dot(pixseries[ind1,:],twgt[:,0]).flatten()
#            tsf2 = numpy.dot(pixseries[ind2,:],twgt[:,1]).flatten()
#            tsf3 = numpy.dot(pixseries[ind3,:],twgt[:,2]).flatten()
#            tsf1 /= numpy.median(tsf1)
#            tsf2 /= numpy.median(tsf2)
#            tsf3 /= numpy.median(tsf3)
#            tsig1 = numpy.std(tsf1)
#            tsig2 = numpy.std(tsf2)
#            tsig3 = numpy.std(tsf3)
#            if tsig1 < sig1:
#                wgt[:,0] = twgt[:,0]
#                sig1 = tsig1
#                a[0] += 1
#                ca[0] += 1
#            if tsig2 < sig2:
#                wgt[:,1] = twgt[:,1]
#                sig2 = tsig2
#                a[1] += 1
#                ca[1] += 1
#            if tsig3 < sig3:
#                wgt[:,2] = twgt[:,2]
#                sig3 = tsig3
#                a[2] += 1
#                ca[2] += 1
#        print(100*a/t)
#        sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten()
#        sf2 = numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten()
#        sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten()
#        sf1 /= numpy.median(sf1)
#        sf2 /= numpy.median(sf2)
#        sf3 /= numpy.median(sf3)
#
#        a = numpy.array([0.0,0.0,0.0])
#        t = 0
#        ca = numpy.array([0.0,0.0,0.0])
#        ct = 0
#        sig1 = sum(numpy.fabs(sf1[1:]-sf1[:-1]))
#        sig2 = sum(numpy.fabs(sf2[1:]-sf2[:-1]))
#        sig3 = sum(numpy.fabs(sf3[1:]-sf3[:-1]))
#        while 1:
#            j = int(numpy.floor(numpy.random.random()*np))
#            if sum(wgt[j,:]) == 0: continue
#            if ct == 1000:
#                print(ca)
#                if ca[0] < 167 and ca[1] < 167 and ca[2] < 167: break
#                ca = numpy.array([0.0,0.0,0.0])
#                ct = 0
#            t += 1
#            ct += 1
#            wgt /= sum(wgt,axis=0)
#            twgt = copy(wgt)
#            twgt[j,:] *= numpy.random.normal(1.0,0.05,3)
#            twgt /= sum(twgt,axis=0)
#            tsf1 = numpy.dot(pixseries[ind1,:],twgt[:,0]).flatten()
#            tsf2 = numpy.dot(pixseries[ind2,:],twgt[:,1]).flatten()
#            tsf3 = numpy.dot(pixseries[ind3,:],twgt[:,2]).flatten()
#            tsf1 /= numpy.median(tsf1)
#            tsf2 /= numpy.median(tsf2)
#            tsf3 /= numpy.median(tsf3)
#            tsig1 = sum(numpy.fabs(tsf1[1:]-tsf1[:-1]))
#            tsig2 = sum(numpy.fabs(tsf2[1:]-tsf2[:-1]))
#            tsig3 = sum(numpy.fabs(tsf3[1:]-tsf3[:-1]))
#            if tsig1 < sig1:
#                wgt[:,0] = twgt[:,0]
#                sig1 = tsig1
#                a[0] += 1
#                ca[0] += 1
#            if tsig2 < sig2:
#                wgt[:,1] = twgt[:,1]
#                sig2 = tsig2
#                a[1] += 1
#                ca[1] += 1
#            if tsig3 < sig3:
#                wgt[:,2] = twgt[:,2]
#                sig3 = tsig3
#                a[2] += 1
#                ca[2] += 1
#        print(100*a/t)
#        sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten()
#        sf2 = numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten()
#        sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten()
#        sf1 /= numpy.median(sf1)
#        sf2 /= numpy.median(sf2)
#        sf3 /= numpy.median(sf3)

        a = numpy.array([0.0,0.0,0.0])
        t = 0
        ca = numpy.array([0.0,0.0,0.0])
        ct = 0
        sig1 = sum(numpy.fabs(sf1[2:]-2*sf1[1:-1]+sf1[:-2]))
        sig2 = sum(numpy.fabs(sf2[2:]-2*sf2[1:-1]+sf2[:-2]))
        sig3 = sum(numpy.fabs(sf3[2:]-2*sf3[1:-1]+sf3[:-2]))
        while 1:
            j = int(numpy.floor(numpy.random.random()*np))
            if sum(wgt[j,:]) == 0: continue
            if ct == 1000:
                print(ca)
                if ca[0] < 20 and ca[1] < 20 and ca[2] < 20: break
                if t > 1000000: break
                ca = numpy.array([0.0,0.0,0.0])
                ct = 0
            t += 1
            ct += 1
            wgt /= sum(wgt,axis=0)
            twgt = copy(wgt)
            twgt[j,:] *= numpy.random.normal(1.0,0.05,3)
            twgt /= sum(twgt,axis=0)
            tsf1 = numpy.dot(pixseries[ind1,:],twgt[:,0]).flatten()
            tsf2 = numpy.dot(pixseries[ind2,:],twgt[:,1]).flatten()
            tsf3 = numpy.dot(pixseries[ind3,:],twgt[:,2]).flatten()
            tsf1 /= numpy.median(tsf1)
            tsf2 /= numpy.median(tsf2)
            tsf3 /= numpy.median(tsf3)
            tsig1 = sum(numpy.fabs(tsf1[2:]-2*tsf1[1:-1]+tsf1[:-2]))
            tsig2 = sum(numpy.fabs(tsf2[2:]-2*tsf2[1:-1]+tsf2[:-2]))
            tsig3 = sum(numpy.fabs(tsf3[2:]-2*tsf3[1:-1]+tsf3[:-2]))
            if tsig1 < sig1:
                wgt[:,0] = twgt[:,0]
                sig1 = tsig1
                a[0] += 1
                ca[0] += 1
            if tsig2 < sig2:
                wgt[:,1] = twgt[:,1]
                sig2 = tsig2
                a[1] += 1
                ca[1] += 1
            if tsig3 < sig3:
                wgt[:,2] = twgt[:,2]
                sig3 = tsig3
                a[2] += 1
                ca[2] += 1
        print(100 * a / t)
        sf1 = numpy.dot(pixseries[ind1,:],wgt[:,0]).flatten()
        sf2 = numpy.dot(pixseries[ind2,:],wgt[:,1]).flatten()
        sf3 = numpy.dot(pixseries[ind3,:],wgt[:,2]).flatten()
        sf1 /= numpy.median(sf1)
        sf2 /= numpy.median(sf2)
        sf3 /= numpy.median(sf3)
        finalflux = numpy.concatenate([sf1,sf2,sf3])

    # construct output file
    if status == 0:
        instruct, status = kepio.openfits(infile,'readonly',logfile,verbose)
        status = kepkey.history(call,instruct[0],outfile,logfile,verbose)
        hdulist = HDUList(instruct[0])
        cols = []
        cols.append(Column(name='TIME',format='D',unit='BJD - 2454833',disp='D12.7',array=time))
        cols.append(Column(name='TIMECORR',format='E',unit='d',disp='E13.6',array=timecorr))
        cols.append(Column(name='CADENCENO',format='J',disp='I10',array=cadenceno))
        cols.append(Column(name='QUALITY',format='J',array=quality))
        cols.append(Column(name='ORGFLUX',format='E',disp='E13.6',array=originalflux))
        cols.append(Column(name='FLUX',format='E',disp='E13.6',array=finalflux))
#        for i in range(ydim):
#            for j in range(xdim):
#                colname = 'COL%d_ROW%d' % (i+column,j+row)
#                cols.append(Column(name=colname,format='E',disp='E13.6',array=pixseries[i,j,:]))
        hdu1 = new_table(ColDefs(cols))
        try:
            hdu1.header.update('INHERIT',True,'inherit the primary header')
        except:
            status = 0
        try:
            hdu1.header.update('EXTNAME','PIXELSERIES','name of extension')
        except:
            status = 0
        try:
            hdu1.header.update('EXTVER',instruct[1].header['EXTVER'],'extension version number (not format version)')
        except:
            status = 0
        try:
            hdu1.header.update('TELESCOP',instruct[1].header['TELESCOP'],'telescope')
        except:
            status = 0
        try:
            hdu1.header.update('INSTRUME',instruct[1].header['INSTRUME'],'detector type')
        except:
            status = 0
        try:
            hdu1.header.update('OBJECT',instruct[1].header['OBJECT'],'string version of KEPLERID')
        except:
            status = 0
        try:
            hdu1.header.update('KEPLERID',instruct[1].header['KEPLERID'],'unique Kepler target identifier')
        except:
            status = 0
        try:
            hdu1.header.update('RADESYS',instruct[1].header['RADESYS'],'reference frame of celestial
coordinates') except: status = 0 try: hdu1.header.update('RA_OBJ', instruct[1].header['RA_OBJ'], '[deg] right ascension from KIC') except: status = 0 try: hdu1.header.update('DEC_OBJ', instruct[1].header['DEC_OBJ'], '[deg] declination from KIC') except: status = 0 try: hdu1.header.update('EQUINOX', instruct[1].header['EQUINOX'], 'equinox of celestial coordinate system') except: status = 0 try: hdu1.header.update('TIMEREF', instruct[1].header['TIMEREF'], 'barycentric correction applied to times') except: status = 0 try: hdu1.header.update('TASSIGN', instruct[1].header['TASSIGN'], 'where time is assigned') except: status = 0 try: hdu1.header.update('TIMESYS', instruct[1].header['TIMESYS'], 'time system is barycentric JD') except: status = 0 try: hdu1.header.update('BJDREFI', instruct[1].header['BJDREFI'], 'integer part of BJD reference date') except: status = 0 try: hdu1.header.update('BJDREFF', instruct[1].header['BJDREFF'], 'fraction of the day in BJD reference date') except: status = 0 try: hdu1.header.update('TIMEUNIT', instruct[1].header['TIMEUNIT'], 'time unit for TIME, TSTART and TSTOP') except: status = 0 try: hdu1.header.update('TSTART', instruct[1].header['TSTART'], 'observation start time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('TSTOP', instruct[1].header['TSTOP'], 'observation stop time in BJD-BJDREF') except: status = 0 try: hdu1.header.update('LC_START', instruct[1].header['LC_START'], 'mid point of first cadence in MJD') except: status = 0 try: hdu1.header.update('LC_END', instruct[1].header['LC_END'], 'mid point of last cadence in MJD') except: status = 0 try: hdu1.header.update('TELAPSE', instruct[1].header['TELAPSE'], '[d] TSTOP - TSTART') except: status = 0 try: hdu1.header.update('LIVETIME', instruct[1].header['LIVETIME'], '[d] TELAPSE multiplied by DEADC') except: status = 0 try: hdu1.header.update('EXPOSURE', instruct[1].header['EXPOSURE'], '[d] time on source') except: status = 0 try: hdu1.header.update('DEADC', instruct[1].header['DEADC'], 'deadtime correction') except: status = 0 try: hdu1.header.update('TIMEPIXR', instruct[1].header['TIMEPIXR'], 'bin time beginning=0 middle=0.5 end=1') except: status = 0 try: hdu1.header.update('TIERRELA', instruct[1].header['TIERRELA'], '[d] relative time error') except: status = 0 try: hdu1.header.update('TIERABSO', instruct[1].header['TIERABSO'], '[d] absolute time error') except: status = 0 try: hdu1.header.update('INT_TIME', instruct[1].header['INT_TIME'], '[s] photon accumulation time per frame') except: status = 0 try: hdu1.header.update('READTIME', instruct[1].header['READTIME'], '[s] readout time per frame') except: status = 0 try: hdu1.header.update('FRAMETIM', instruct[1].header['FRAMETIM'], '[s] frame time (INT_TIME + READTIME)') except: status = 0 try: hdu1.header.update('NUM_FRM', instruct[1].header['NUM_FRM'], 'number of frames per time stamp') except: status = 0 try: hdu1.header.update('TIMEDEL', instruct[1].header['TIMEDEL'], '[d] time resolution of data') except: status = 0 try: hdu1.header.update('DATE-OBS', instruct[1].header['DATE-OBS'], 'TSTART as UTC calendar date') except: status = 0 try: hdu1.header.update('DATE-END', instruct[1].header['DATE-END'], 'TSTOP as UTC calendar date') except: status = 0 try: hdu1.header.update('BACKAPP', instruct[1].header['BACKAPP'], 'background is subtracted') except: status = 0 try: hdu1.header.update('DEADAPP', instruct[1].header['DEADAPP'], 'deadtime applied') except: status = 0 try: hdu1.header.update('VIGNAPP', instruct[1].header['VIGNAPP'], 'vignetting 
or collimator correction applied') except: status = 0 try: hdu1.header.update('GAIN', instruct[1].header['GAIN'], '[electrons/count] channel gain') except: status = 0 try: hdu1.header.update('READNOIS', instruct[1].header['READNOIS'], '[electrons] read noise') except: status = 0 try: hdu1.header.update('NREADOUT', instruct[1].header['NREADOUT'], 'number of read per cadence') except: status = 0 try: hdu1.header.update('TIMSLICE', instruct[1].header['TIMSLICE'], 'time-slice readout sequence section') except: status = 0 try: hdu1.header.update('MEANBLCK', instruct[1].header['MEANBLCK'], '[count] FSW mean black level') except: status = 0 hdulist.append(hdu1) hdulist.writeto(outfile) status = kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2], outfile, logfile, verbose) pyfits.append(outfile, instruct[2].data, instruct[2].header) wgt1 = numpy.reshape(wgt[:, 0], (ydim, xdim)) wgt2 = numpy.reshape(wgt[:, 1], (ydim, xdim)) wgt3 = numpy.reshape(wgt[:, 2], (ydim, xdim)) hdu3 = ImageHDU(data=wgt1, header=instruct[2].header, name='WEIGHTS1') hdu4 = ImageHDU(data=wgt2, header=instruct[2].header, name='WEIGHTS2') hdu5 = ImageHDU(data=wgt3, header=instruct[2].header, name='WEIGHTS3') pyfits.append(outfile, hdu3.data, hdu3.header) pyfits.append(outfile, hdu4.data, hdu4.header) pyfits.append(outfile, hdu5.data, hdu5.header) status = kepio.closefits(instruct, logfile, verbose) else: message = 'WARNING -- KEPHALOPHOT: output FITS file requires > 999 columns. Non-compliant with FITS convention.' kepmsg.warn(logfile, message) # plot style if status == 0: try: params = { 'backend': 'png', 'axes.linewidth': 2.0, 'axes.labelsize': 32, 'axes.font': 'sans-serif', 'axes.fontweight': 'bold', 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 12, 'ytick.labelsize': 12 } pylab.rcParams.update(params) except: pass # plot pixel array fmin = 1.0e33 fmax = -1.033 if status == 0: pylab.figure(num=None, figsize=[12, 12]) pylab.clf() dx = 0.93 #/ xdim dy = 0.94 #/ ydim ax = pylab.axes([0.06, 0.05, 0.93, 0.94]) pylab.gca().xaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter( pylab.ScalarFormatter(useOffset=False)) pylab.gca().xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) pylab.gca().yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(integer=True)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.xlim(numpy.min(pixcoord1) - 0.5, numpy.max(pixcoord1) + 0.5) pylab.ylim(numpy.min(pixcoord2) - 0.5, numpy.max(pixcoord2) + 0.5) pylab.xlabel('time', {'color': 'k'}) pylab.ylabel('arbitrary flux', {'color': 'k'}) tmin = amin(time) tmax = amax(time) try: numpy.isfinite(amin(finalflux)) numpy.isfinite(amin(finalflux)) fmin = amin(finalflux) fmax = amax(finalflux) except: ugh = 1 xmin = tmin - (tmax - tmin) / 40 xmax = tmax + (tmax - tmin) / 40 ymin = fmin - (fmax - fmin) / 20 ymax = fmax + (fmax - fmin) / 20 pylab.axes([0.06, 0.05, dx, dy]) pylab.setp(pylab.gca(), xticklabels=[], yticklabels=[]) ptime = time * 1.0 ptime = numpy.insert(ptime, [0], ptime[0]) ptime = numpy.append(ptime, ptime[-1]) pflux = finalflux * 1.0 pflux = numpy.insert(pflux, [0], -1000.0) pflux = numpy.append(pflux, -1000.0) pylab.plot(time, finalflux, color='#0000ff', linestyle='-', linewidth=0.5) pylab.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0) if 'loc' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(ymin, ymax) if 'glob' in plottype: pylab.xlim(xmin, xmax) pylab.ylim(1.0e-10, numpy.nanmax(pixseries) * 1.05) if 'full' 
in plottype:
            pylab.xlim(xmin, xmax)
            pylab.ylim(1.0e-10, ymax * 1.05)

        # render plot
        if cmdLine:
            pylab.show()
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()
        if plotfile.lower() != 'none':
            pylab.savefig(plotfile)

    # stop time
    if status == 0:
        kepmsg.clock('KEPHALOPHOT ended at', logfile, verbose)

    return
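# The random-search weight optimization inside KEPHALOPHOT can be hard to
# follow inline, so here is a minimal, self-contained sketch of the same idea
# under simplified assumptions (a single time segment, no saturation masking).
# It is illustrative only and not part of the pipeline; the function name and
# arguments are hypothetical. Weights are perturbed one pixel at a time and a
# trial is kept only if it lowers the summed absolute second difference of the
# normalized, weighted light curve.
def _halo_weight_sketch(pixseries, niter=20000, seed=1):
    """pixseries: 2-D array of shape (ncadences, npixels)."""
    rng = numpy.random.RandomState(seed)
    ncad, npix = pixseries.shape
    wgt = numpy.ones(npix) / npix
    flux = numpy.dot(pixseries, wgt)
    flux /= numpy.median(flux)
    best = numpy.sum(numpy.fabs(flux[2:] - 2.0 * flux[1:-1] + flux[:-2]))
    for _ in range(niter):
        # perturb one randomly chosen pixel weight and renormalize
        trial = wgt.copy()
        j = rng.randint(npix)
        trial[j] *= rng.normal(1.0, 0.05)
        trial /= numpy.sum(trial)
        tflux = numpy.dot(pixseries, trial)
        tflux /= numpy.median(tflux)
        metric = numpy.sum(numpy.fabs(tflux[2:] - 2.0 * tflux[1:-1] + tflux[:-2]))
        # accept the trial only if it reduces the roughness metric
        if metric < best:
            wgt, best = trial, metric
    return wgt, best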
def leastsquare(functype, pinit, xdata, ydata, yerr, logfile, verbose):

    status = 0
    coeffs = []

    # functional form
    if (functype == 'poly0'):
        fitfunc = kepfunc.poly0()
    if (functype == 'poly1'):
        fitfunc = kepfunc.poly1()
    if (functype == 'poly2'):
        fitfunc = kepfunc.poly2()
    if (functype == 'poly3'):
        fitfunc = kepfunc.poly3()
    if (functype == 'poly4'):
        fitfunc = kepfunc.poly4()
    if (functype == 'poly5'):
        fitfunc = kepfunc.poly5()
    if (functype == 'poly6'):
        fitfunc = kepfunc.poly6()
    if (functype == 'poly7'):
        fitfunc = kepfunc.poly7()
    if (functype == 'poly8'):
        fitfunc = kepfunc.poly8()
    if (functype == 'poly1con'):
        fitfunc = kepfunc.poly1con()
    if (functype == 'gauss'):
        fitfunc = kepfunc.gauss()
    if (functype == 'gauss0'):
        fitfunc = kepfunc.gauss0()
    if (functype == 'congauss'):
        fitfunc = kepfunc.congauss()
    if (functype == 'sine'):
        fitfunc = kepfunc.sine()
    if (functype == 'moffat0'):
        fitfunc = kepfunc.moffat0()
    if (functype == 'conmoffat'):
        fitfunc = kepfunc.conmoffat()

    # define error coefficient calculation
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

    # if no data errors are supplied, substitute the rms of a first fit
    if (yerr is None):
        yerr = []
        rerr = []
        for i in range(len(ydata)):
            rerr.append(1.e10)
        try:
            out = optimize.leastsq(errfunc, pinit, args=(xdata, ydata, rerr),
                                   full_output=1)
        except:
            message = 'ERROR -- KEPFIT.LEASTSQUARE: failed to fit data'
            status = kepmsg.err(logfile, message, verbose)
            if functype == 'poly0':
                out = [numpy.mean(ydata), sqrt(numpy.mean(ydata))]
        if (functype == 'poly0' or functype == 'sineCompareBinPSF'):
            coeffs.append(out[0])
        else:
            coeffs = out[0]
        if (len(coeffs) > 1):
            fit = fitfunc(coeffs, xdata)
        else:
            fit = numpy.zeros(len(xdata))
            for i in range(len(fit)):
                fit[i] = coeffs[0]
        sigma, status = kepstat.rms(ydata, fit, logfile, verbose)
        for i in range(len(ydata)):
            yerr.append(sigma)

    # fit data
    try:
        out = optimize.leastsq(errfunc, pinit, args=(xdata, ydata, yerr),
                               full_output=1)
    except:
        message = 'ERROR -- KEPFIT.LEASTSQUARE: failed to fit data'
        status = kepmsg.err(logfile, message, verbose)
        if functype == 'poly0':
            out = [numpy.mean(ydata), sqrt(numpy.mean(ydata))]

    # define coefficients
    coeffs = []
    covar = []
    if (functype == 'poly0' or functype == 'poly1con' or
            functype == 'sineCompareBinPSF'):
        coeffs.append(out[0])
        covar.append(out[1])
    else:
        coeffs = out[0]
        covar = out[1]

    # calculate 1-sigma error on coefficients
    errors = []
    if (covar is None):
        message = 'WARNING -- KEPFIT.leastsquare: NULL covariance matrix'
#        kepmsg.log(logfile,message,verbose)
    for i in range(len(coeffs)):
        if (covar is not None and len(coeffs) > 1):
            errors.append(sqrt(abs(covar[i][i])))
        else:
            errors.append(coeffs[i])

    # generate fit points for rms calculation
    if (len(coeffs) > 1):
        fit = fitfunc(coeffs, xdata)
    else:
        fit = numpy.zeros(len(xdata))
        for i in range(len(fit)):
            fit[i] = coeffs[0]
    sigma, status = kepstat.rms(ydata, fit, logfile, verbose)

    # generate fit points for plotting
    dx = xdata[len(xdata) - 1] - xdata[0]
    plotx = linspace(xdata.min(), xdata.max(), 10000)
    ploty = fitfunc(coeffs, plotx)
    if (len(coeffs) == 1):
        ploty = []
        for i in range(len(plotx)):
            ploty.append(coeffs[0])
        ploty = numpy.array(ploty)

    # reduced chi^2 calculation (note: the divisor is yerr[i], as in the
    # original code, rather than the more conventional yerr[i]**2)
    chi2 = 0
    dof = len(ydata) - len(coeffs)
    for i in range(len(ydata)):
        chi2 += (ydata[i] - fit[i])**2 / yerr[i]
    chi2 /= dof

    return coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty, status
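# Hypothetical usage sketch for leastsquare() above (not part of the package):
# fit a first-order polynomial to synthetic data. kepfunc.poly1() is assumed to
# evaluate f(p, x) = p[0] + p[1] * x, as implied by the calls above, and
# 'kepfit_demo.log' is an illustrative log-file name.
def _leastsquare_example():
    xdata = numpy.linspace(0.0, 10.0, 50)
    ydata = 2.0 + 0.5 * xdata + numpy.random.normal(0.0, 0.1, 50)
    yerr = numpy.ones(50) * 0.1
    coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty, status = \
        leastsquare('poly1', [1.0, 1.0], xdata, ydata, yerr,
                    'kepfit_demo.log', False)
    return coeffs, errors, chi2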
def kepfilter(infile,outfile,datacol,function,cutoff,passband,plot,plotlab, clobber,verbose,logfile,status,cmdLine=False): ## startup parameters status = 0 numpy.seterr(all="ignore") labelsize = 24 ticksize = 16 xsize = 16 ysize = 6 lcolor = '#0000ff' lwidth = 1.0 fcolor = '#ffff00' falpha = 0.2 ## log the call hashline = '----------------------------------------------------------------------------' kepmsg.log(logfile,hashline,verbose) call = 'KEPFILTER -- ' call += 'infile='+infile+' ' call += 'outfile='+outfile+' ' call += 'datacol='+str(datacol)+' ' call += 'function='+str(function)+' ' call += 'cutoff='+str(cutoff)+' ' call += 'passband='+str(passband)+' ' plotit = 'n' if (plot): plotit = 'y' call += 'plot='+plotit+ ' ' call += 'plotlab='+str(plotlab)+' ' overwrite = 'n' if (clobber): overwrite = 'y' call += 'clobber='+overwrite+ ' ' chatter = 'n' if (verbose): chatter = 'y' call += 'verbose='+chatter+' ' call += 'logfile='+logfile kepmsg.log(logfile,call+'\n',verbose) ## start time kepmsg.clock('KEPFILTER started at',logfile,verbose) ## test log file logfile = kepmsg.test(logfile) ## clobber output file if clobber: status = kepio.clobber(outfile,logfile,verbose) if kepio.fileexists(outfile): message = 'ERROR -- KEPFILTER: ' + outfile + ' exists. Use clobber=yes' status = kepmsg.err(logfile,message,verbose) ## open input file if status == 0: instr, status = kepio.openfits(infile,'readonly',logfile,verbose) tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,infile,logfile,verbose,status) if status == 0: try: work = instr[0].header['FILEVER'] cadenom = 1.0 except: cadenom = cadence ## fudge non-compliant FITS keywords with no values if status == 0: instr = kepkey.emptykeys(instr,file,logfile,verbose) ## read table structure if status == 0: table, status = kepio.readfitstab(infile,instr[1],logfile,verbose) # read time and flux columns if status == 0: barytime, status = kepio.readtimecol(infile,table,logfile,verbose) flux, status = kepio.readsapcol(infile,table,logfile,verbose) # filter input data table if status == 0: try: nanclean = instr[1].header['NANCLEAN'] except: naxis2 = 0 for i in range(len(table.field(0))): if (numpy.isfinite(barytime[i]) and numpy.isfinite(flux[i]) and flux[i] != 0.0): table[naxis2] = table[i] naxis2 += 1 instr[1].data = table[:naxis2] comment = 'NaN cadences removed from data' status = kepkey.new('NANCLEAN',True,comment,instr[1],outfile,logfile,verbose) ## read table columns if status == 0: intime, status = kepio.readtimecol(infile,instr[1].data,logfile,verbose) if status == 0: indata, status = kepio.readfitscol(infile,instr[1].data,datacol,logfile,verbose) if status == 0: intime = intime + bjdref indata = indata / cadenom ## define data sampling if status == 0: tr = 1.0 / (cadence / 86400) timescale = 1.0 / (cutoff / tr) ## define convolution function if status == 0: if function == 'boxcar': filtfunc = numpy.ones(numpy.ceil(timescale)) elif function == 'gauss': timescale /= 2 dx = numpy.ceil(timescale * 10 + 1) filtfunc = kepfunc.gauss() filtfunc = filtfunc([1.0,dx/2-1.0,timescale],linspace(0,dx-1,dx)) elif function == 'sinc': dx = numpy.ceil(timescale * 12 + 1) fx = linspace(0,dx-1,dx) fx = fx - dx / 2 + 0.5 fx /= timescale filtfunc = numpy.sinc(fx) filtfunc /= numpy.sum(filtfunc) ## pad time series at both ends with noise model if status == 0: ave, sigma = kepstat.stdev(indata[:len(filtfunc)]) padded = append(kepstat.randarray(np.ones(len(filtfunc)) * ave, np.ones(len(filtfunc)) * sigma), indata) ave, sigma = kepstat.stdev(indata[-len(filtfunc):]) 
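        # append a matching stretch of Gaussian noise to the end of the series
        # as well; padding both ends with a local noise model keeps the
        # convolution below free of edge ramps, and the padded samples are
        # stripped from the convolved output afterwards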
padded = append(padded, kepstat.randarray(np.ones(len(filtfunc)) * ave, np.ones(len(filtfunc)) * sigma)) ## convolve data if status == 0: convolved = convolve(padded,filtfunc,'same') ## remove padding from the output array if status == 0: if function == 'boxcar': outdata = convolved[len(filtfunc):-len(filtfunc)] else: outdata = convolved[len(filtfunc):-len(filtfunc)] ## subtract low frequencies if status == 0 and passband == 'high': outmedian = median(outdata) outdata = indata - outdata + outmedian ## comment keyword in output file if status == 0: status = kepkey.history(call,instr[0],outfile,logfile,verbose) ## clean up x-axis unit if status == 0: intime0 = float(int(tstart / 100) * 100.0) if intime0 < 2.4e6: intime0 += 2.4e6 ptime = intime - intime0 xlab = 'BJD $-$ %d' % intime0 ## clean up y-axis units if status == 0: pout = indata * 1.0 pout2 = outdata * 1.0 nrm = len(str(int(numpy.nanmax(pout))))-1 pout = pout / 10**nrm pout2 = pout2 / 10**nrm ylab = '10$^%d$ %s' % (nrm, plotlab) ## data limits xmin = ptime.min() xmax = ptime.max() ymin = numpy.nanmin(pout) ymax = numpy.nanmax(pout) xr = xmax - xmin yr = ymax - ymin ptime = insert(ptime,[0],[ptime[0]]) ptime = append(ptime,[ptime[-1]]) pout = insert(pout,[0],[0.0]) pout = append(pout,0.0) pout2 = insert(pout2,[0],[0.0]) pout2 = append(pout2,0.0) ## plot light curve if status == 0 and plot: try: params = {'backend': 'png', 'axes.linewidth': 2.5, 'axes.labelsize': labelsize, 'axes.font': 'sans-serif', 'axes.fontweight' : 'bold', 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': ticksize, 'ytick.labelsize': ticksize} rcParams.update(params) except: print('ERROR -- KEPFILTER: install latex for scientific plotting') status = 1 if status == 0 and plot: pylab.figure(figsize=[xsize,ysize]) pylab.clf() ## plot filtered data ax = pylab.axes([0.06,0.1,0.93,0.87]) pylab.gca().xaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False)) labels = ax.get_yticklabels() setp(labels, 'rotation', 90, fontsize=12) pylab.plot(ptime,pout,color='#ff9900',linestyle='-',linewidth=lwidth) fill(ptime,pout,color=fcolor,linewidth=0.0,alpha=falpha) if passband == 'low': pylab.plot(ptime[1:-1],pout2[1:-1],color=lcolor,linestyle='-',linewidth=lwidth) else: pylab.plot(ptime,pout2,color=lcolor,linestyle='-',linewidth=lwidth) fill(ptime,pout2,color=lcolor,linewidth=0.0,alpha=falpha) xlabel(xlab, {'color' : 'k'}) ylabel(ylab, {'color' : 'k'}) xlim(xmin-xr*0.01,xmax+xr*0.01) if ymin >= 0.0: ylim(ymin-yr*0.01,ymax+yr*0.01) else: ylim(1.0e-10,ymax+yr*0.01) pylab.grid() # render plot if cmdLine: pylab.show() else: pylab.ion() pylab.plot([]) pylab.ioff() ## write output file if status == 0: for i in range(len(outdata)): instr[1].data.field(datacol)[i] = outdata[i] instr.writeto(outfile) ## close input file if status == 0: status = kepio.closefits(instr,logfile,verbose) ## end time if (status == 0): message = 'KEPFILTER completed at' else: message = '\nKEPFILTER aborted at' kepmsg.clock(message,logfile,verbose)
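# A minimal, self-contained sketch (illustrative only, with a hypothetical
# helper name) of the filtering scheme used by KEPFILTER above: build a
# low-pass kernel sized from the cutoff frequency, convolve, and either keep
# the smoothed series ('low' passband) or subtract it ('high' passband).
# `cadence` is the sampling interval in seconds and `cutoff` the cutoff
# frequency in 1/day, matching the definitions of tr and timescale above;
# only the boxcar kernel is shown here.
def _bandpass_sketch(flux, cadence, cutoff, passband='high'):
    tr = 1.0 / (cadence / 86400.0)        # cadences per day
    timescale = tr / cutoff               # cutoff period, in cadences
    width = int(numpy.ceil(timescale))
    kernel = numpy.ones(width) / width    # normalized boxcar low-pass kernel
    smooth = numpy.convolve(flux, kernel, mode='same')
    if passband == 'low':
        return smooth
    # high pass: subtract the smoothed series, restoring the median level
    return flux - smooth + numpy.median(smooth)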