Example #1
def run_centering(eventname, cwd):
    """
  Read the control file.
  Load the event.
  Launch a process for each centering run.
  """

    owd = os.getcwd()
    os.chdir(cwd)
    config = os.path.basename(eventname)[:-4] + '.pcf'
    pcfs = rd.read_pcf(config, 'centering')

    if len(pcfs) == 1:  # With only one run, I may already be inside the centering dir (re-run case):
        # Get name of centering dir:
        pcf = pcfs[0]
        centerdir = pcf.method
        if pcf.pcfname is not None:
            centerdir += "_" + str(pcf.pcfname)

        if cwd[-len(centerdir):] == centerdir:
            # Go to dir where poet2 files were saved.
            cwd = cwd[:-len(centerdir)]
            os.chdir(cwd)

    # Load the event:
    try:
        event = me.loadevent(eventname,
                             load=['dendata', 'data', 'uncd', 'mask'])
        print("Performing centering on denoised data")
    except:
        event = me.loadevent(eventname, load=['data', 'uncd', 'mask'])

    # Loop over each run:
    for pcf in pcfs:

        # Make a copy of the event:
        this_event = copy.copy(event)

        # Name of the directory to put the results:
        centerdir = pcf.method
        if pcf.pcfname is not None:
            centerdir += "_" + str(pcf.pcfname)
        this_event.centerdir = centerdir

        # Create the centering directory if it doesn't exist:
        if not os.path.exists(centerdir):
            os.mkdir(centerdir)

        # copy the photometry and centering configuration into the
        # centering directory
        filename = centerdir + '/' + event.eventname + '.pcf'
        pcf.make_file(filename, 'centering')
        rd.copy_config(config, ['photometry'], filename)

        # Launch the thread:
        p = Process(target=centering, args=(this_event, pcf, centerdir, owd))
        p.start()

    os.chdir(owd)
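
The loop above launches one multiprocessing Process per centering configuration. Below is a minimal standalone sketch of that pattern, with a hypothetical worker() in place of centering():

import copy
from multiprocessing import Process


def worker(event, label, outdir):
    # Hypothetical stand-in for centering(): each process works on its own copy.
    print("Running %s in %s for event %s" % (label, outdir, event["name"]))


if __name__ == "__main__":
    event = {"name": "demo"}            # stand-in for the loaded event object
    runs = ["fgc", "col", "lag"]        # stand-in for the list of pcf runs
    procs = []
    for label in runs:
        this_event = copy.copy(event)   # shallow copy, as in the example
        p = Process(target=worker, args=(this_event, label, label + "_dir"))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()                        # wait for all runs (the example does not join)
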
Example #2
def run_centering(eventname, control, cwd=None):
    """
  Read the control file.
  Load the event.
  Launch a process for each centering run.
  """

    if cwd is None:
        cwd = os.getcwd()
    os.chdir(cwd)
    pcf = rd.read_pcf(control)
    nruns = len(pcf)

    if nruns == 1:  # With only one run, I may already be inside the centering dir (re-run case):
        # Get name of centering dir:
        centerdir = pcf[0].method.get()
        if pcf[0].pcfname.get() != "":
            centerdir += "_" + pcf[0].pcfname.get()

        if cwd[-len(centerdir):] == centerdir:
            # Go to dir where poet2 files were saved.
            cwd = cwd[:-len(centerdir)]
            os.chdir(cwd)

    # Load the event:
    try:
        event = me.loadevent(eventname,
                             load=['dendata', 'data', 'uncd', 'mask'])
        print("Performing centering on denoised data")
    except:
        event = me.loadevent(eventname, load=['data', 'uncd', 'mask'])
        event.denoised = False

    # Loop over each run:
    for run in np.arange(nruns):
        os.chdir(cwd)

        # Make a copy of the event:
        this_event = copy.copy(event)

        # Name of the directory to put the results:
        centerdir = pcf[run].method.get()
        if pcf[run].pcfname.get() != "":
            centerdir += "_" + pcf[run].pcfname.get()
        this_event.centerdir = centerdir

        # Create the centering directory if it doesn't exist:
        if not os.path.exists(centerdir):
            os.mkdir(centerdir)

        # copy the photometry control file to centerdir
        shutil.copy('photom.pcf', centerdir + '/photom.pcf')

        # Launch the thread:
        p = Process(target=centering, args=(this_event, pcf[run], centerdir))
        p.start()
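
The single-run branch above strips the run directory from cwd when the function is re-run from inside it. A small sketch of the same check written with os.path helpers (strip_rundir is a hypothetical name):

import os


def strip_rundir(cwd, rundir):
    """Return the parent path if cwd ends with rundir, else cwd unchanged."""
    if os.path.basename(os.path.normpath(cwd)) == rundir:
        return os.path.dirname(os.path.normpath(cwd))
    return cwd


print(strip_rundir("/data/wa012/fgc_run1", "fgc_run1"))  # -> /data/wa012
print(strip_rundir("/data/wa012", "fgc_run1"))           # -> /data/wa012
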
Example #3
def run_denoising(eventname, control):
    """
    Load the event.
    Read the control file.
    Run denoising for each configuration.
    """
    global event

    pcf = rd.read_pcf(control)
    nruns = len(pcf)
    cwd = os.getcwd()

    if nruns == 1:  # With only one run, I may already be inside the denoise dir (re-run case):
        # Get name of denoising dir:
        denoisedir = (pcf[0].wavelet.get() + '_' + pcf[0].threshold.get() +
                      '_L' + str(pcf[0].numlvls.get()))
        if pcf[0].pcfname.get() != "":
            denoisedir += "_" + pcf[0].pcfname.get()
        if cwd[-len(denoisedir):] == denoisedir:
            # Go to dir where poet2 files were saved.
            cwd = cwd[:-len(denoisedir)]
            os.chdir(cwd)

    # Loop over each run:
    for run in np.arange(nruns):
        os.chdir(cwd)

        # Load a fresh event:
        print("Loading " + eventname)
        event = me.loadevent(eventname, load=['data', 'uncd', 'mask'])

        # Name of the directory to put the results:
        denoisedir = (pcf[run].wavelet.get() + '_' + pcf[run].threshold.get() +
                      '_L' + str(pcf[run].numlvls.get()))
        if pcf[run].pcfname.get() != "":
            denoisedir += "_" + pcf[run].pcfname.get()
        event.denoisedir = denoisedir

        # Create the denoising directory if it doesn't exist:
        if not os.path.exists(denoisedir):
            os.mkdir(denoisedir)

        # copy the centering and photometry control files to denoisedir
        shutil.copy('center.pcf', denoisedir + '/center.pcf')
        shutil.copy('photom.pcf', denoisedir + '/photom.pcf')

        # Modify source estimate
        if hasattr(pcf[run], 'srcest'):
            if nruns == event.npos:
                event.srcest[:, run] = pcf[run].srcest.get()
            else:
                for pos in range(event.npos):
                    event.srcest[:, pos] = pcf[0].srcest.get()

        #Call denoising on each wavelet:
        denoise(pcf[run], denoisedir)

    return
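
Each run above writes into its own directory, named from the run parameters, with the downstream control files copied in. A minimal sketch of that setup (make_rundir and the file names are hypothetical):

import os
import shutil


def make_rundir(wavelet, threshold, numlvls, pcfname="", controls=()):
    # Directory name built from the run parameters, as in run_denoising():
    rundir = "%s_%s_L%d" % (wavelet, threshold, numlvls)
    if pcfname:
        rundir += "_" + pcfname
    os.makedirs(rundir, exist_ok=True)
    for cfile in controls:
        shutil.copy(cfile, os.path.join(rundir, os.path.basename(cfile)))
    return rundir


# e.g. make_rundir("haar", "hard", 3, controls=["center.pcf", "photom.pcf"])
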
Example #4
File: zen.py  Project: em-delarme/zen
def main():
    eventname = sys.argv[1]
    cfile     = sys.argv[2]
    
    # Load the POET event object (up through p5)
    event_chk = me.loadevent(eventname + "_p5c")
    event_pht = me.loadevent(eventname + "_pht")
    event_ctr = me.loadevent(eventname + "_ctr", load=['data', 'uncd'])

    data  = event_ctr.data
    uncd  = event_ctr.uncd
    phase = event_chk.phase[0]

    phot    = event_pht.fp.aplev[0]
    photerr = event_pht.fp.aperr[0]
    
    # Default to 3x3 box of pixels
    avgcentx = np.floor(np.average(event_pht.fp.x) + 0.5)
    avgcenty = np.floor(np.average(event_pht.fp.y) + 0.5)
    avgcent  = [avgcenty, avgcentx]
    pixels = []
	   
    for i in range(3):
        for j in range(3):
            pixels.append([avgcenty - 1 + i, avgcentx - 1 + j])
		  
    phat, dP = zf.zen_init(data, pixels)

    npix  = len(pixels)
    necl  = 6 #number of eclipse parameters

    #photerr = photerr/np.sqrt(np.mean(phot))
    #phot    = phot/np.mean(phot)
    
    # FINDME: This is the general structure we need for MC3, but names/numbers
    # are subject to change
    allp, bp = mc3.mcmc(phot, photerr, func=zf.zen,
                        indparams=[phase, phat, npix], cfile=cfile)

    return
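
The 3x3 pixel box above is built around the frame-averaged target position; the same construction in isolation, with hypothetical center arrays:

import numpy as np

x = np.array([14.8, 15.1, 15.0, 14.9])    # hypothetical per-frame x centers
y = np.array([15.2, 15.0, 14.9, 15.1])    # hypothetical per-frame y centers

avgcentx = np.floor(np.average(x) + 0.5)  # round to the nearest pixel
avgcenty = np.floor(np.average(y) + 0.5)

pixels = [[avgcenty - 1 + i, avgcentx - 1 + j]
          for i in range(3) for j in range(3)]
print(pixels)                             # nine [row, col] pairs around the mean center
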
Example #5
def w4Restore(filedir='..', fname=None, topdir=None, clip=None):
    import shutil
    global numevents

    files = []
    event = []
    filename = ''
    if fname is None:
        for fname in os.listdir(filedir):
            if (fname.endswith("-w3.dat")):
                files.append(fname[:-4])
    else:
        files.append(fname[:-4] if fname.endswith('.dat') else fname)
    files.sort()
    numevents = len(files)
    if numevents == 0:
        print('Cannot find any files to restore.')
        #event = ancil(None)
        return event
    for i in np.arange(numevents):
        #Load event
        event.append(me.loadevent(filedir + '/' + files[i]))
        print('Finished loading: ' + event[i].eventname)
        filename = ''.join((filename, event[i].eventname))
        event[i].ancildir = ancildir
    #On initial setup, rename eg00params and eg00-initvals
    '''
    for i in np.arange(numevents):
        event[i].paramsfile   = ancildir + event[i].eventname + 'params.py'
        event[i].initvalsfile = ancildir + event[i].eventname + '-initvals.txt'
        # Copy eg00params
        if os.path.isfile(event[i].paramsfile) == False:
            shutil.copy(ancildir + 'eg00params.py', event[i].paramsfile)
        # Copy eg00-initvals
        if os.path.isfile(event[i].initvalsfile) == False:
            shutil.copy(ancildir + 'eg00-initvals.txt', event[i].initvalsfile)
    '''
    return event
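
The suffix handling above deliberately avoids str.rstrip('.dat'), which strips any trailing '.', 'd', 'a', or 't' characters rather than the literal suffix. A short illustration (strip_suffix is a hypothetical helper):

print("eventdata.dat".rstrip('.dat'))     # -> 'even'  (characters stripped, not the suffix)
print("wa011bs11-w3.dat".rstrip('.dat'))  # -> 'wa011bs11-w3'  (happens to work here)


def strip_suffix(name, suffix='.dat'):
    return name[:-len(suffix)] if name.endswith(suffix) else name


print(strip_suffix("eventdata.dat"))      # -> 'eventdata'
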
Example #6
def main():
    '''
    One function to rule them all.
    '''

    # Parse the command line arguments
    eventname = sys.argv[1]
    cfile     = sys.argv[2]

    outdir = time.strftime('%Y-%m-%d-%H:%M:%S') + '_' + eventname + '/'

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    days2sec = 86400

    # Read the config file into a dictionary
    print("Reading the config file.")
    config = ConfigParser.SafeConfigParser()
    config.read([cfile])
    configdict = dict(config.items("MCMC"))

    # Pull some variables out
    plots = configdict['plots'] == 'True'
    bins  = configdict['bins']  == 'True'

    # Get initial parameters and stepsize arrays from the config
    stepsize = [float(s) for s in configdict['stepsize'].split()]
    params   = [float(s) for s in configdict['params'].split()]

    # Load the POET event object (up through p5)
    print("Loading the POET event object.")
    event_chk = me.loadevent(eventname + "_p5c")
    event_pht = me.loadevent(eventname + "_pht")
    event_ctr = me.loadevent(eventname + "_ctr", load=['data', 'uncd', 'mask'])

    data  = event_ctr.data
    uncd  = event_ctr.uncd
    phase = event_chk.phase[0]


    # Identify the bright pixels to use
    print("Identifying brightest pixels.")
    nx = data.shape[1]
    ny = data.shape[2]
    
    phot    = event_pht.fp.aplev[np.where(event_chk.good)]
    photerr = event_pht.fp.aperr[np.where(event_chk.good)]

    xavg = int(np.floor(np.average(event_pht.fp.x)))
    yavg = int(np.floor(np.average(event_pht.fp.y)))

    boxsize = 10
    
    photavg     = np.average(data[:,yavg-boxsize:yavg+boxsize,xavg-boxsize:xavg+boxsize], axis=0)[:,:,0]
    photavgflat = photavg.flatten()

    # Some adjustable parameters that should be at the top of the file
    npix = 9
    necl = 6 #number of eclipse parameters

    flatind = photavgflat.argsort()[-npix:]

    # Unflatten the sorted indices: integer division gives the row, modulo the column
    rows = flatind // photavg.shape[1]
    cols = flatind % photavg.shape[1]

    pixels = []

    for i in range(npix):
        pixels.append([rows[i]+yavg-boxsize,cols[i]+xavg-boxsize])
    
    # Default to 3x3 box of pixels
    # avgcentx = np.floor(np.average(event_pht.fp.x) + 0.5)
    # avgcenty = np.floor(np.average(event_pht.fp.y) + 0.5)
    # avgcent  = [avgcenty, avgcentx]
    # pixels = []
	   
    # for i in range(3):
    #     for j in range(3):
    #         pixels.append([avgcenty - 1 + i, avgcentx - 1 + j])

    print("Doing preparatory calculations.")
    phat, dP = zf.zen_init(data, pixels)

    phatgood = np.zeros(len(event_chk.good[0]))
    
    # Mask out the bad images in phat
    for i in range(npix):
        tempphat = phat[:,i].copy()
        tempphatgood = tempphat[np.where(event_chk.good[0])]
        if i == 0:
            phatgood = tempphatgood.copy()
        else:
            phatgood = np.vstack((phatgood, tempphatgood))
        del(tempphat)
        del(tempphatgood)
        
    # Invert the new array because I lack foresight
    phatgood  = phatgood.T
    phasegood = event_chk.phase[np.where(event_chk.good)]

    # Do binning if desired
    if bins:
        # Width of bins to try
        bintry = np.array([ 4.,
                            8.,
                           12.,
                           16.,
                           20.,
                           24.,
                           28.,
                           32.,
                           36.,
                           40.,
                           44.,
                           48.,
                           52.,
                           56.,
                           60.,
                           64.])

        #bintry = np.arange(4,129,dtype=float)

        # Convert bin widths to phase from seconds
        bintry /= (event_chk.period * days2sec)

        # Initialize best chi-squared to an insanely large number
        # for comparison later
        chibest = 1e300

        chisqarray = np.zeros(len(bintry))

        # Optimize bin size
        print("Optimizing bin size.")
        for i in range(len(bintry)):
            print("Least-squares optimization for " + str(bintry[i] * event_chk.period * days2sec)
                  + " second bin width.")

            # Bin the phase and phat
            for j in range(npix):
                if j == 0:
                    binphase,     binphat = zf.bindata(phasegood, phatgood[:,j], bintry[i])
                else:
                    binphase, tempbinphat = zf.bindata(phasegood, phatgood[:,j], bintry[i])
                    binphat = np.column_stack((binphat, tempbinphat))
            # Bin the photometry and error
            # Phase is binned again but is identical to
            # the previously binned phase.
            binphase, binphot, binphoterr = zf.bindata(phasegood, phot, bintry[i], yerr=photerr)

            # Normalize
            photnorm    = phot    / phot.mean()
            photerrnorm = photerr / phot.mean()

            binphotnorm    = binphot    / binphot.mean()
            binphoterrnorm = binphoterr / binphot.mean()

            # Make xphat for use with zen_optimize
            xphatshape = (binphat.shape[0], binphat.shape[1]+1)
            xphat      = np.zeros(xphatshape)

            xphat[:,:-1] = binphat
            xphat[:, -1] = binphase

            # Minimize chi-squared for this bin size
            ret = sco.curve_fit(zf.zen_optimize, xphat, binphotnorm, p0=params, sigma=binphoterrnorm, maxfev = 100000)

            # Calculate the best-fitting model
            model = zf.zen(ret[0], binphase, binphat, npix)

            # Calculate reduced chi-squared
            chisq = np.sum((binphotnorm - model)**2/binphoterrnorm**2)
            redchisq = chisq/len(binphotnorm)
            print("Reduced chi-squared: " + str(redchisq))

            chisqarray[i] = redchisq

            # Save results if this fit is better
            if redchisq < chibest:
                chibest = redchisq
                binbest = bintry[i]

        # Rebin back to the best binning
        binphase, binphot, binphoterr = zf.bindata(phasegood, phot, binbest, yerr=photerr)
        binphotnorm    = binphot    / binphot.mean()
        binphoterrnorm = binphoterr / binphot.mean()

        for j in range(npix):
            if j == 0:
                binphase,     binphat = zf.bindata(phasegood, phatgood[:,j], binbest)
            else:
                binphase, tempbinphat = zf.bindata(phasegood, phatgood[:,j], binbest)
                binphat = np.column_stack((binphat, tempbinphat))

        if plots:
            plt.clf()
            plt.plot(bintry * event_chk.period * days2sec, chisqarray)
            plt.xlabel("Bin width (seconds)")
            plt.ylabel("Reduced Chi-squared")
            plt.title("Reduced Chi-squared of PLD model fit for different bin sizes")
            plt.savefig(outdir+"redchisq.png")
            
    # If not binning, use regular photometry
    else:
        photnorm       = phot    / phot.mean()
        photerrnorm    = photerr / phot.mean()
        binphotnorm    = photnorm.copy()
        binphoterrnorm = photerrnorm.copy()
        binphase       = phasegood.copy()
        binphat        = phatgood.copy()

    # And we're off!    
    print("Beginning MCMC.")
    savefile = configdict['savefile']
    log      = configdict['logfile']
    
    bp, CRlo, CRhi, stdp, posterior, Zchain = mc3.mcmc(binphotnorm,
                                                       binphoterrnorm,
                                                       func=zf.zen,
                                                       indparams=[binphase,
                                                                  binphat,
                                                                  npix],
                                                       cfile=cfile,
                                                       savefile=outdir+savefile,
                                                       log=outdir+log)


    # Get initial parameters and stepsize arrays from the config
    stepsize = [float(s) for s in configdict['stepsize'].split()]
    params   = [float(s) for s in configdict['params'].split()]

    # Calculate the best-fitting model
    bestfit = zf.zen(bp, binphase, binphat, npix)

    # Get parameter names array to match params with names
    parnames = configdict["parname"].split()

    # Make array of parameters, with eclipse depth replaced with 0
    noeclParams = np.zeros(len(bp))

    for i in range(len(noeclParams)):
        if parnames[i] == 'Depth':
            noeclParams[i] = 0
            depth = bp[i]
        else:
            noeclParams[i] = bp[i]

    noeclfit = zf.zen(noeclParams, binphase, binphat, npix)

    bestecl = depth*(zf.eclipse(binphase, bp[npix:npix+necl])-1) + 1

    # Make plots
    print("Making plots.")
    binnumplot = 200
    binplotwidth = (phasegood[-1]-phasegood[0])/binnumplot
    binphaseplot, binphotplot, binphoterrplot = zf.bindata(phasegood, phot, binplotwidth, yerr=photerr)
    binphaseplot, binnoeclfit = zf.bindata(binphase, noeclfit, binplotwidth)
    binphaseplot, binbestecl  = zf.bindata(binphase,  bestecl,  binplotwidth)
    binphotnormplot = binphotplot / binphotplot.mean()
    binphoterrnormplot = binphoterrplot / binphotplot.mean()
    zp.normlc(binphaseplot[:-1], binphotnormplot[:-1], binphoterrnormplot[:-1],
              binnoeclfit[:-1], binbestecl[:-1], 1,
              title='Normalized Binned WASP-29b Data With Eclipse Models', savedir=outdir)
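
The bin-size optimization above relies on zf.bindata to bin the photometry and propagate its uncertainties. The following is a self-contained sketch of fixed-width binning with error propagation; it is an assumption about what bindata does, not its actual implementation:

import numpy as np


def bindata(x, y, width, yerr=None):
    """Bin y into fixed-width bins in x; propagate Gaussian errors if given."""
    edges = np.arange(x.min(), x.max() + width, width)
    idx = np.digitize(x, edges) - 1
    keep = [i for i in range(edges.size - 1) if np.any(idx == i)]
    binx = np.array([x[idx == i].mean() for i in keep])
    biny = np.array([y[idx == i].mean() for i in keep])
    if yerr is None:
        return binx, biny
    binyerr = np.array([np.sqrt(np.sum(yerr[idx == i]**2)) / np.sum(idx == i)
                        for i in keep])
    return binx, biny, binyerr


phase = np.linspace(0.3, 0.7, 400)                 # hypothetical phase array
flux = 1.0 + 0.001 * np.random.randn(400)          # hypothetical normalized flux
err = np.full(400, 0.001)
bphase, bflux, berr = bindata(phase, flux, 0.01, yerr=err)
# Reduced chi-squared of a model against the binned data, as in the loop above:
# redchisq = np.sum((bflux - model)**2 / berr**2) / len(bflux)
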
Example #7
def badpix(eventname, cwd):
    """
  Modification History:
  ---------------------
  2010-??-??  patricio  Initial Python implementation
  2014-08-13  garland   switched the pyfits package to astropy.io.fits
  	              [email protected] 
  2017-06-20  zacchaeus Fixed None comparisons
                        [email protected]
  """

    owd = os.getcwd()
    os.chdir(cwd)
    tini = time.time()

    # Load the event
    event = me.loadevent(eventname)
    # Load the data
    me.updateevent(event, eventname, event.loadnext)

    # Create a new log starting from the old one.
    oldlogname = event.logname
    logname = event.eventname + ".log"
    log = le.Logedit(logname, oldlogname)
    event.logname = logname
    log.writelog('\nMARK: ' + time.ctime() + ': Starting p2badpix.')

    # ccampo 3/18/2011: do this in p5
    # Julian observation date
    #event.fp.juldat = event.jdjf80 + event.fp.time / 86400.0

    # ::::::::::::::::::::::: UNCERTAINTIES ::::::::::::::::::::::::::::::::
    # IRAC subarray data come with bogus uncertainties that are not linearly
    # related to photon noise.  We scale them later, using the reduced chi
    # squared from the model fit.

    # ::::::::::::::::::::::: FLUX CONVERSION :::::::::::::::::::::::::::::
    # Do we want flux (uJy/pix) or surface brightness (MJy/sr) units?  If
    # doing photometry, convert to flux.  Since we care about relative
    # numbers, it doesn't really matter.

    # Convert from surface brightness (MJy/sr) to flux units (uJy/pix)
    if event.fluxunits:
        log.writelog('Converting surface brightness to flux')
        event.data, event.uncd = btf.poet_bright2flux(event.data, event.uncd,
                                                      event.posscl)
        if event.havepreflash:
            event.predata, event.preuncd = btf.poet_bright2flux(
                event.predata, event.preuncd, event.posscl)
        if event.havepostcal:
            event.postdata, event.postuncd = btf.poet_bright2flux(
                event.postdata, event.postuncd, event.posscl)

    else:
        log.writelog('Did not convert bright to flux.')

    # Mean Background Estimate, from zodi model
    event.estbg = (np.mean(event.fp.zodi[np.where(event.fp.exist)]) +
                   np.mean(event.fp.ism[np.where(event.fp.exist)]) +
                   np.mean(event.fp.cib[np.where(event.fp.exist)]))

    if event.fluxunits:
        event.estbg *= (event.srperas * 1e12 * np.mean(event.posscl[0, :]) *
                        np.mean(event.posscl[1, :]))

    # Bad Pixel Masking
    log.writelog('Find and fix bad pixels')

    # Get permanent bad pixel mask.
    if not event.ispmask[0]:
        log.writelog('\nPermanent Bad pixel mask not found!')
    else:
        hdu = fits.open(event.pmaskfile[0])
        if hdu[0].header['bitpix'] == -32:  # if data type is float
            hdu[0].scale(type='int16')  # cast it down to int16
        event.pmask = hdu[0].data

    # IRS FIX:
    # IRS data contains the blue peak subarray while its pmask contains
    # the whole array (Hard coding)
    if event.photchan == 5:
        event.pmask = event.pmask[3:59, 86:127]

    # Do NOT define sigma, we have a different scheme for finding baddies
    # adds Spitzer rejects: fp.nsstrej  &  our rejects: fp.nsigrej
    event.mask = pbm.poet_badmask(event.data,
                                  event.uncd,
                                  event.pmask,
                                  event.inst.pcrit,
                                  event.bdmskd,
                                  event.inst.dcrit,
                                  event.fp,
                                  nimpos=event.nimpos)

    # User rejected pixels:
    if event.userrej is not None:
        for i in range(np.shape(event.userrej)[0]):
            event.mask[:, event.userrej[i, 0], event.userrej[i, 1], :] = 0
        event.fp.userrej = np.sum(np.sum(1 - event.mask, axis=1), axis=1)
        event.fp.userrej = np.transpose(event.fp.userrej) - event.fp.nsstrej
    else:
        event.fp.userrej = np.zeros((event.npos, event.maxnimpos))

    # define sigma here.
    # adds median sky: fp.medsky
    event.meanim = pcb.poet_chunkbad(event.data, event.uncd, event.mask,
                                     event.nimpos, event.sigma, event.szchunk,
                                     event.fp, event.nscyc)

    log.writelog('Masks combined')

    # Repeat procedure for preflash and postcal data:
    if event.havepreflash:
        event.premask = pbm.poet_badmask(event.predata,
                                         event.preuncd,
                                         event.pmask,
                                         event.inst.pcrit,
                                         event.prebdmskd,
                                         event.inst.dcrit,
                                         event.prefp,
                                         nimpos=event.prenimpos)
        if event.userrej is not None:
            for i in range(np.shape(event.userrej)[0]):
                event.premask[:, event.userrej[i, 0], event.userrej[i,
                                                                    1], :] = 0
            event.prefp.userrej = np.sum(np.sum(1 - event.premask, axis=1),
                                         axis=1)
            event.prefp.userrej = np.transpose(
                event.prefp.userrej) - event.prefp.nsstrej
        else:
            event.prefp.userrej = np.zeros((event.npos, event.premaxnimpos))

        event.premeanim = pcb.poet_chunkbad(event.predata, event.preuncd,
                                            event.premask, event.prenimpos,
                                            event.sigma, event.szchunk,
                                            event.prefp, event.nscyc)
    if event.havepostcal:
        event.postmask = pbm.poet_badmask(event.postdata,
                                          event.postuncd,
                                          event.pmask,
                                          event.inst.pcrit,
                                          event.postbdmskd,
                                          event.inst.dcrit,
                                          event.postfp,
                                          nimpos=event.postnimpos)

        if event.userrej is not None:
            for i in range(np.shape(event.userrej)[0]):
                event.postmask[:, event.userrej[i, 0], event.userrej[i,
                                                                     1], :] = 0
            event.postfp.userrej = np.sum(np.sum(1 - event.postmask, axis=1),
                                          axis=1)
            event.postfp.userrej = np.transpose(event.postfp.userrej) - \
                                   event.postfp.nsstrej
        else:
            event.postfp.userrej = np.zeros((event.npos, event.postmaxnimpos))

        event.postmeanim = pcb.poet_chunkbad(event.postdata, event.postuncd,
                                             event.postmask, event.postnimpos,
                                             event.sigma, event.szchunk,
                                             event.postfp, event.nscyc)
        for pos in range(event.npos):
            fits.writeto(event.eventname + "_medpostcal.fits",
                         event.postmeanim[:, :, pos],
                         overwrite=True)

        # Delete post calibration data:
        event.havepostcal = False
        del (event.postdata)
        del (event.postmask)
        del (event.postuncd)
        del (event.postbdmskd)

    # Save the data
    if event.instrument == 'mips':
        todel = ['bdmskd', 'brmskd']  # what to delete
    else:
        todel = ['bdmskd']

    me.saveevent(event,
                 event.eventname + "_bpm",
                 save=['data', 'uncd', 'mask'],
                 delete=todel)

    # Print time elapsed and close log:
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + '/' + event.eventname + "_bpm.dat")
    log.writelog(" " + cwd + '/' + event.eventname + "_bpm.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + '/' + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose('\nBad pixel masking time (h:m:s):  %s ' % dt)

    os.chdir(owd)

    if event.runp3:
        #poet.p(3)
        os.system("python3 poet.py p3")
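
poet_badmask and poet_chunkbad implement the pipeline's bad-pixel logic. As a rough standalone sketch of the underlying idea, here is generic sigma clipping of each pixel's time series; this is not the pipeline's algorithm:

import numpy as np


def sigma_clip_mask(data, sigma=4.0):
    """data: (nframes, ny, nx) stack.  Return 1 for good pixels, 0 for bad."""
    med = np.median(data, axis=0)       # per-pixel median over time
    std = np.std(data, axis=0)          # per-pixel scatter over time
    std[std == 0] = np.inf              # constant pixels are never flagged
    return (np.abs(data - med) <= sigma * std).astype(np.uint8)


stack = np.random.normal(100.0, 1.0, (50, 8, 8))
stack[10, 3, 3] = 1e4                   # inject one hot pixel
mask = sigma_clip_mask(stack)
print(mask.size - mask.sum(), "pixels flagged bad")
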
Example #8
def run_denoising(eventname, cwd):
    """
    Load the event.
    Read the control file.
    Run denoising for each configuration.
    """

    owd = os.getcwd()
    os.chdir(cwd)
    config = os.path.basename(eventname)[:-4] + '.pcf'
    pcfs = rd.read_pcf(config, 'denoise')
    
    if len(pcfs) == 1:  # With only one run, I may already be inside the denoise dir (re-run case):
        # Get name of denoising dir:
        pcf = pcfs[0]
        denoisedir = pcf.wavelet +'_'+ pcf.threshold +'_L'+ \
                     str(pcf.numlvls)
        if pcf.pcfname is not None:
            denoisedir += "_" + str(pcf.pcfname)
        if cwd[-len(denoisedir):] == denoisedir:
            # Go to dir where poet2 files were saved.
            cwd = cwd[:-len(denoisedir)]
            os.chdir(cwd)
    
    # Loop over each run:
#    for run in np.arange(nruns):
    for run, pcf in enumerate(pcfs):

        # Load a fresh event:
        print("Loading " + eventname)
        event = me.loadevent(eventname, load=['data','uncd','mask'])

        # Name of the directory to put the results:
        denoisedir = pcf.wavelet +'_'+ pcf.threshold +'_L'+ str(pcf.numlvls)
        if pcf.pcfname is not None:
            denoisedir += "_" + str(pcf.pcfname)
        event.denoisedir = denoisedir
        
        # Create the denoising directory if it doesn't exist:
        if not os.path.exists(denoisedir): 
            os.mkdir(denoisedir)

        # copy the centering and photometry configs and this pcf into
        # denoise directory
        filename = denoisedir + '/' + event.eventname + '.pcf'
        pcf.make_file(filename, 'denoise')
        rd.copy_config(config, ['centering', 'photometry'], filename)
        
        # Modify source estimate
        if hasattr(pcf, 'srcest'):
            if len(pcfs) == event.npos:
                event.srcest[:,run] = pcf.srcest
            else:
                for pos in range(event.npos):
                    event.srcest[:,pos] = pcf.srcest

        # Set denoised flag to True:
        event.denoised = True
        
        # Call denoising on each wavelet:
        denoise(pcf, denoisedir, owd)
    
    os.chdir(owd)
    return
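
The denoise() call above applies wavelet denoising configured by wavelet, threshold, and numlvls. A rough standalone sketch of wavelet soft-thresholding, assuming the PyWavelets (pywt) package is available; this is not the pipeline's denoise() implementation:

import numpy as np
import pywt


def wavelet_denoise(signal, wavelet='db4', numlvls=3):
    coeffs = pywt.wavedec(signal, wavelet, level=numlvls)
    sigma = np.median(np.abs(coeffs[-1])) / 0.6745        # noise estimate from the finest scale
    thresh = sigma * np.sqrt(2.0 * np.log(signal.size))   # universal threshold
    coeffs[1:] = [pywt.threshold(c, thresh, mode='soft') for c in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet)[:signal.size]


noisy = np.sin(np.linspace(0, 6 * np.pi, 512)) + 0.3 * np.random.randn(512)
denoised = wavelet_denoise(noisy)
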
Example #9
def run_photometry(eventname, control, cwd=None):
    """
  Load the event.
  Read the control file.
  Launch a process for each photometry run.
  """

    if cwd is None:
        cwd = os.getcwd()
    os.chdir(cwd)
    pcf = rd.read_pcf(control)
    nruns = len(pcf)

    if nruns == 1:  # With only one run, I may already be inside the photometry dir (re-run case):
        # Get name of photometry dir:
        if pcf[0].dooptimal.get():
            photdir = 'optimal' + '%02d' % pcf[0].oresize.get()
        else:
            photdir = ('ap%03d' % (pcf[0].photap.get() * 100) +
                       '%02d' % pcf[0].skyin.get() +
                       '%02d' % pcf[0].skyout.get())
        if pcf[0].pcfname.get() != "":
            photdir += "_" + pcf[0].pcfname.get()

        # If I am in the photometry dir already:
        if cwd[-len(photdir):] == photdir:
            # Go to dir where poet3 files were saved.
            cwd = cwd[:-len(photdir)]
            os.chdir(cwd)

        mute = False
    else:
        mute = True

    # Load the event:
    event = me.loadevent(eventname, load=['data', 'uncd', 'mask'])

    # Loop over each run:
    for run in np.arange(nruns):
        os.chdir(cwd)

        # Make a copy of the event:
        this_event = copy.copy(event)

        # Get name of photometry dir:
        if pcf[run].dooptimal.get():
            photdir = 'optimal' + '%02d' % pcf[run].oresize.get()
        else:
            photdir = ('ap%03d' % (pcf[run].photap.get() * 100) +
                       '%02d' % pcf[run].skyin.get() +
                       '%02d' % pcf[run].skyout.get())
        if pcf[run].pcfname.get() != "":
            photdir += "_" + pcf[run].pcfname.get()
        this_event.photdir = photdir

        # Create the photometry directory if it doesn't exist:
        if not os.path.exists(photdir):
            os.mkdir(photdir)

        # Launch the thread:
        p = Process(target=photometry,
                    args=(this_event, pcf[run], photdir, mute))
        p.start()
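
The photometry() worker launched above performs aperture photometry for each (photap, skyin, skyout) configuration. A bare-bones, whole-pixel sketch of aperture photometry with a sky annulus (simple_aperture is hypothetical; the pipeline handles partial pixels, masks, and uncertainties):

import numpy as np


def simple_aperture(frame, xc, yc, photap=3.0, skyin=7.0, skyout=12.0):
    ny, nx = frame.shape
    yy, xx = np.mgrid[0:ny, 0:nx]
    r = np.sqrt((xx - xc)**2 + (yy - yc)**2)
    sky = np.median(frame[(r >= skyin) & (r <= skyout)])   # per-pixel sky level
    aperture = r <= photap
    return np.sum(frame[aperture] - sky)                    # sky-subtracted flux


frame = np.random.normal(10.0, 1.0, (32, 32))               # hypothetical frame
frame[15, 15] += 500.0                                       # hypothetical point source
print(simple_aperture(frame, 15.0, 15.0))
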
Example #10
def checks(eventname, period=None, ephtime=None, cwd=None):
    if cwd is None:
        cwd = os.getcwd()
    os.chdir(cwd)

    # Load the Event
    event = me.loadevent(eventname)

    # Create a log
    oldlogname = event.logname
    logname = event.eventname + "_p5.log"
    log = le.Logedit(logname, oldlogname)
    log.writelog('\nStart Checks: ' + time.ctime())

    # If p5 run after p3: we are using results from PSFfit:
    if not hasattr(event, "phottype"):
        event.phottype = "psffit"
        try:
            os.mkdir("psffit/")
        except:
            pass
        os.chdir("psffit/")

    # Move frame parameters to fit Kevin's syntax:
    # event.fp.param --> event.param
    event.filenames = event.fp.filename
    event.x = event.fp.x
    event.y = event.fp.y
    event.sx = event.fp.sx
    event.sy = event.fp.sy
    event.time = event.fp.time
    event.pos = event.fp.pos
    event.frmvis = event.fp.frmvis
    event.filename = event.eventname

    if event.phottype == "aper":
        event.good = event.fp.good
        event.aplev = event.fp.aplev
        event.aperr = event.fp.aperr
        event.background = event.fp.skylev
        log.writelog('Photometry method is APERTURE')
    elif event.phottype == "psffit":
        event.aplev = event.fp.psfflux
        event.background = event.fp.psfsky
        # FINDME: do something with aperr and good
        event.aperr = 0.0025 * np.mean(
            event.fp.psfflux) * (event.aplev * 0 + 1)
        event.good = np.ones(np.shape(event.aplev))
        log.writelog('Photometry method is PSF FITTING')
    elif event.phottype == "optimal":
        event.good = event.fp.ogood
        event.aplev = event.fp.ophotlev
        event.aperr = event.fp.ophoterr
        # FINDME: Background from optimal?
        event.background = event.fp.psfsky
        log.writelog('Photometry method is OPTIMAL')

    # UPDATE period AND ephtime
    if period is not None:
        event.period = period[0]
        event.perioderr = period[1]
    if ephtime is not None:
        event.ephtime = ephtime[0]
        event.ephtimeerr = ephtime[1]

    log.writelog("\nCurrent event = " + event.eventname)
    log.writelog("Kurucz file     = " + event.kuruczfile)
    log.writelog("Filter file     = " + event.filtfile)

    # Light-time correction to BJD:

    # Julian observation date
    #event.juldat = event.jdjf80 + event.fp.time / 86400.0
    event.juldat = event.fp.juldat = event.j2kjd + event.fp.time / 86400.0

    if not event.ishorvec:
        log.writeclose('\nHorizon file not found!')
        return
    print("Calculating BJD correction...")
    event.fp.bjdcor = stc.suntimecorr(event.ra, event.dec, event.fp.juldat,
                                      event.horvecfile)

    # Get bjd times:
    event.bjdcor = event.fp.bjdcor
    #event.bjddat = event.fp.juldat + event.fp.bjdcor / 86400.0
    event.bjdutc = event.fp.juldat + event.fp.bjdcor / 86400.0  # utc bjd date
    event.bjdtdb = np.empty(event.bjdutc.shape)
    for i in range(event.bjdtdb.shape[0]):
        event.bjdtdb[i] = utc_tt.utc_tdb(
            event.bjdutc[i])  # terrestrial bjd date

    # ccampo 3/18/2011: check which units phase should be in
    try:
        if event.tep.ttrans.unit == "BJDTDB":
            event.timestd = "tdb"
            event.fp.phase = tp.time2phase(event.bjdtdb, event.ephtime,
                                           event.period, event.ecltype)
        else:
            event.timestd = "utc"
            event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                           event.period, event.ecltype)
    except:
        event.timestd = "utc"
        event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                       event.period, event.ecltype)

    # assign phase variable
    event.phase = event.fp.phase

    # ccampo 3/18/2011: moved this above
    # Eclipse phase, BJD
    #event.fp.phase = tp.time2phase(event.fp.juldat + event.fp.bjdcor / 86400.0,
    #                               event.ephtime, event.period, event.ecltype)

    # verify leapsecond correction
    hfile = event.filenames[0, 0]
    try:
        image, event.header = pf.getdata(hfile.decode('utf-8'), header=True)
        dt = ((event.bjdtdb - event.bjdutc) * 86400.0)[0, 0]
        dt2 = event.header['ET_OBS'] - event.header['UTCS_OBS']
        log.writelog('Leap second correction : ' + str(dt) + ' = ' + str(dt2))
    except:
        log.writelog('Could not verify leap-second correction.')

    log.writelog('Min and Max light-time correction: ' +
                 str(np.amin(event.fp.bjdcor)) + ', ' +
                 str(np.amax(event.fp.bjdcor)) + ' seconds')

    # Verify light-time correction
    try:
        image, event.header = pf.getdata(hfile.decode('utf-8'), header=True)
        try:
            log.writelog('BJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['BMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
        except:
            log.writelog('HJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['HMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
    except:
        log.writelog('Could not verify light-time correction.')

    # Number of good frames should be > 95%
    log.writelog("Good Frames = %7.3f" % (np.mean(event.good) * 100) + " %")

    log.writelog('\nCentering:     X mean     X stddev  Y mean     Y stddev')
    for pos in np.arange(event.npos):
        log.writelog(
            'position %2d:' % pos +
            ' %10.5f' % np.mean(event.x[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.x[pos, np.where(event.good[pos])]) +
            ' %10.5f' % np.mean(event.y[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.y[pos, np.where(event.good[pos])]))

    # COMPUTE RMS POSITION CONSISTENCY
    event.xprecision = np.sqrt(np.median(np.ediff1d(event.x)**2))
    event.yprecision = np.sqrt(np.median(np.ediff1d(event.y)**2))

    log.writelog('RMS of x precision = ' + str(np.round(event.xprecision, 4)) +
                 ' pixels.')
    log.writelog('RMS of y precision = ' + str(np.round(event.yprecision, 4)) +
                 ' pixels.')
    if event.phottype == "aper":
        log.writelog('\nCenter & photometry half-width/aperture sizes = ' +
                     str(event.ctrim) + ', ' + str(event.photap) + ' pixels.')
    log.writelog('Period = ' + str(event.period) + ' +/- ' +
                 str(event.perioderr) + ' days')
    log.writelog('Ephemeris = ' + str(event.ephtime) + ' +/- ' +
                 str(event.ephtimeerr) + ' JD')

    fmt1 = [
        'C0o', 'C1o', 'C2o', 'ro', 'ko', 'co', 'mo', 'bs', 'gs', 'ys', 'rs',
        'ks', 'cs', 'ms'
    ]
    fmt2 = ['b,', 'g,', 'y,', 'r,']

    plt.figure(501)
    plt.clf()
    plt.figure(502, figsize=(8, 12))
    plt.clf()
    plt.figure(503)
    plt.clf()
    plt.figure(504)
    plt.clf()
    plt.figure(505)
    plt.clf()
    plt.figure(506)
    plt.clf()

    for pos in np.arange(event.npos):
        wheregood = np.where(event.good[pos, :])
        # CHOOSE ONLY GOOD FRAMES FOR PLOTTING
        phase = event.phase[pos, :][wheregood]
        aplev = event.aplev[pos, :][wheregood]
        jdtime = event.bjdutc[pos, :][wheregood]
        background = event.background[pos, :][wheregood]
        # COMPUTE X AND Y PIXEL LOCATION RELATIVE TO ...
        if event.npos > 1:
            # CENTER OF EACH PIXEL
            y = (event.y[pos, :] - np.round(event.y[pos, :]))[wheregood]
            x = (event.x[pos, :] - np.round(event.x[pos, :]))[wheregood]
        else:
            # CENTER OF MEDIAN PIXEL
            y = (event.y[pos, :] - np.round(np.median(event.y)))[wheregood]
            x = (event.x[pos, :] - np.round(np.median(event.x)))[wheregood]

        # SORT aplev BY x, y AND radial POSITIONS
        rad = np.sqrt(x**2 + y**2)
        xx = np.sort(x)
        yy = np.sort(y)
        sxx = np.sort(event.sx[0])
        syy = np.sort(event.sy[0])
        rr = np.sort(rad)
        xaplev = aplev[np.argsort(x)]
        yaplev = aplev[np.argsort(y)]
        raplev = aplev[np.argsort(rad)]

        # BIN RESULTS FOR PLOTTING POSITION SENSITIVITY EFFECT
        nobj = aplev.size
        nbins = int(120 / event.npos)
        binxx = np.zeros(nbins)
        binyy = np.zeros(nbins)
        binsxx = np.zeros(nbins)
        binsyy = np.zeros(nbins)
        binrr = np.zeros(nbins)
        binxaplev = np.zeros(nbins)
        binyaplev = np.zeros(nbins)
        binraplev = np.zeros(nbins)
        binxapstd = np.zeros(nbins)
        binyapstd = np.zeros(nbins)
        binrapstd = np.zeros(nbins)
        binphase = np.zeros(nbins)
        binaplev = np.zeros(nbins)
        binapstd = np.zeros(nbins)
        for i in range(nbins):
            start = int(1. * i * nobj / nbins)
            end = int(1. * (i + 1) * nobj / nbins)
            binxx[i] = np.mean(xx[start:end])
            binyy[i] = np.mean(yy[start:end])
            binsxx[i] = np.mean(sxx[start:end])
            binsyy[i] = np.mean(syy[start:end])
            binrr[i] = np.mean(rr[start:end])
            binxaplev[i] = np.median(xaplev[start:end])
            binyaplev[i] = np.median(yaplev[start:end])
            binraplev[i] = np.median(raplev[start:end])
            binxapstd[i] = np.std(xaplev[start:end]) / np.sqrt(end - start)
            binyapstd[i] = np.std(yaplev[start:end]) / np.sqrt(end - start)
            binrapstd[i] = np.std(raplev[start:end]) / np.sqrt(end - start)
            binphase[i] = np.mean(phase[start:end])
            binaplev[i] = np.median(aplev[start:end])
            binapstd[i] = np.std(aplev[start:end]) / np.sqrt(end - start)

        # PLOT 1: flux
        plt.figure(501)
        plt.errorbar(binphase,
                     binaplev,
                     binapstd,
                     fmt=fmt1[pos],
                     linewidth=1,
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Phase vs. Binned Flux')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # PLOT 2: position-flux
        plt.figure(502)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Position vs. Binned Flux')
        plt.errorbar(binyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('pos %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('pos %i x' % (pos)))
        plt.xlabel('Pixel Position')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        #PLOT 3: position-phase
        plt.figure(503)

        plt.plot(phase, x, 'b,')
        plt.plot(phase, y, 'r,')
        plt.title(event.planetname + ' Phase vs. Position')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Pixel Position')
        plt.legend('xy')

        #PLOT 4: flux-radial distance
        plt.figure(504)
        plt.errorbar(binrr,
                     binraplev,
                     binrapstd,
                     fmt=fmt1[pos],
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Radial Distance vs. Flux')
        plt.xlabel('Distance From Center of Pixel')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # ::::::::::: Background setting :::::::::::::::::
        if np.size(background) != 0:
            # number of points per bin:
            npoints = 42
            nbins = int(np.size(background) / npoints)
            medianbg = np.zeros(nbins)
            bphase = np.zeros(nbins)  # background bin phase
            bintime = np.zeros(nbins)  # background bin JD time
            for i in range(nbins):
                start = int(1.0 * i * npoints)
                end = int(1.0 * (i + 1) * npoints)
                medianbg[i] = np.median(background[start:end])
                bphase[i] = np.mean(phase[start:end])
                bintime[i] = np.mean(jdtime[start:end])

            # PLOT 5: background-phase
            day = int(np.floor(np.amin(jdtime)))
            timeunits1 = jdtime - day
            timeunits2 = bintime - day
            xlabel = 'JD - ' + str(day)
            if event.ecltype == 's':
                timeunits1 = phase
                timeunits2 = bphase
                xlabel = 'Phase'

            plt.figure(505)
            plt.plot(timeunits1,
                     background,
                     color='0.45',
                     linestyle='None',
                     marker=',')
            if np.size(background) > 10000:
                plt.plot(timeunits2, medianbg, fmt2[pos], label='median bins')
            plt.title(event.planetname + ' Background level')
            plt.xlabel(xlabel)
            plt.ylabel('Flux')

        # PLOT 6: width-flux
        plt.figure(506)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Gaussian Width vs. Binned Flux')
        plt.errorbar(binsyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('width %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binsxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('width %i x' % (pos)))
        plt.xlabel('Gaussian Width')
        plt.ylabel('Flux')
        plt.legend(loc='best')

    figname1 = str(event.eventname) + "-fig501.png"
    figname2 = str(event.eventname) + "-fig502.png"
    figname3 = str(event.eventname) + "-fig503.png"
    figname4 = str(event.eventname) + "-fig504.png"
    figname5 = str(event.eventname) + "-fig505.png"
    figname6 = str(event.eventname) + "-fig506.png"

    plt.figure(501)
    plt.savefig(figname1)
    plt.figure(502)
    plt.savefig(figname2)
    plt.figure(503)
    plt.savefig(figname3)
    plt.figure(504)
    plt.savefig(figname4)
    plt.figure(505)
    plt.plot(timeunits1[0],
             background[0],
             color='0.45',
             linestyle='None',
             marker=',',
             label='all points')
    plt.legend(loc='best')
    plt.savefig(figname5)
    plt.figure(506)
    plt.savefig(figname6)

    # Saving
    me.saveevent(event, event.eventname + "_p5c")

    cwd = os.getcwd() + "/"
    # Print outputs, end-time, and close log.
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_p5c.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)
    log.writelog("Figures:")
    log.writelog(" " + cwd + figname1)
    log.writelog(" " + cwd + figname2)
    log.writelog(" " + cwd + figname3)
    log.writelog(" " + cwd + figname4)
    log.writelog(" " + cwd + figname5)
    log.writelog(" " + cwd + figname6)
    log.writeclose('\nEnd Checks: ' + time.ctime())

    return event
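
The RMS position-consistency figure logged above is the median frame-to-frame jitter; in isolation, with a hypothetical centering series:

import numpy as np

x = 15.0 + 0.02 * np.random.randn(1000)                # hypothetical per-frame x centers
xprecision = np.sqrt(np.median(np.ediff1d(x)**2))
print('RMS of x precision = ' + str(np.round(xprecision, 4)) + ' pixels.')
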
Example #11
File: w3lc.py  Project: kevin218/WFC3
def lcWFC3(eventname,
           eventdir,
           nchan,
           wmin=1.125,
           wmax=1.65,
           expand=1,
           isplots=True):
    '''
    Compute photometric flux over specified range of wavelengths
    
    Parameters
    ----------
    eventname   : Unique identifier for these data
    eventdir    : Location of save file
    nchan       : Number of spectrophotometric channels
    wmin        : minimum wavelength
    wmax        : maximum wavelength
    expand      : expansion factor
    isplots     : Set True to produce plots
    
    Returns
    -------
    None
    
    History
    -------
    Written by Kevin Stevenson      June 2012
    
    '''

    # Load saved data
    # An event is an instance of an object
    #   loadevent: load the saved files from storage
    #       container for all of the data
    #           event: small data structures (i.e. light curves)
    #           aux:   large data structures (i.e. image cubes, etc)
    #
    #       i.e. event.BJDTDB : returns the time array
    #            aux.spectra  : 1D spectra per NDR
    #            aux.specerr  : 1D spectra error per NDR
    #            aux.data_mhdr: master header per frame
    #
    print("Loading saved data...")
    ev = me.loadevent(eventdir + '/d-' + eventname + '-w1')
    aux = me.loadevent(eventdir + '/d-' + eventname + '-data')
    ev.spectra = aux.spectra
    specerr = aux.specerr
    data_mhdr = aux.data_mhdr

    # Determine wavelength bins
    binsize = (wmax - wmin) / nchan  # wavelength width of each bin
    wave_low = np.round([i for i in np.linspace(wmin, wmax - binsize, nchan)],
                        3)  # Left  edge of the wavelength bins
    wave_hi = np.round([i for i in np.linspace(wmin + binsize, wmax, nchan)],
                       3)  # Right edge of the wavelength bins
    # binwave     = (wave_low + wave_hi)/2. # Middle of wavelength bin

    # Increase resolution of spectra: uses scipy.ndimage.zoom (spni.zoom) to oversample the image in a flux-conserving interpolation
    if expand > 1:
        # note: ev.n_spec : number of spectra per frame :: hopefully just one
        print("Increasing spectra resolution...")
        # ev.spectra.shape[3] : wavelength (dispersion) dimension
        hdspectra = np.zeros(
            (ev.n_files, ev.n_spec, ev.n_reads - 1, expand *
             ev.spectra.shape[3]))  # hdspectra : high definition spectra
        hdspecerr = np.zeros(
            (ev.n_files, ev.n_spec, ev.n_reads - 1, expand *
             ev.spectra.shape[3]))  # hdspecerr : high definition spectra error
        hdwave = np.zeros((ev.n_img, ev.n_spec, expand * ev.spectra.shape[3]
                           ))  # hdwave    : high definition wavelength array

        # This is the 'zoom' step
        for n in range(
                ev.n_spec):  # per spectrum on the image (n_spec == 1 for WFC3)
            # This operates over all 1D stellar spectrum (over time) at once
            hdspectra[:, n] = spni.zoom(ev.spectra[:, n], zoom=[1, 1, expand])
            hdspecerr[:, n] = spni.zoom(specerr[:, n],
                                        zoom=[1, 1, expand]) * np.sqrt(expand)
        for m in range(ev.n_img):  # n_img : number of direct images
            # Some visits have a new wavelength solution per orbit
            for n in range(ev.n_spec):
                hdwave[m, n] = spni.zoom(ev.wave[m][n], zoom=expand)

        # Store high defition spectra
        ev.spectra = hdspectra
        specerr = hdspecerr
        ev.wave = hdwave

    # Correct for drift, if calculated
    if hasattr(ev, 'drift_model') and ev.drift_model is not None:
        # Correct for drift :: map the motion of the spectrum across the detector
        #   provides higher precision on the wavelength solution
        print(
            'Applying drift correction... (Old stare-mode version, may not work)'
        )
        # Staring Mode Operations
        # ev.drift_model is defined in `w1`
        nx = ev.spectra.shape[
            3]  # number of pixels in the wavelength direction # FINDME: CHANGED the SHAPE[2] to a SHAPE[3]
        for m in range(ev.n_files):  # iterate over time
            for n in range(ev.n_spec
                           ):  # iterate over number of spectra (ideally == 1)
                spline = spi.UnivariateSpline(
                    np.arange(nx), ev.spectra[m, n], k=3,
                    s=0)  # Compute the spline for the shift
                ev.spectra[m, n] = spline(
                    np.arange(nx) +
                    ev.drift_model[n, m])  # Shifts the spectrum
        # finished Stare-mode operations
    elif ev.detector == 'IR':
        # This is for Scanning Mode
        #Calculate drift over all frames and non-destructive reads
        print('Applying drift correction...')
        # hst.drift_fit calculates the drift in the 1D spectra :: Does a cross correlation
        #   ev      : class with the data
        #   preclip : left edge of spectrum
        #   postclip: right edge of spectrum
        ev.drift, ev.drift_model, ev.goodmask = hst.drift_fit(ev,
                                                              preclip=0,
                                                              postclip=None,
                                                              width=5 * expand,
                                                              deg=2,
                                                              validRange=11 *
                                                              expand)

        # Correct for drift
        if ev.n_reads > 2:
            # Throw away the first NDR -- it's bad
            print('WARNING: Marking all first reads as bad.')
            istart = 1
        else:
            print('Using first reads.')
            istart = 0

        # Apply the Drift correction (fancy word for spline)
        nx = ev.spectra.shape[
            3]  # number of pixels in the wavelength direction
        for m in range(ev.n_files):  # iterate over time
            for n in range(ev.n_spec
                           ):  # iterate over number of spectra (ideally == 1)
                for p in range(istart, ev.n_reads - 1):
                    # Compute the spline for the shift
                    spline = spi.UnivariateSpline(np.arange(nx),
                                                  ev.spectra[m, n, p],
                                                  k=3,
                                                  s=0)

                    # Using measured drift, not model fit
                    # `model fit` comes from the ev.drift_model
                    # `measured drift` comes from spline of order 3
                    ev.spectra[m, n, p] = spline(
                        np.arange(nx) +
                        ev.drift[n, m, p])  # Shifts the spectrum

        #Apply scan height correction
        #print('Applying scan height correction...')
        #ev.spectra  += ev.spectra[0,0]*(1-ev.scanHeight[:,:,np.newaxis,np.newaxis]/ev.scanHeight[0,0])
        #ev.spectra /= ev.scanHeight[:,:,np.newaxis,np.newaxis]
        #specerr    /= ev.scanHeight[:,:,np.newaxis,np.newaxis]
    else:
        # UVIS Stuff
        istart = 0

    # Assign scan direction: 0:forward vs 1:reverse
    ev.scandir = np.zeros(
        ev.n_files)  # Sets up all images as forward scan: modify later
    ev.n_scan0 = 0  # Number of forward scans
    ev.n_scan1 = 0  # Number of reverse scans

    try:
        scan0 = data_mhdr[0]['POSTARG2']  # POSTARG2 of the first file (scan direction 0)
        scan1 = data_mhdr[1]['POSTARG2']  # POSTARG2 of the second file (scan direction 1)
        for m in range(ev.n_files):
            # for every file file, check header if POSTARG2 == scan0 or scan1
            if data_mhdr[m]['POSTARG2'] == scan0:
                # Sum up number of forward scan
                ev.n_scan0 += 1
            elif data_mhdr[m]['POSTARG2'] == scan1:
                # Store scandir == 1 for reverse scanning
                ev.scandir[m] = 1
                # Sum up number of reverse scan
                ev.n_scan1 += 1
            else:
                # Something happened
                print('WARNING: Unknown scan direction for file ' + str(m) +
                      '.')

        print("# of files in scan direction 0: " + str(ev.n_scan0))
        print("# of files in scan direction 1: " + str(ev.n_scan1))
    except:
        ev.n_scan0 = ev.n_files
        print("Unable to determine scan direction, assuming unidirectional.")

    print("Generating light curves...")
    ev.eventname2 = ev.eventname  # Store old event name
    for i in range(nchan):
        ev.wave_low = wave_low[i]
        ev.wave_hi = wave_hi[i]
        print("Bandpass = %.3f - %.3f" % (ev.wave_low, ev.wave_hi))

        # Calculate photometric flux for each spectrum
        # These become the light curve and its error bars (to be populated):
        ev.photflux = np.zeros((ev.n_spec, ev.n_files,
                                np.max((1, ev.n_reads - 2))))
        ev.photfluxerr = np.zeros((ev.n_spec, ev.n_files,
                                   np.max((1, ev.n_reads - 2))))
        # ev.wave         = []
        for n in range(ev.n_spec):  # hopefully == 1
            if ev.detector == 'IR':
                # Compute common wavelength and indices to apply over all observations
                wave = np.zeros(len(ev.wave[0][n]))  # to become the global wavelength array
                for j in range(ev.n_img):  # iterate over each image
                    wave += ev.wave[j][n]

                wave /= ev.n_img

                # wave  = np.mean(ev.wave, axis=0) # FINDME: TEST LATER

                # index == where wave meets BOTH requirement
                # Which indices in the mean wavelength (`wave`) are between the individual channel boundaries
                index = np.where(
                    np.bitwise_and(wave >= wave_low[i], wave <= wave_hi[i]))[0]
                # index = np.where((wave >= wave_low[i])*(wave <= wave_hi[i]))[0] # FINDME: TEST LATER
            else:
                # UVIS: Use all pixels for aperture photometry
                index = range(len(ev.spectra[0, 0, 0]))

            for m in range(ev.n_files):
                '''
                # This is a different way to compute the indices to associate with columns to be summed into 1D spectra
                # Select appropriate orbit-dependent wavelength
                if ev.n_img == (np.max(ev.orbitnum)+1):
                    j = int(ev.orbitnum[m])
                else:
                    j = 0
                #Method 1
                ev.wave.append(np.mean(ev.wavegrid[j][n],axis=0))
                index = np.where(np.bitwise_and(ev.wave[n] >= wave_low, ev.wave[n] <= wave_hi))[0]
                #Method 2
                index = np.where(np.bitwise_and(ev.wave[j][n] >= wave_low, ev.wave[j][n] <= wave_hi))[0]
                '''

                # This creates a light curve per NDR
                ev.photflux[n, m] = np.sum(
                    ev.spectra[m, n, istart:, index], axis=0
                )  # Summing in the 1D spectral plane (m == 1 for WFC3)
                ev.photfluxerr[n, m] = np.sqrt(
                    np.sum(specerr[m, n, istart:, index]**2, axis=0)
                )  # Summing in quadrature the 1D spectral plane (m == 1 for WFC3)

        # Save results for individual channels into individual files
        ev.eventname = ev.eventname2 + '_' + str(int(
            ev.wave_low * 1e3)) + '_' + str(int(ev.wave_hi * 1e3))
        # me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3', delete=['data_mhdr', 'spectra', 'specerr'])

        # saveevent stores everything (that we want) into a pickle
        me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3')

        # Produce plot
        if isplots == True:
            # 3XYZ: 3: w3 function
            #       X: Plot Number
            #       Y & Z: Spectral Channel Number
            #
            # Normalized Flux vs Time
            plt.figure(3000 + i, figsize=(10, 8))
            plt.clf()  # this clears the frame
            plt.suptitle('Wavelength range: ' + str(wave_low[i]) + '-' +
                         str(wave_hi[i]))
            ax = plt.subplot(111)
            #for n in range(ev.n_spec):
            #plt.subplot(ev.n_spec,1,1)
            #plt.title('Star ' + str(n))
            #igood   = np.where(ev.goodmask[0])[0]
            iscan0 = np.where(ev.scandir == 0)[0]
            iscan1 = np.where(ev.scandir == 1)[0]
            mjd = np.floor(ev.bjdtdb[0])
            flux0 = np.sum(ev.photflux[0][iscan0], axis=1) / np.sum(
                ev.photflux[0, [iscan0[-1]]])
            #err  = np.sqrt(1 / np.sum(1/ev.photfluxerr[0]**2,axis=1))/np.sum(ev.photflux[0,-1])
            try:
                err0 = np.sqrt(np.sum(ev.photfluxerr[0][iscan0]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
            except:
                err0 = 0
                # err1    = 0
            plt.errorbar(ev.bjdtdb[iscan0] - mjd, flux0, err0, fmt='bo')
            plt.text(
                0.05,
                0.1,
                "MAD = " +
                str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux0))))) +
                " ppm",
                transform=ax.transAxes,
                color='b')
            if len(iscan1) > 0:
                flux1 = np.sum(ev.photflux[0][iscan1], axis=1) / np.sum(
                    ev.photflux[0, [iscan0[-1]]])
                err1 = np.sqrt(np.sum(ev.photfluxerr[0][iscan1]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan1[-1]]])
                plt.errorbar(ev.bjdtdb[iscan1] - mjd, flux1, err1, fmt='ro')
                plt.text(
                    0.05,
                    0.05,
                    "MAD = " +
                    str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux1))))) +
                    " ppm",
                    transform=ax.transAxes,
                    color='r')
            plt.ylabel('Normalized Flux')
            plt.xlabel('Time [MJD + ' + str(mjd) + ']')

            plt.subplots_adjust(left=0.10,
                                right=0.95,
                                bottom=0.10,
                                top=0.90,
                                hspace=0.20,
                                wspace=0.3)
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(3000 + i) + '.png')
            #plt.pause(0.1)

            if ev.detector == 'IR':
                # Drift: frame number vs drift in the wavelength direction
                plt.figure(3100 + i, figsize=(10, 8))
                plt.clf()
                # Use a separate loop variable so the channel index i is not shadowed
                for r in range(istart, ev.n_reads - 1):
                    plt.subplot(1, np.max((1, ev.n_reads - 2)), np.max((1, r)))
                    plt.plot(ev.drift[0, :, r], '.')
                    if r == istart:
                        plt.ylabel('Spectrum Drift')
                    if r == (ev.n_reads - 1) // 2:
                        plt.xlabel('Frame Number')
                plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                            str(3100 + i) + '.png')

    if (isplots == True) and (ev.detector == 'IR'):
        # 2D light curve with drift correction
        # Plot frame number vs wavelength with color associated with value
        #   Very cool plot that produces "image" of the entire time series (hopefully corrected)
        plt.figure(3200, figsize=(8, ev.n_files / 20. + 0.8))
        plt.clf()
        vmin = 0.97
        vmax = 1.03
        # istart      = 0
        normspec = np.mean(ev.spectra[:, 0, istart:], axis=1) / np.mean(
            ev.spectra[-6:, 0, istart:], axis=(0, 1))
        ediff = np.zeros(ev.n_files)
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        for i in range(ev.n_files):
            ediff[i] = 1e6 * np.median(
                np.abs(np.ediff1d(normspec[i, iwmin:iwmax])))
            plt.scatter(ev.wave[0][0],
                        np.zeros(ev.specsize) + i,
                        c=normspec[i],
                        s=14,
                        linewidths=0,
                        vmin=vmin,
                        vmax=vmax,
                        marker='s',
                        cmap=plt.cm.RdYlBu_r)
        plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
        plt.xlim(wmin, wmax)
        if nchan > 1:
            xticks = np.round([i for i in np.linspace(wmin, wmax, nchan + 1)],
                              3)
            plt.xticks(xticks, xticks)
            plt.vlines(xticks, 0, ev.n_files, 'k', 'dashed')
        plt.ylim(0, ev.n_files)
        plt.ylabel('Frame Number')
        plt.xlabel(r'Wavelength ($\mu m$)')
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(eventdir + '/figs/fig3200-2D_LC.png')
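
# --- Illustrative sketch (not part of the original code above) ---
# A minimal, self-contained version of the band-integration step used above:
# flux inside one spectroscopic channel is summed over wavelength, the errors
# are added in quadrature, and the scatter is summarized as the median
# absolute point-to-point difference in ppm. All arrays and values below are
# made up for illustration.
import numpy as np

rng = np.random.default_rng(0)
wave = np.linspace(1.0, 1.7, 200)                # wavelength grid [microns]
spectra = 1e4 + rng.normal(0, 30, (100, 200))    # one 1D spectrum per frame
specerr = np.full_like(spectra, 30.0)            # per-pixel uncertainties

wave_low, wave_hi = 1.1, 1.3                     # one channel's boundaries
index = np.where((wave >= wave_low) & (wave <= wave_hi))[0]

photflux = np.sum(spectra[:, index], axis=1)                 # band-summed flux
photfluxerr = np.sqrt(np.sum(specerr[:, index]**2, axis=1))  # quadrature sum

flux = photflux / photflux[-1]                   # normalize to the last frame
mad_ppm = 1e6 * np.median(np.abs(np.ediff1d(flux)))
print("MAD = %.0f ppm" % mad_ppm)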
Example #12
0
def interactive():
    #w1.reduceWFC3(eventname, isplots=1)   #Reduction + extraction
    ev = w2.reduceWFC3(eventname, eventdir, isplots=3)
    # G141
    #wave_low = [1.125]
    #wave_hi  = [1.65]
    #w3.lcWFC3   (eventname, eventdir, 1, isplots=1, expand=1)
    #ev=w5.lcWFC3   (eventname, eventdir, 1, isplots=1, expand=1)
    #ev=w5.lcWFC3   (eventname, eventdir, 15, isplots=1, expand=1)
    # G102
    # White
    ev = w5.lcWFC3(eventname,
                   eventdir,
                   1,
                   0,
                   0,
                   wmin=0.85,
                   wmax=1.18,
                   isplots=1,
                   expand=1)  #, offset=0)
    # Spec - 15 chan
    ev = w5.lcWFC3(eventname,
                   eventdir,
                   15,
                   0,
                   0,
                   wmin=0.85,
                   wmax=1.18,
                   isplots=1,
                   expand=1)  #, offset=0)
    # Spec - 1 chan
    wave_low = np.array([0.85])
    wave_hi = np.array([1.18])
    # Spec - 15 chan
    wmin = 0.85
    wmax = 1.18
    nchan = 15
    binsize = (wmax - wmin) / nchan
    wave_low = np.round([i for i in np.linspace(wmin, wmax - binsize, nchan)],
                        3)
    wave_hi = np.round([i for i in np.linspace(wmin + binsize, wmax, nchan)],
                       3)
    #Spec - 28 chan
    #wave_low = np.round([i for i in np.arange(1.10995, 1.62,0.0186)],3)
    #wave_hi  = np.round([i for i in np.arange(1.12855, 1.64,0.0186)],3)
    for i in range(len(wave_low)):
        print(i, len(wave_low))
        # Limb darkening
        evname = eventname + '_' + str(int(wave_low[i] * 1e3)) + '_' + str(
            int(wave_hi[i] * 1e3))
        w4.ld_driver(evname,
                     eventdir,
                     wave_low[i],
                     wave_hi[i],
                     n_param=2,
                     isplots=True,
                     stellarmodel='kurucz')

    # Load data for testing purposes
    ev = me.loadevent(eventdir + '/d-' + eventname + '-w1')
    #ev           = me.loadevent(eventdir + '/d-' + evname + '-w3')
    #ev           = me.loadevent(eventdir + '/d-wa043bhp1_1125_1650-w3-WHITE')
    aux = me.loadevent(eventdir + '/d-' + eventname + '-data')
    ev.spectra = aux.spectra
    ev.specerr = aux.specerr
    ev.specbg = aux.specbg
    #ev.data      = handle['data']
    ev.data_mhdr = aux.data_mhdr
    ev.data_hdr = aux.data_hdr
    ev.mask = aux.mask

    # Plot image
    plt.figure(2)
    plt.clf()
    plt.imshow(ev.mask[1][0][ev.ywindow[0][0]:ev.ywindow[0][1],
                             ev.xwindow[0][0]:ev.xwindow[0][1]],
               origin='lower')
    plt.imshow(ev.data[32][1][ev.window[0, 1]:ev.window[1, 1]], origin='lower')

    # Plot spectra
    plt.figure(1)
    plt.clf()
    plt.errorbar(ev.wave[0][0], ev.spectra[32, 0, 2], ev.specerr[32, 0, 2])
    plt.vlines(wave_low, 0, np.max(ev.spectra), 'r', 'dashed')
    plt.vlines(wave_hi, 0, np.max(ev.spectra), 'r', 'dashed')

    plt.figure(3)
    plt.clf()
    plt.plot(ev.wave[0], ev.specbg[32, 0])

    return
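
# --- Illustrative sketch (not part of the original code above) ---
# The spectroscopic channel edges used in interactive() follow directly from
# wmin, wmax, and nchan; this quick check reproduces that arithmetic with the
# G102 values quoted above (0.85-1.18 um, 15 channels).
import numpy as np

wmin, wmax, nchan = 0.85, 1.18, 15
binsize = (wmax - wmin) / nchan
wave_low = np.round(np.linspace(wmin, wmax - binsize, nchan), 3)
wave_hi = np.round(np.linspace(wmin + binsize, wmax, nchan), 3)

# Each channel is binsize wide, and the upper edge of one channel is the
# lower edge of the next.
print(round(binsize, 3))   # 0.022
print(wave_low[:3])        # [0.85  0.872 0.894]
print(wave_hi[:3])         # [0.872 0.894 0.916]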
Example #13
0
File: w4ld.py Project: kevin218/WFC3
def ld_driver(eventname,
              eventdir,
              wave_low=None,
              wave_hi=None,
              n_param=4,
              isplots=False,
              stellarmodel='phoenix'):
    '''
    Compute limb-darkening coefficients over the given bandpass from the
    stellar model files attached to the event, and save the results.
    '''
    # Load saved data
    print("Loading saved data...")
    ev = me.loadevent(eventdir + '/d-' + eventname + '-w3')
    aux = me.loadevent(eventdir + '/d-' + ev.eventname2 + '-data')
    ev.spectra = aux.spectra
    '''
    #FINDME: HACK
    ev.file_med = ev.loc_ld + 'kelt11_med.txt'
    ev.file_med = ev.loc_ld + 'lte6250-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    ev.file_low = ev.loc_ld + 'lte6100-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    ev.file_hi  = ev.loc_ld + 'lte6400-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    #print(ev.file_med)
    '''
    n = 0
    m = 0
    ilo = np.where(ev.wave[n][m] > wave_low)[0][0]
    ihi = np.where(ev.wave[n][m] < wave_hi)[0][-1] + 1

    #
    print("Computing limb-darkening coefficients...")
    specwave = ev.wave[n][m][ilo:ihi] * 1e4  #Angstroms
    #iwave    = np.argsort(specwave)
    #specwave = specwave[iwave]*1e4  #Angstroms
    wavelow = specwave[0]
    wavehi = specwave[-1]
    spectrum = np.sum(ev.spectra[n, :, ilo:ihi], axis=0)
    if isplots:
        # Optimal
        ev.ldcoeffs = limbDarkening(ev.file_med,
                                    wavelow,
                                    wavehi,
                                    specwave=specwave,
                                    spectrum=spectrum,
                                    n_param=n_param,
                                    n_plot=4000,
                                    stellarmodel=stellarmodel)
        plt.title(str(n_param) + ' parameter model, optimal fit')
        plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' + str(4000) +
                    str(n_param) + '.png')
        try:
            # Low
            ev.ldcoeffs_low = limbDarkening(ev.file_low,
                                            wavelow,
                                            wavehi,
                                            specwave=specwave,
                                            spectrum=spectrum,
                                            n_param=n_param,
                                            n_plot=4001,
                                            stellarmodel=stellarmodel)
            plt.title(str(n_param) + ' parameter model, low fit')
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(4001) + str(n_param) + '.png')
            # Hi
            ev.ldcoeffs_hi = limbDarkening(ev.file_hi,
                                           wavelow,
                                           wavehi,
                                           specwave=specwave,
                                           spectrum=spectrum,
                                           n_param=n_param,
                                           n_plot=4002,
                                           stellarmodel=stellarmodel)
            plt.title(str(n_param) + ' parameter model, hi fit')
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(4002) + str(n_param) + '.png')
        except:
            pass
    else:
        ev.ldcoeffs = limbDarkening(ev.file_med,
                                    wavelow,
                                    wavehi,
                                    specwave=specwave,
                                    spectrum=spectrum,
                                    n_param=n_param,
                                    n_plot=False,
                                    stellarmodel=stellarmodel)
        try:
            ev.ldcoeffs_low = limbDarkening(ev.file_low,
                                            wavelow,
                                            wavehi,
                                            specwave=specwave,
                                            spectrum=spectrum,
                                            n_param=n_param,
                                            n_plot=False,
                                            stellarmodel=stellarmodel)
            ev.ldcoeffs_hi = limbDarkening(ev.file_hi,
                                           wavelow,
                                           wavehi,
                                           specwave=specwave,
                                           spectrum=spectrum,
                                           n_param=n_param,
                                           n_plot=False,
                                           stellarmodel=stellarmodel)
        except:
            pass
    print(eventname, ev.ldcoeffs)

    # Save results
    print('Saving results...')
    me.saveevent(ev,
                 eventdir + '/d-' + ev.eventname + '-w4',
                 delete=['spectra'])

    return
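
# --- Illustrative sketch (not part of the original code above) ---
# ld_driver brackets the requested bandpass in the wavelength array with
# ilo/ihi before computing limb-darkening coefficients. A small stand-in for
# that index bookkeeping (the real wavelengths come from ev.wave in the saved
# event; the numbers here are invented):
import numpy as np

wave = np.linspace(1.0, 1.7, 8)              # stand-in for ev.wave[n][m], microns
wave_low, wave_hi = 1.15, 1.45

ilo = np.where(wave > wave_low)[0][0]        # first sample above the lower edge
ihi = np.where(wave < wave_hi)[0][-1] + 1    # one past the last sample below the upper edge

specwave = wave[ilo:ihi] * 1e4               # microns -> Angstroms
print(ilo, ihi)                              # 2 5
print(specwave)                              # roughly [12000. 13000. 14000.]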
Example #14
0
def poetRestore(filedir='..', topdir=None, clip=None):
    #global numevents
    #Append system path
    if topdir is None or topdir == 'None':
        r = os.getcwd().split("/")
        topdir = "/".join(r[:r.index("run")])
    sys.path.append(topdir + '/lib/')

    files = []
    event = []
    filename = ''
    for fname in os.listdir(filedir):
        if (fname.endswith("_p5c.dat")):
            files.append(fname[:-4])
    files.sort()
    numevents = len(files)
    if numevents == 0:
        print('Cannot find any files to restore.')
        return []
    for i in np.arange(numevents):
        #Load event
        event.append(me.loadevent(filedir + '/' + files[i]))
        print('Finished loading: ' + event[i].eventname)
        filename = ''.join((filename, event[i].eventname))
        event[i].ancildir = ancildir
        #Clip data set to model and plot only a portion of the entire light curve
        #Good for modeling a transit or eclipse in an around-the-orbit data set
        if clip is not None and clip != 'None':
            if type(clip) == str:
                #Convert from string to 2 ints
                start, end = clip.split(':', 1)
                try:
                    start = int(start)
                except:
                    print("Error with format of optional variable clip.")
                    return []
                try:
                    end = int(end)
                except:
                    end = None
            else:
                if len(clip) == 2:
                    start, end = clip
                else:
                    start = clip[0]
                    end = None
            #Use only data points from 'start' to 'end'
            event[i].phase = event[i].phase[:, start:end]
            event[i].aplev = event[i].aplev[:, start:end]
            event[i].aperr = event[i].aperr[:, start:end]
            event[i].good = event[i].good[:, start:end]
            event[i].time = event[i].time[:, start:end]
            event[i].y = event[i].y[:, start:end]
            event[i].x = event[i].x[:, start:end]
            event[i].sy = event[i].sy[:, start:end]
            event[i].sx = event[i].sx[:, start:end]
            event[i].juldat = event[i].juldat[:, start:end]
            event[i].bjdutc = event[i].bjdutc[:, start:end]
            event[i].bjdtdb = event[i].bjdtdb[:, start:end]
    #Create and populate ancil directory, if it doesn't already exist
    if not os.path.isdir(ancildir):
        os.mkdir(ancildir, 0o775)
        sys.path.append(ancildir)
        for i in np.arange(numevents):
            #Copy params file into new ancil dir.
            paramsfile = (event[i].topdir + modeldir +
                          event[i].eventname + '_params.py')
            event[i].paramsfile = ancildir + event[i].eventname + '_params.py'
            if os.path.isfile(event[i].paramsfile) == False:
                if os.path.isfile(paramsfile):
                    shutil.copy(paramsfile, event[i].paramsfile)
                else:
                    shutil.copy(event[i].topdir + modeldir + 'eg00_params.py',
                                event[i].paramsfile)
            #Copy initial parameters file into new ancil dir
            initpfile = []
            for f in os.listdir(event[i].topdir + modeldir):
                if f.startswith(event[i].eventname) and f.endswith('.txt'):
                    initpfile.append(f)
            if len(initpfile) == 0:
                shutil.copy(event[i].topdir + modeldir + 'eg00-initvals.txt',
                            ancildir)
            for f in initpfile:
                if os.path.isfile(ancildir + f) == False:
                    shutil.copy(event[i].topdir + modeldir + f, ancildir)
    else:
        #On initial setup, rename eg00params and eg00-initvals
        for i in np.arange(numevents):
            event[i].paramsfile = ancildir + event[i].eventname + '_params.py'
            event[i].initvalsfile = (ancildir + event[i].eventname +
                                     '-initvals.txt')
            # Copy eg00params
            if os.path.isfile(event[i].paramsfile) == False:
                print("Missing file: " + event[i].paramsfile)
                try:
                    shutil.copy(ancildir + 'eg00_params.py',
                                event[i].paramsfile)
                except:
                    print("Missing file: " + ancildir + 'eg00_params.py')
            # Copy eg00-initvals
            if os.path.isfile(event[i].initvalsfile) == False:
                print("Missing file: " + event[i].initvalsfile)
                try:
                    shutil.copy(ancildir + 'eg00-initvals.txt',
                                event[i].initvalsfile)
                except:
                    print("Missing file: " + ancildir + 'eg00-initvals.txt')
    return event
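
# --- Illustrative sketch (not part of the original code above) ---
# poetRestore accepts clip either as a 'start:end' string or as a 1-2 element
# sequence. This standalone helper (the name parse_clip is mine, not part of
# POET) isolates that parsing convention:
def parse_clip(clip):
    """Return (start, end) slice bounds from a 'start:end' string, a
    (start, end) pair, or a 1-element sequence (end defaults to None)."""
    if isinstance(clip, str):
        start, end = clip.split(':', 1)
        start = int(start)
        try:
            end = int(end)
        except ValueError:
            end = None              # e.g. '100:' means from 100 to the end
    elif len(clip) == 2:
        start, end = clip
    else:
        start, end = clip[0], None
    return start, end

print(parse_clip('100:500'))   # (100, 500)
print(parse_clip('100:'))      # (100, None)
print(parse_clip([250]))       # (250, None)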
Example #15
0
def main(rundir, cfile=None, cfilename=None):
    '''
    One function to rule them all.
    '''
    # Set up logging of all print statements in this main file
    logfile = 'zen.log'
    templogfile = rundir + '/' + logfile + '.tmp'
    log = logger.Logger(templogfile)
    print("Start: %s" % time.ctime(), file=log)
    
    configobjs = []

    # eventlist holds the events (with optimized photometry) for each model
    # set of a single data set; eventlistlist holds one eventlist per data
    # set. For example, with 3 model sets and 2 data sets, each eventlist
    # has length 3 and eventlistlist has length 2, containing all the events
    # needed for a joint fit of that scenario (currently unsupported, but a
    # future update may change that).
    eventlistlist = []
    # Read the config file into a dictionary
    print("Reading the config file(s).")

    # If no config object or filename is given, read all config files in rundir
    if cfile is None:
        confignames = []
        for fname in os.listdir(rundir):
            if (fname.endswith("-zen.cfg")):
                confignames.append(fname)
        confignames.sort()
        for fname in confignames:
            config = configparser.ConfigParser()
            config.read(rundir + '/' + fname)
            configobjs.append(config)
    # If a filename is provided
    elif isinstance(cfile, str):
        confignames = [cfile]
        config = configparser.ConfigParser()
        config.read(rundir + '/' + cfile)
        configobjs.append(config)
    # Otherwise, use just the object received    
    else:
        configobjs.append(cfile)
        confignames = [cfilename]

    nevents = len(configobjs)

    for m in range(nevents):
        eventlist = []
        fit = []
        nmodelsets = len(configobjs[m]['EVENT']['models'].split('\n'))
        if nmodelsets > 1 and nevents > 1:
            print("WARNING: multiple model sets not supported with" +
                  "joint fits. Please choose a single model for each" +
                  "event.")
            sys.exit()
            
        for n in range(nmodelsets):            
            # Initialize fit object (we don't yet know which event object
            # to attach it to)
            fit.append(readeventhdf.fits())

            # Fill in fit options
            zf.fitopt(fit[n], configobjs[m], rundir, n)

        if nevents > 1 and len(fit[n].bintry) > 1:
            print("WARNING: bin size optimization not supported with" +
                  " joint fits due to issues with shared parameters between" +
                  " data sets. Please set bintry to a single value for" +
                  " each data set (can be different for each).")
            sys.exit()

        for n in range(nmodelsets):
            # Get initial parameters and stepsize arrays from the config
            fit[n].modelfile = rundir + '/' + fit[n].modelfile

            nmodels = len(fit[n].modelstrs)

            parlist = pe.read(fit[n].modelfile,
                              fit[n].modelstrs,
                              None,
                              npldpars=fit[n].npix)
            
            fit[n].params   = []
            fit[n].pmin     = []
            fit[n].pmax     = []
            fit[n].npars    = []
            fit[n].stepsize = []
            for i in np.arange(nmodels):
                pars = parlist[i][2]
                fit[n].params    = np.concatenate((fit[n].params,     pars[0]),  0)
                fit[n].pmin      = np.concatenate((fit[n].pmin,       pars[1]),  0)
                fit[n].pmax      = np.concatenate((fit[n].pmax,       pars[2]),  0)
                fit[n].stepsize  = np.concatenate((fit[n].stepsize,   pars[3]),  0)
                fit[n].npars     = np.concatenate((fit[n].npars, [len(pars[0])]),0)

            # Currently there's a bug in numpy that converts concatenated
            # lists of ints to floats if one list is empty. This is a
            # workaround
            fit[n].npars = [int(p) for p in fit[n].npars]

            fit[n].modelfuncs, fit[n].modeltypes, fit[n].parnames, fit[n].i, fit[n].saveext = \
                        mc.setupmodel(fit[n].modelstrs, fit[n].i, fit[n].npix)

            # Parse priors
            nump = 0
            fit[n].prior    = np.zeros(len(fit[n].parnames))
            fit[n].priorlow = np.zeros(len(fit[n].parnames))
            fit[n].priorup  = np.zeros(len(fit[n].parnames))
            if hasattr(fit[n], "priorvars"):
                if len(fit[n].priorvals) % 3 != 0:
                    print("WARNING: priorvals not specified correctly.")
                for pvar in fit[n].priorvars:
                    if hasattr(fit[n].i, pvar):
                        fit[n].prior   [getattr(fit[n].i, pvar)] = fit[n].priorvals[3*nump]
                        fit[n].priorlow[getattr(fit[n].i, pvar)] = fit[n].priorvals[3*nump+1]
                        fit[n].priorup [getattr(fit[n].i, pvar)] = fit[n].priorvals[3*nump+2]
                    else:
                        print("Prior variable " + pvar + " not recognized.")
                    nump += 1

            fit[n].numm = len(fit[n].modelfuncs)

            nbin  = len(fit[n].bintry)
            ncent = len(fit[n].centdir)
            nphot = len(fit[n].photdir)

            # Set up multiprocessing
            jobs = []
            # Multiprocessing requires 1D arrays (if we use shared memory)
            chisqarray = mp.Array('d', np.zeros(nbin  *
                                                nphot *
                                                ncent))
            chislope   = mp.Array('d', np.zeros(nbin  *
                                                nphot *
                                                ncent))

            # Load the data (images are the same regardless of cent and
            # phot, so we can load prior to the loop)
            event_data = me.loadevent(rundir + '/' + fit[n].eventname + "_ini",
                                      load=['data'])
            data = event_data.data

            # Giant loop over all specified apertures and centering methods
            for l in range(nphot):
                for k in range(ncent):            
                    # Load the POET event object (up through p5)
                    print("Loading the POET event object.", file=log)
                    print("Ap:   " + fit[n].photdir[l], file=log)
                    print("Cent: " + fit[n].centdir[k], file=log)
                    centloc = '/'.join([rundir, fit[n].centdir[k], '']) 
                    photloc = '/'.join([rundir, fit[n].centdir[k],
                                        fit[n].photdir[l], ''])
                    if os.path.isdir(photloc):
                        event = me.loadevent(photloc + fit[n].eventname + "_p5c")
                    else:
                        print("Unable to find "
                              + fit[n].centdir[k] + '/'
                              + fit[n].photdir[l] +
                              ". Skipping.", file=log)
                        fill = np.ones(nbin) * np.inf
                        chisqarray[     nbin*l+nbin*nphot*k:
                                   nbin+nbin*l+nbin*nphot*k] = fill
                        chislope  [     nbin*l+nbin*nphot*k:
                                   nbin+nbin*l+nbin*nphot*k] = fill
                        continue

                    phase = event.phase

                    # Create masks
                    preclipmask  = phase > fit[n].preclip
                    postclipmask = phase < fit[n].postclip
                    fit[n].clipmask = np.logical_and(preclipmask, postclipmask)

                    for i in range(fit[n].ninterclip):
                        interclipmask = np.logical_or(phase < fit[n].interclip[2*i  ],
                                                      phase > fit[n].interclip[2*i+1])
                        fit[n].clipmask = np.logical_and(fit[n].clipmask, interclipmask)

                    fit[n].mask = np.logical_and(   fit[n].clipmask, event.good)


                    npos = data.shape[0]
                    if npos > 1:
                        mflux = np.mean(event.fp.aplev[np.where(event.good)])
                        posmflux = np.zeros((event.good.shape[0],1))
                        for i in range(event.good.shape[0]):
                            posgood = np.where(event.good[i])
                            posmflux[i] = np.mean(event.fp.aplev[i, posgood])
                        event.fp.aplev = event.fp.aplev / posmflux * mflux
                    
                    phasegood = event.phase[fit[n].mask]

                    phot    = event.fp.aplev[fit[n].mask]
                    photerr = event.fp.aperr[fit[n].mask]

                    normfactor = np.average(phot)

                    phot    /= normfactor
                    photerr /= normfactor

                    # Make sure phase is ascending
                    ind = np.argsort(phasegood)
                    phasegood = phasegood[ind]
                    phot      = phot[ind]
                    photerr   = photerr[ind]

                    # Identify the bright pixels to use
                    print("Identifying brightest pixels.", file=log)
                    boxsize = 10

                    xavg, yavg, rows, cols, pixels = zf.pldpixcoords(event,
                                                                     data,
                                                                     fit[n].npix,
                                                                     boxsize,
                                                                     fit[n].mask)

                    print("Doing preparatory calculations.", file=log)
                    phat, dP = zf.zen_init(data, pixels)

                    phatgood = np.zeros(len(fit[n].mask))

                    # Mask out the bad images in phat
                    for i in range(fit[n].npix):
                        tempphat = phat[:,i].copy()
                        tempphatgood = tempphat[fit[n].mask[0]]
                        if i == 0:
                            phatgood = tempphatgood.copy()
                        else:
                            phatgood = np.vstack((phatgood, tempphatgood))

                    # Invert the new array because I lack foresight
                    phatgood  = phatgood.T

                    # Check if maximum binning will work
                    nfreep = np.sum(np.array(fit[n].stepsize) > 0)
                    if len(phot) // np.max(fit[n].bintry) <= nfreep:
                        warnstr = ("Warning! Maximum bin size too large! " +
                                   "Reduce below {} and rerun.")
                        print(warnstr.format(len(phot)//(nfreep+1)),
                              file=log)
                        return

                    # If doing a joint fit, we need to avoid bin size
                    # optimization, because shared parameters across
                    # data sets will cause unintended behavior
                    # For a joint fit to get to this point, it should have
                    # nbin=1, nphot=1, ncent=1. In which case, we can
                    # just set each 1-element array to a single
                    # value and the code will behave correctly
                    if nevents > 1:
                        chisqarray[     nbin*l+nbin*nphot*k:
                                   nbin+nbin*l+nbin*nphot*k] = np.ones(nbin)
                        chislope  [     nbin*l+nbin*nphot*k:
                                   nbin+nbin*l+nbin*nphot*k] = np.ones(nbin) * fit[n].slopethresh
                        continue

                    # Optimize bin size                
                    # Initialize processes
                    p = mp.Process(target=zf.do_bin,
                                   args=(fit[n].bintry, phasegood, phatgood,
                                         phot, photerr, fit[n].modelfuncs,
                                         fit[n].modeltypes, fit[n].params,
                                         fit[n].npars, fit[n].npix,
                                         fit[n].stepsize, fit[n].pmin,
                                         fit[n].pmax,
                                         fit[n].parnames,
                                         chisqarray, chislope, l, k, nphot))

                    # Start process
                    jobs.append(p)
                    p.start()

                    # This intentionally-infinite loop continuously
                    # calculates the number of running processes, then
                    # exits if the number of processes is less than
                    # the number requested. This allows additional
                    # processes to spawn as others finish, which is
                    # more efficient than waiting for them all to
                    # finish since some processes can take much longer
                    # than others
                    while True:
                        procs = 0
                        for proc in jobs:
                            if proc.is_alive():
                                procs += 1

                        if procs < fit[n].nprocbin:
                            break

                        # Save the CPU some work.
                        time.sleep(0.1)

                    # Reduce memory usage (otherwise, extra memory
                    # is used while the next object is being loaded)
                    del phase
                    del event
                    gc.collect()                

            # Make sure all processes finish
            for proc in jobs:
                proc.join()

            fit[n].chisqarray = np.asarray(chisqarray).reshape((ncent,
                                                                nphot,
                                                                nbin))
            fit[n].chislope   = np.asarray(chislope  ).reshape((ncent,
                                                                nphot,
                                                                nbin))

            # Initialize bsig to something ridiculous
            fit[n].bsig = np.inf
            # Determine the best binning. We also demand that the slope be
            # below a threshold (following Deming): if the slope is too far
            # from -1/2, binning is not improving the fit in a sensible way.
            if all(i >= fit[n].slopethresh for i in fit[n].chislope.flatten()):
                print("Slope threshold too low. Increase and rerun.", file=log)
                print("Setting threshold to 0 so run can complete.", file=log)
                fit[n].slopethresh = 0

            for i in range(ncent):
                for j in range(nphot):
                    for k in range(nbin):
                        if (fit[n].chisqarray[i,j,k] <  fit[n].bsig and
                            fit[n].chislope[i,j,k]   <= fit[n].slopethresh):
                            fit[n].bsig   = fit[n].chisqarray[i,j,k]
                            fit[n].bsigsl = fit[n].chislope  [i,j,k]
                            fit[n].icent = i
                            fit[n].iphot = j
                            fit[n].ibin  = k

            if nevents == 1: # Output is nonsense for joint fits
                print("Best aperture:  " +     fit[n].photdir[fit[n].iphot],
                      file=log)
                print("Best centering: " +     fit[n].centdir[fit[n].icent],
                      file=log)
                print("Best binning:   " + str(fit[n].bintry[ fit[n].ibin]),
                      file=log)
                print("Slope of SDNR vs Bin Size: " + str(fit[n].bsigsl),
                      file=log)

            # Create an output directory if not done yet
            fit[n].outdir = '/'.join([rundir,
                                      fit[n].centdir[fit[n].icent],
                                      fit[n].photdir[fit[n].iphot],
                                      fit[n].outdir, ''])
            if not os.path.isdir(fit[n].outdir):           
                os.makedirs(fit[n].outdir)

            # Write configs to output
            for fname in confignames:
                with open(fit[n].outdir + fname, 'w') as newfile:
                    configobjs[0].write(newfile)


            # Make plot of log(bsig) and slope vs phot, cent, bin
            zp.bsigvis( fit[n], savedir=fit[n].outdir)
            zp.chislope(fit[n], savedir=fit[n].outdir)

            # Reload the event object
            centloc = '/'.join([rundir, fit[n].centdir[fit[n].icent], ''])
            photloc = '/'.join([rundir, fit[n].centdir[fit[n].icent],
                                        fit[n].photdir[fit[n].iphot], ''])

            print("Reloading best POET object.", file=log)
            event = me.loadevent(photloc + fit[n].eventname + "_p5c")
            # Adding the fit object to its event
            event.fit = []
            event.fit.append(fit[n])

            phase = event.phase

            preclipmask  = phase > fit[n].preclip
            postclipmask = phase < fit[n].postclip
            fit[n].clipmask = np.logical_and(preclipmask, postclipmask)
            for i in range(fit[n].ninterclip):
                interclipmask = np.logical_or(phase < fit[n].interclip[2*i  ],
                                              phase > fit[n].interclip[2*i+1])
                fit[n].clipmask = np.logical_and(fit[n].clipmask, interclipmask)        
            fit[n].mask = np.logical_and(   fit[n].clipmask, event.good)

            npos = data.shape[0]

            if npos > 1:
                mflux = np.mean(event.fp.aplev[np.where(event.good)])
                posmflux = np.zeros((event.good.shape[0],1))
                for i in range(event.good.shape[0]):
                    posgood = np.where(event.good[i])
                    posmflux[i] = np.mean(event.fp.aplev[i, posgood])
                event.fp.aplev = event.fp.aplev / posmflux * mflux

            phot      = event.fp.aplev[fit[n].mask]
            photerr   = event.fp.aperr[fit[n].mask]
            phasegood = event.phase[fit[n].mask]

            # Make sure phase is ascending
            ind = np.argsort(phasegood)
            phasegood = phasegood[ind]
            phot      = phot[ind]
            photerr   = photerr[ind]

            # Identify the bright pixels to use
            print("Identifying brightest pixels.", file=log)
            xavg, yavg, rows, cols, pixels = zf.pldpixcoords(event, data,
                                                             fit[n].npix,
                                                             boxsize,
                                                             fit[n].mask)

            zp.pixels(event.meanim[:,:,0], pixels,
                      np.ceil(np.sqrt(fit[n].npix)),
                      xavg, yavg, fit[n].eventname, savedir=fit[n].outdir)

            print("Redoing preparatory calculations.", file=log)
            phat, dP = zf.zen_init(data, pixels)

            phatgood = np.zeros(len(fit[n].mask))

            # Mask out the bad images in phat
            for i in range(fit[n].npix):
                tempphat = phat[:,i].copy()
                tempphatgood = tempphat[fit[n].mask[0]]
                if i == 0:
                    phatgood = tempphatgood.copy()
                else:
                    phatgood = np.vstack((phatgood, tempphatgood))

            # Invert the new array because I lack foresight
            phatgood  = phatgood.T

            print("Rebinning to the best binning.", file=log)
            fit[n].binbest = fit[n].bintry[fit[n].ibin]

            binphase, binphot, binphoterr = zf.bindata(phasegood, phot,
                                                       fit[n].binbest,
                                                       yerr=photerr)

            binphotnorm    = binphot    / phot.mean()
            binphoterrnorm = binphoterr / phot.mean()

            for j in range(fit[n].npix):
                if j == 0:
                    _,     binphat = zf.bindata(phasegood,
                                                phatgood[:,j],
                                                fit[n].binbest)
                else:
                    _, tempbinphat = zf.bindata(phasegood,
                                                phatgood[:,j],
                                                fit[n].binbest)
                    binphat = np.column_stack((binphat, tempbinphat))

            fit[n].binphase   = binphase
            fit[n].binphot    = binphot
            fit[n].binphoterr = binphoterr
            fit[n].binphat    = binphat

            fit[n].binphoterrnorm = binphoterrnorm
            fit[n].binphotnorm    = binphotnorm

            fit[n].phase   = phasegood
            fit[n].phot    = phot
            fit[n].photerr = photerr
            fit[n].phat    = phatgood

            eventlist.append(event)
        eventlistlist.append(eventlist)

    # Set up for joint fits and run MCMC
    for n in range(nmodelsets):
        fits = [eventlistlist[i][n].fit[0] for i in range(nevents)]
        mc3y, mc3yerr = [], []
        params, pmin, pmax, stepsize, parnames = [], [], [], [], []
        prior, priorlow, priorup = [], [], []
        for i in range(nevents):
            escale = 1.
            if fits[0].chisqscale:
                print("Rescaling uncertainties for " + fits[i].eventname,
                      file=log)
                ss = fits[i].stepsize.copy()
                # Hacky fix for joint fit issues
                # Ideally we would interpret negative step sizes as
                # whether they set params equal within an event (and
                # use those settings) or whether they set params equal
                # between events and do the following. 
                ss[np.where(ss < 0)] = 1e-5
                indparams = [fits[i].binphase, fits[i].binphat,
                             fits[i].modelfuncs, fits[i].modeltypes,
                             fits[i].npars]
                chisq, _, _, _ = mc3.fit.modelfit(fits[i].params,
                                                  zf.zen, fits[i].binphotnorm,
                                                  fits[i].binphoterrnorm,
                                                  indparams=indparams,
                                                  stepsize=ss,
                                                  pmin=fits[i].pmin,
                                                  pmax=fits[i].pmax,
                                                  prior=fits[i].prior,
                                                  priorlow=fits[i].priorlow,
                                                  priorup=fits[i].priorup)
                nfreep = np.sum(fits[i].stepsize > 0)
                escale = np.sqrt(chisq / (fits[i].binphotnorm.size - nfreep))
                fits[i].binphoterrnorm *= escale
                fits[i].binphoterr     *= escale
            mc3y     = np.concatenate((mc3y,     fits[i].binphotnorm))
            mc3yerr  = np.concatenate((mc3yerr,  fits[i].binphoterrnorm))
            params   = np.concatenate((params,   fits[i].params))
            pmin     = np.concatenate((pmin,     fits[i].pmin))
            pmax     = np.concatenate((pmax,     fits[i].pmax))
            stepsize = np.concatenate((stepsize, fits[i].stepsize))
            parnames = np.concatenate((parnames, fits[i].parnames))
            prior    = np.concatenate((prior,    fits[i].prior))
            priorlow = np.concatenate((priorlow, fits[i].priorlow))
            priorup  = np.concatenate((priorup,  fits[i].priorup))

        # And we're off!    
        print("Beginning MCMC.", file=log)


        mcout = mc3.mcmc(data=mc3y, uncert=mc3yerr, func=zf.mc3zen,
                         indparams=[fits], parname=parnames,
                         params=params, pmin=pmin, pmax=pmax,
                         stepsize=stepsize, prior=prior,
                         priorlow=priorlow, priorup=priorup,
                         walk=fits[0].walk, nsamples=fits[0].nsamples,
                         nchains=fits[0].nchains, nproc=fits[0].nchains,
                         burnin=fits[0].burnin, leastsq=fits[0].leastsq,
                         chisqscale=False,
                         grtest=fits[0].grtest, grbreak=fits[0].grbreak,
                         plots=fits[0].plots,
                         savefile=fits[0].outdir+fits[0].savefile,
                         log=fits[0].outdir+fits[0].mcmclog,
                         chireturn=True)

        bp, CRlo, CRhi, stdp, posterior, Zchain, chiout = mcout

        bpchisq, redchisq, chifactor, bic = chiout

        for fit in fits:
            fit.bic       = bic
            fit.chifactor = chifactor
            fit.bpchisq   = bpchisq
            fit.redchisq  = redchisq
            fit.bp        = bp
            fit.crlo      = CRlo
            fit.crhi      = CRhi
            fit.stdp      = stdp

        # Parse results between fit objects
        counter = 0
        for m in range(nevents):
            event = eventlistlist[m][n]
            fit   = eventlistlist[m][n].fit[0]
            fit.bp   = bp  [counter:counter+np.sum(fit.npars)]
            fit.stdp = stdp[counter:counter+np.sum(fit.npars)]
            counter += np.sum(fit.npars)
        
        # Post-fit analysis
        for m in range(nevents):
            event = eventlistlist[m][n]
            fit   = eventlistlist[m][n].fit[0]

            fit.binbestfit = zf.zen(fit.bp, fit.binphase, fit.binphat,
                                    fit.modelfuncs, fit.modeltypes, fit.npars)

            # Update errors
            fit.binphoterr     *= chifactor
            fit.binphoterrnorm *= chifactor

            # Make a list of best parameters for each model
            bplist = []
            parind = 0

            for i in range(len(fit.modelstrs)):
                bplist.append(fit.bp[parind:parind+fit.npars[i]])
                parind += fit.npars[i]

            # Calculate the model fit without the eclipse
            noeclfit = zf.noeclipse(fit.bp, fit.binphase, fit.binphat,
                                    fit.modelfuncs, fit.modeltypes, fit.npars,
                                    fit.parnames)

            # In case of multiple ecl/tr models, we subtract 1 from each
            # and then add it back in at the end
            fit.bestecl = np.zeros(len(fit.binphase))
            for i in range(len(fit.modelfuncs)):
                if fit.modeltypes[i] == 'ecl/tr':
                    fit.bestecl += (fit.modelfuncs[i](bplist[i], fit.binphase) - 1)

            fit.bestecl += 1

            # Make plots
            print("Making plots.", file=log)
            fit.binnumplot = int(len(fit.binphot)/fit.nbinplot)

            if fit.binnumplot == 0:
                fit.binnumplot = 1

            pbinphase, pbinphot, pbinphoterr = zf.bindata(fit.binphase,
                                                          fit.binphot,
                                                          fit.binnumplot,
                                                          yerr=fit.binphoterr)
            pbinphase, pbinnoeclfit          = zf.bindata(fit.binphase,
                                                          noeclfit,
                                                          fit.binnumplot)
            pbinphase, pbinbestecl           = zf.bindata(fit.binphase,
                                                          fit.bestecl,
                                                          fit.binnumplot)
            pbinphase, pbinbestfit           = zf.bindata(fit.binphase,
                                                          fit.binbestfit,
                                                          fit.binnumplot)


            pbinphotnorm    = pbinphot    / pbinphot.mean()
            pbinphoterrnorm = pbinphoterr / pbinphot.mean()

            zp.normlc(pbinphase, pbinphotnorm, pbinphoterrnorm,
                      pbinnoeclfit, pbinbestecl, fit.binphase,
                      fit.bestecl, 1, title=fit.titles,
                      eventname=fit.eventname, savedir=fit.outdir)

            zp.models(fit, savedir=fit.outdir)

        # Skip post-fit analysis if not desired (saves considerable time)
        if not fit.postanal:
            continue
        
        for m in range(nevents):
            event = eventlistlist[m][n]
            fit   = eventlistlist[m][n].fit[0]
            # Calculate eclipse times in BJD_UTC and BJD_TDB
            # Code adapted from POET p7
            print('Calculating eclipse times in Julian days', file=log)
            offset = event.bjdtdb.flat[0] - event.bjdutc.flat[0]
            if   event.timestd == 'utc':
                fit.ephtimeutc = event.ephtime
                fit.ephtimetdb = event.ephtime + offset
            elif event.timestd == 'tdb':
                fit.ephtimetdb = event.ephtime
                fit.ephtimeutc = event.ephtime - offset
            else:
                print('Assuming that ephemeris is reported in BJD_UTC. Verify!',
                      file=log)
                fit.ephtimeutc = event.ephtime
                fit.ephtimetdb = event.ephtime + offset

            print('BJD_TDB - BJD_UTC = ' + str(offset * 86400.) + ' seconds.',
                  file=log)

            fit.bestmidpt  = fit.bp[  fit.parnames.index('Eclipse Phase')]
            fit.ecltimeerr = fit.stdp[fit.parnames.index('Eclipse Phase')]*event.period

            startutc = event.bjdutc.flat[0]
            starttdb = event.bjdtdb.flat[0]

            fit.ecltimeutc = (np.floor((startutc-fit.ephtimeutc)/event.period) +
                              fit.bestmidpt) * event.period + fit.ephtimeutc
            fit.ecltimetdb = (np.floor((starttdb-fit.ephtimetdb)/event.period) +
                              fit.bestmidpt) * event.period + fit.ephtimetdb

            print('Eclipse time = ' + str(fit.ecltimeutc)
                  + '+/-' + str(fit.ecltimeerr) + ' BJD_UTC', file=log)
            print('Eclipse time = ' + str(fit.ecltimetdb)
                  + '+/-' + str(fit.ecltimeerr) + ' BJD_TDB', file=log)

            # Brightness temperature calculation
            print('Starting Monte-Carlo Temperature Calculation', file=log)    
            kout = kurucz_inten.read(event.kuruczfile, freq=True)

            filterf = np.loadtxt(event.filtfile, unpack=True)
            filterf = np.concatenate((filterf[0:2,::-1].T,[filterf[0:2,0]]))

            logg     = np.log10(event.tep.g.val*100.)
            loggerr  = np.log10(event.tep.g.uncert*100.)
            tstar    = event.tstar
            tstarerr = event.tstarerr

            # Find index of depth
            countfix = 0
            for i in range(len(fit.parnames)):
                if fit.parnames[i] in ['Depth', 'depth', 'Maximum Eclipse Depth', 'Eclipse Depth']:
                    idepth = i

            # Count number of fixed parameters prior to the depth
            # parameter, to adjust the idepth
            for i in range(idepth):
                if fit.stepsize[i] <= 0:
                    countfix += 1

            idepthpost = idepth - countfix

            depthpost = posterior[:,idepthpost]

            if posterior.shape[0] < fit.numcalc:
                print("WARNING: not enough samples for Temperature Monte-Carlo!",
                      file=log)
                print("Reducing numcalc to match size of MCMC posterior.",
                      file=log)
                fit.numcalc  = posterior.shape[0]
                slicenum = posterior.shape[0] // fit.numcalc # always 1, but for clarity
                slicelim = slicenum * fit.numcalc
            else:
                # Since slice step must be an integer, we need to calculate
                # the limit of the posterior to slice such that we get
                # an array of the correct length
                slicenum = posterior.shape[0] // fit.numcalc
                slicelim = slicenum * fit.numcalc

            bsdata    = np.zeros((3,fit.numcalc))

            # Use every nth sample of the eclipse-depth posterior
            bsdata[0] = depthpost[:slicelim:slicenum]
            bsdata[1] = np.random.normal(logg,  loggerr,  fit.numcalc)
            bsdata[2] = np.random.normal(tstar, tstarerr, fit.numcalc)

            tb, tbg, numnegf, fmfreq = zf.calcTb(bsdata, kout, filterf, event)

            tbm   = np.median(tb [np.where(tb  > 0)])
            tbsd  = np.std(   tb [np.where(tb  > 0)])
            tbgm  = np.median(tbg[np.where(tbg > 0)])
            tbgsd = np.std(   tbg[np.where(tbg > 0)])

            print('Band-center brightness temp = '
                  + str(round(tbgm,  2)) + ' +/- '
                  + str(round(tbgsd, 2)) + ' K', file=log)
            print('Integral    brightness temp = '
                  + str(round(tbm,  2)) + ' +/- '
                  + str(round(tbsd, 2)) + ' K', file=log)

            event.fit[0].fluxuc   = event.fp.aplev[np.where(event.good)] 
            event.fit[0].clipmask = fit.clipmask[np.where(event.good)]
            event.fit[0].flux     = event.fp.aplev[fit.mask] # Clipped flux
            event.fit[0].bestfit  = zf.zen(bp, fit.phase, fit.phat, fit.modelfuncs,
                                        fit.modeltypes, fit.npars) # Best fit (norm)

            # Data from plot
            event.fit[0].pbinphase       = pbinphase
            event.fit[0].pbinphot        = pbinphot
            event.fit[0].pbinphoterr     = pbinphoterr
            event.fit[0].pbinnoeclfit    = pbinnoeclfit
            event.fit[0].pbinbestfit     = pbinbestfit
            event.fit[0].pbinphotnorm    = pbinphotnorm
            event.fit[0].pbinphoterrnorm = pbinphoterrnorm

            # Temperatures
            event.fit[0].tbm   = tbm
            event.fit[0].tbsd  = tbsd
            event.fit[0].tbgm  = tbgm
            event.fit[0].tbgsd = tbgsd

            # Optimal phot description
            event.fit[0].bestphotdir   = fit.photdir[fit.iphot]
            event.fit[0].bestcentdir   = fit.centdir[fit.icent]
            event.fit[0].bestbinsize   = fit.bintry [fit.ibin]



            # Write IRSA table and FITS file
            if not os.path.exists(fit.outdir + 'irsa'):
                os.mkdir(fit.outdir + 'irsa')

            # Set the topstring
            topstring = zf.topstring(fit.papername, fit.month, fit.year,
                                     fit.journal, fit.instruments,
                                     fit.programs, fit.authors)

            irsa.do_irsa(event, event.fit[0], directory=fit.outdir,
                         topstring=topstring)

            
    print("Saving.")
    for n in range(nmodelsets):
        for i in range(nevents):
            event = eventlistlist[i][n]
            fit   = eventlistlist[i][n].fit[0]    
            run.p6Save(event, fit.outdir)
            
    minbic = np.inf
    for n in range(nmodelsets):
        for i in range(nevents):
            event = eventlistlist[i][n]
            fit   = eventlistlist[i][n].fit[0]
            print("For models " + ' '.join(fit.modelstrs) + ":", file=log)
            print("Best aperture:  " +     fit.photdir[fit.iphot], file=log)
            print("Best centering: " +     fit.centdir[fit.icent], file=log)
            print("Best binning:   " + str(fit.bintry[ fit.ibin]), file=log)
            minbic = np.min((minbic, fit.bic))

    print("Models\tBIC\tdelBIC", file=log)            
    for n in range(nmodelsets):
        for i in range(nevents):
            event = eventlistlist[i][n]
            fit   = eventlistlist[i][n].fit[0]            
            print(' '.join(fit.modelstrs) + '\t' + str(fit.bic) +
                  '\t' + str(fit.bic - minbic), file=log)
    
    print("End:  %s" % time.ctime(), file=log)

    log.close()
    for n in range(nmodelsets):
        for i in range(nevents):
            fit   = eventlistlist[i][n].fit[0]
            shutil.copy(templogfile, fit.outdir + logfile)

    # Delete temporary log
    os.unlink(templogfile)

    # Return directory of output (not used for joint fits)
    return fit.outdir, fit.centdir[fit.icent], fit.photdir[fit.iphot], chiout
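
# --- Illustrative sketch (not part of the original code above) ---
# main() keeps chisqarray flat (a multiprocessing shared array) and indexes it
# as nbin*l + nbin*nphot*k for aperture l and centering k; the later reshape
# to (ncent, nphot, nbin) recovers that layout. A tiny check of the index
# arithmetic with made-up sizes:
import numpy as np

nbin, nphot, ncent = 4, 3, 2
flat = np.zeros(nbin * nphot * ncent)

for k in range(ncent):
    for l in range(nphot):
        # Fill the slot for aperture l and centering k with a recognizable value.
        flat[nbin*l + nbin*nphot*k:
             nbin + nbin*l + nbin*nphot*k] = 10*k + l

cube = flat.reshape((ncent, nphot, nbin))
# cube[k, l] holds the nbin values written for centering k and aperture l.
print(cube[1, 2])   # [12. 12. 12. 12.]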
Example #16
0
def checks1(eventname, cwd, period=None, ephtime=None):

    owd = os.getcwd()
    os.chdir(cwd)

    # Load the Event
    event = me.loadevent(eventname)

    # Create a log
    oldlogname = event.logname
    logname = event.eventname + "_p5.log"
    log = le.Logedit(logname, oldlogname)
    log.writelog('\nStart Checks: ' + time.ctime())

    # If p5 is run right after p3, we are using results from the PSF fit:
    if not hasattr(event, "phottype"):
        event.phottype = "psffit"
        try:
            os.mkdir("psffit/")
        except:
            pass
        os.chdir("psffit/")

    # Move frame parameters to fit Kevin's syntax:
    # event.fp.param --> event.param
    event.filenames = event.fp.filename
    event.x = event.fp.x
    event.y = event.fp.y
    event.time = event.fp.time
    event.pos = event.fp.pos
    event.frmvis = event.fp.frmvis
    event.filename = event.eventname

    event.aplev = event.fp.aplev
    event.background = event.fp.skylev
    event.good = event.fp.good

    if event.phottype == "aper":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is APERTURE')
    elif event.phottype == "var":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is VARIABLE APERTURE')
    elif event.phottype == "ell":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is ELLIPTICAL APERTURE')
    elif event.phottype == "psffit":
        # FINDME: do something with aperr
        event.aperr = .0025 * np.mean(event.aplev) * np.ones(
            np.shape(event.aplev))
        log.writelog('Photometry method is PSF FITTING')
    elif event.phottype == "optimal":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is OPTIMAL')

    # UPDATE period AND ephtime
    if period is not None:
        event.period = period[0]
        event.perioderr = period[1]
    if ephtime is not None:
        event.ephtime = ephtime[0]
        event.ephtimeerr = ephtime[1]

    log.writelog("\nCurrent event = " + event.eventname)
    log.writelog("Kurucz file     = " + event.kuruczfile)
    log.writelog("Filter file     = " + event.filtfile)

    # Light-time correction to BJD:

    # Julian observation date
    #event.juldat = event.jdjf80 + event.fp.time / 86400.0
    event.juldat = event.fp.juldat = event.j2kjd + event.fp.time / 86400.0

    if not event.ishorvec:
        log.writeclose('\nHorizon file not found!')
        return
    print("Calculating BJD correction...")

    event.fp.bjdcor = np.zeros(event.fp.juldat.shape)
    # Sometimes bad files are just missing files, in which case they have
    # times of 0, which causes problems in the following interpolation, so
    # we must mask out these files.  We don't use the event.fp.good mask
    # because we may still want the BJD of bad images.

    nonzero = np.where(event.fp.time != 0.0)
    event.fp.bjdcor[nonzero] = stc.suntimecorr(event.ra, event.dec,
                                               event.fp.juldat[nonzero],
                                               event.horvecfile)

    # Get bjd times:
    event.bjdcor = event.fp.bjdcor
    #event.bjddat = event.fp.juldat + event.fp.bjdcor / 86400.0
    event.bjdutc = event.fp.juldat + event.fp.bjdcor / 86400.0  # utc bjd date
    event.bjdtdb = np.empty(event.bjdutc.shape)
    for i in range(event.bjdtdb.shape[0]):
        event.bjdtdb[i] = utc_tt.utc_tdb(event.bjdutc[i], event.topdir + '/' +
                                         event.leapdir)  # terrestrial BJD date
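    # Note: BJD_TDB differs from BJD_UTC by the accumulated leap seconds plus
    # 32.184 s (TT - TAI); the ET_OBS - UTCS_OBS header comparison below is an
    # independent check of that offset.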

    # ccampo 3/18/2011: check which units phase should be in
    try:
        if event.tep.ttrans.unit == "BJDTDB":
            event.timestd = "tdb"
            event.fp.phase = tp.time2phase(event.bjdtdb, event.ephtime,
                                           event.period, event.ecltype)
        else:
            event.timestd = "utc"
            event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                           event.period, event.ecltype)
    except:
        event.timestd = "utc"
        event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                       event.period, event.ecltype)

    # assign phase variable
    event.phase = event.fp.phase

    # ccampo 3/18/2011: moved this above
    # Eclipse phase, BJD
    #event.fp.phase = tp.time2phase(event.fp.juldat + event.fp.bjdcor / 86400.0,
    #                               event.ephtime, event.period, event.ecltype)

    # verify leapsecond correction
    hfile = event.filenames[0, 0]
    try:
        image, event.header = fits.getdata(hfile, header=True)
        dt = ((event.bjdtdb - event.bjdutc) * 86400.0)[0, 0]
        dt2 = event.header['ET_OBS'] - event.header['UTCS_OBS']
        log.writelog('Leap second correction : ' + str(dt) + ' = ' + str(dt2))
    except:
        log.writelog('Could not verify leap-second correction.')

    log.writelog('Min and Max light-time correction: ' +
                 str(np.amin(event.fp.bjdcor)) + ', ' +
                 str(np.amax(event.fp.bjdcor)) + ' seconds')

    # Verify light-time correction
    try:
        image, event.header = fits.getdata(hfile, header=True)
        try:
            log.writelog('BJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['BMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
        except:
            log.writelog('HJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['HMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
    except:
        log.writelog('Could not verify light-time correction.')

    # Number of good frames should be > 95%
    log.writelog("Good Frames = %7.3f" % (np.mean(event.good) * 100) + " %")

    log.writelog('\nCentering:     X mean     X stddev  Y mean     Y stddev')
    for pos in range(event.npos):
        log.writelog(
            'position %2d:' % pos +
            ' %10.5f' % np.mean(event.x[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.x[pos, np.where(event.good[pos])]) +
            ' %10.5f' % np.mean(event.y[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.y[pos, np.where(event.good[pos])]))

    # COMPUTE RMS POSITION CONSISTENCY
    event.xprecision = np.sqrt(np.mean(np.ediff1d(event.x)**2))
    event.yprecision = np.sqrt(np.mean(np.ediff1d(event.y)**2))

    log.writelog('RMS of x precision = ' + str(np.round(event.xprecision, 4)) +
                 ' pixels.')
    log.writelog('RMS of y precision = ' + str(np.round(event.yprecision, 4)) +
                 ' pixels.')
    if event.phottype == "aper":
        log.writelog('\nCenter & photometry half-width/aperture sizes = ' +
                     str(event.ctrim) + ', ' + str(event.photap) + ' pixels.')
    log.writelog('Period = ' + str(event.period) + ' +/- ' +
                 str(event.perioderr) + ' days')
    log.writelog('Ephemeris = ' + str(event.ephtime) + ' +/- ' +
                 str(event.ephtimeerr) + ' JD')

    # Compute elliptical area if gaussian centering
    if event.method == 'fgc' or event.method == 'rfgc':
        event.fp.ellarea = np.pi * (3 * event.fp.xsig) * (3 * event.fp.ysig)

    fmt1 = [
        'bo', 'go', 'yo', 'ro', 'ko', 'co', 'mo', 'bs', 'gs', 'ys', 'rs', 'ks',
        'cs', 'ms'
    ]
    fmt2 = ['b,', 'g,', 'y,', 'r,']
    fmt3 = ['b.', 'g.', 'y.', 'r.']

    plt.figure(501)
    plt.clf()
    plt.figure(502, figsize=(8, 12))
    plt.clf()
    plt.figure(503)
    plt.clf()
    plt.figure(504)
    plt.clf()
    plt.figure(505)
    plt.clf()

    for pos in range(event.npos):
        wheregood = np.where(event.good[pos, :])
        # CHOOSE ONLY GOOD FRAMES FOR PLOTTING
        phase = event.phase[pos, :][wheregood]
        aplev = event.aplev[pos, :][wheregood]
        jdtime = event.bjdutc[pos, :][wheregood]
        background = event.background[pos, :][wheregood]
        noisepix = event.fp.noisepix[pos, :][wheregood]
        if event.method == "fgc" or event.method == "rfgc":
            ellarea = event.fp.ellarea[pos, :][wheregood]
            rot = event.fp.rot[pos, :][wheregood]
        # COMPUTE X AND Y PIXEL LOCATION RELATIVE TO ...
        if event.npos > 1:
            # CENTER OF EACH PIXEL
            y = (event.y[pos, :] - np.round(event.y[pos, :]))[wheregood]
            x = (event.x[pos, :] - np.round(event.x[pos, :]))[wheregood]
        else:
            # CENTER OF MEDIAN PIXEL
            y = (event.y[pos, :] - np.round(np.median(event.y)))[wheregood]
            x = (event.x[pos, :] - np.round(np.median(event.x)))[wheregood]

        # SORT aplev BY x, y AND radial POSITIONS
        rad = np.sqrt(x**2 + y**2)
        xx = np.sort(x)
        yy = np.sort(y)
        rr = np.sort(rad)
        xaplev = aplev[np.argsort(x)]
        yaplev = aplev[np.argsort(y)]
        raplev = aplev[np.argsort(rad)]

        # BIN RESULTS FOR PLOTTING POSITION SENSITIVITY EFFECT
        nobj = aplev.size
        nbins = 120 // event.npos
        binxx = np.zeros(nbins)
        binyy = np.zeros(nbins)
        binrr = np.zeros(nbins)
        binxaplev = np.zeros(nbins)
        binyaplev = np.zeros(nbins)
        binraplev = np.zeros(nbins)
        binxapstd = np.zeros(nbins)
        binyapstd = np.zeros(nbins)
        binrapstd = np.zeros(nbins)
        binphase = np.zeros(nbins)
        binaplev = np.zeros(nbins)
        binapstd = np.zeros(nbins)
        binnpix = np.zeros(nbins)
        for i in range(nbins):
            start = int(1. * i * nobj / nbins)
            end = int(1. * (i + 1) * nobj / nbins)
            binxx[i] = np.mean(xx[start:end])
            binyy[i] = np.mean(yy[start:end])
            binrr[i] = np.mean(rr[start:end])
            binxaplev[i] = np.median(xaplev[start:end])
            binyaplev[i] = np.median(yaplev[start:end])
            binraplev[i] = np.median(raplev[start:end])
            binxapstd[i] = np.std(xaplev[start:end]) / np.sqrt(end - start)
            binyapstd[i] = np.std(yaplev[start:end]) / np.sqrt(end - start)
            binrapstd[i] = np.std(raplev[start:end]) / np.sqrt(end - start)
            binphase[i] = np.mean(phase[start:end])
            binaplev[i] = np.median(aplev[start:end])
            binapstd[i] = np.std(aplev[start:end]) / np.sqrt(end - start)
            binnpix[i] = np.mean(noisepix[start:end])

        # PLOT 1: flux
        plt.figure(501)
        plt.errorbar(binphase,
                     binaplev,
                     binapstd,
                     fmt=fmt1[pos],
                     linewidth=1,
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Phase vs. Binned Flux')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # PLOT 2: position-flux
        plt.figure(502)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Position vs. Binned Flux')
        plt.errorbar(binyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('pos %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('pos %i x' % (pos)))
        plt.xlabel('Pixel Position')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        #PLOT 3: position-phase
        plt.figure(503)

        plt.plot(phase, x, 'b,')
        plt.plot(phase, y, 'r,')
        plt.title(event.planetname + ' Phase vs. Position')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Pixel Position')
        plt.legend('xy')

        #PLOT 4: flux-radial distance
        plt.figure(504)
        plt.errorbar(binrr,
                     binraplev,
                     binrapstd,
                     fmt=fmt1[pos],
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Radial Distance vs. Flux')
        plt.xlabel('Distance From Center of Pixel')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # ::::::::::: Background setting :::::::::::::::::
        if np.size(background) != 0:
            # number of points per bin:
            npoints = 42
            nbins = int(np.size(background) // npoints)
            medianbg = np.zeros(nbins)
            bphase = np.zeros(nbins)  # background bin phase
            bintime = np.zeros(nbins)  # background bin JD time
            for i in range(nbins):
                start = int(1.0 * i * npoints)
                end = int(1.0 * (i + 1) * npoints)
                medianbg[i] = np.median(background[start:end])
                bphase[i] = np.mean(phase[start:end])
                bintime[i] = np.mean(jdtime[start:end])

            # PLOT 5: background-phase
            day = int(np.floor(np.amin(jdtime)))
            timeunits1 = jdtime - day
            timeunits2 = bintime - day
            xlabel = 'JD - ' + str(day)
            if event.ecltype == 's':
                timeunits1 = phase
                timeunits2 = bphase
                xlabel = 'Phase'

            plt.figure(505)
            plt.plot(timeunits1,
                     background,
                     color='0.45',
                     linestyle='None',
                     marker=',')
            if np.size(background) > 10000:
                plt.plot(timeunits2, medianbg, fmt2[pos], label='median bins')
            plt.title(event.planetname + ' Background level')
            plt.xlabel(xlabel)
            plt.ylabel('Flux')
            plt.plot(timeunits1[0],
                     background[0],
                     color='0.45',
                     linestyle='None',
                     marker=',',
                     label='all points')
            plt.legend(loc='best')

        else:
            print("WARNING: background has zero size.")

        #PLOT 7: Noise Pixels Binned
        plt.figure(507)
        plt.scatter(binphase, binnpix)
        plt.xlabel("Orbital Phase")
        plt.ylabel("Noise Pixels")
        plt.title(event.planetname + " Binned Noise Pixels")

        #PLOT 8: Noise Pixel Variance
        plt.figure(508)
        npixvar = bd.subarnvar(noisepix, event)
        subarnbinphase = bd.subarnbin(phase, event)
        plt.scatter(subarnbinphase, npixvar, s=1)
        plt.xlabel("Orbital Phase")
        plt.ylabel("Noise Pixel Variance")
        plt.title(event.planetname + " Noise Pixels Variance")

        #PLOT 9 and 10: Elliptical Area and Variance
        if event.method == 'fgc' or event.method == 'rfgc':
            plt.figure(509)
            plt.scatter(phase, ellarea, s=0.1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Elliptical Area")
            plt.title(event.planetname + " Gaussian Centering Elliptical Area")

            plt.figure(510)
            ellareavar = bd.subarnvar(ellarea, event)
            plt.scatter(subarnbinphase, ellareavar, s=1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Elliptical Area Variance")
            plt.title(event.planetname + " Elliptical Area Variance")

        if event.method == 'rfgc':
            plt.figure(511)
            plt.scatter(phase, rot % (np.pi / 2) * 180 / np.pi, s=1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Rotation (deg)")
            plt.title(event.planetname + " Gaussian Centering Rotation")

    #PLOT 6: Preflash
    if event.havepreflash:
        plt.figure(506)
        plt.errorbar((event.prefp.time[0] - event.prefp.time[0, 0]) / 60.,
                     event.prefp.aplev[0],
                     yerr=event.prefp.aperr[0],
                     fmt="o")
        plt.xlabel("Time since start of preflash  (minutes)")
        plt.ylabel("Flux")
        plt.title(event.planetname + " Preflash")

    figname1 = str(event.eventname) + "-fig501.png"
    figname2 = str(event.eventname) + "-fig502.png"
    figname3 = str(event.eventname) + "-fig503.png"
    figname4 = str(event.eventname) + "-fig504.png"
    figname5 = str(event.eventname) + "-fig505.png"
    figname6 = str(event.eventname) + "-fig506.png"
    figname7 = str(event.eventname) + "-fig507.png"
    figname8 = str(event.eventname) + "-fig508.png"
    figname9 = str(event.eventname) + "-fig509.png"
    figname10 = str(event.eventname) + "-fig510.png"
    figname11 = str(event.eventname) + "-fig511.png"

    plt.figure(501)
    plt.savefig(figname1)
    plt.figure(502)
    plt.savefig(figname2)
    plt.figure(503)
    plt.savefig(figname3)
    plt.figure(504)
    plt.savefig(figname4)
    plt.figure(505)
    plt.savefig(figname5)

    plt.figure(506)
    if event.havepreflash:
        plt.savefig(figname6)

    plt.figure(507)
    plt.savefig(figname7)
    plt.figure(508)
    plt.savefig(figname8)

    if event.method == 'fgc' or event.method == 'rfgc':
        plt.figure(509)
        plt.savefig(figname9)
        plt.figure(510)
        plt.savefig(figname10)

    if event.method == 'rfgc':
        plt.figure(511)
        plt.savefig(figname11)

    # Saving
    me.saveevent(event, event.eventname + "_p5c")

    cwd += "/"
    # Print outputs, end-time, and close log.
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_p5c.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)
    log.writelog("Figures:")
    log.writelog(" " + cwd + figname1)
    log.writelog(" " + cwd + figname2)
    log.writelog(" " + cwd + figname3)
    log.writelog(" " + cwd + figname4)
    log.writelog(" " + cwd + figname5)
    if event.havepreflash:
        log.writelog(" " + cwd + figname6)
    log.writelog(" " + cwd + figname7)
    log.writelog(" " + cwd + figname8)
    if event.method == 'fgc' or event.method == 'rfgc':
        log.writelog(" " + cwd + figname9)
        log.writelog(" " + cwd + figname10)
    if event.method == 'rfgc':
        log.writelog(" " + cwd + figname11)
    log.writeclose('\nEnd Checks: ' + time.ctime())

    os.chdir(owd)

    return event
Example #17
def poetRestore(directory='../', clip=None, rundir=None, modeldir=None, params_override=None):
    
    files    = []
    events   = []
    filename = ''
    for fname in os.listdir(directory):
        if (fname.endswith("_p5c.dat")):
            files.append(fname[:-4])
    files.sort()

    if len(files) == 0:
        print('Cannot find any files to restore.')
        return []

    for f in files:

        # Load event
        event = me.loadevent(directory + '/' + f)
        events.append(event)
        print('Finished loading: ' + event.eventname)
        filename = filename + event.eventname
        event.ancildir   = directory + '/' + modeldir + '/'


        # Clip the data set to model and plot only a portion of the entire
        # light curve.  Useful for modeling a single transit or eclipse in an
        # around-the-orbit data set.
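        # Illustrative formats for the clip argument (values are arbitrary):
        #   clip='2000:4000' or clip=(2000, 4000)  -> keep frames 2000-3999
        #   clip='2000:'     or clip=(2000,)       -> keep frames 2000 onward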
        if clip is not None and clip != 'None':

            if type(clip) == str:

                #Convert from string to 2 ints
                start, end = clip.split(':',1)

                try:
                    start = int(start)
                except:
                    print("Error with format of optional variable clip.")
                    return []

                try:
                    end   = int(end)
                except:
                    end   = None

            else:
                if len(clip) == 2:
                    start, end = clip
                else:
                    start = clip[0]
                    end   = None

            # Use only data points from 'start' to 'end'
            event.phase      = event.phase [:,start:end]
            event.aplev      = event.aplev [:,start:end]
            event.aperr      = event.aperr [:,start:end]
            event.good       = event.good  [:,start:end]
            event.time       = event.time  [:,start:end]
            event.y          = event.y     [:,start:end]
            event.x          = event.x     [:,start:end]
            event.juldat     = event.juldat[:,start:end]
            event.bjdutc     = event.bjdutc[:,start:end]
            event.bjdtdb     = event.bjdtdb[:,start:end]

    for event in events:

        # Copy params file into output dir.
        paramsfile = event.eventname + '.pcf'
        event.paramsfile = directory + '/' + modeldir + '/' + paramsfile
        if not os.path.isfile(event.paramsfile):

            if params_override:
                mod = {"params" : params_override}
            else:
                mod = {}
            rd.copy_config(rundir + '/' + paramsfile, ['params'],
                           event.paramsfile, 'w', mod)

        # Copy initial values file into output dir
        initvfile = rd.read_pcf(event.paramsfile, "params",
                                simple=True).modelfile
        event.initvalsfile = directory + '/' + modeldir + '/' + initvfile
        if not os.path.isfile(event.initvalsfile):

            if os.path.isfile(rundir + '/' + initvfile):
                shutil.copy(rundir + '/' + initvfile,
                            event.initvalsfile)

            else:
                shutil.copy(rundir + '/initvals.txt',
                            event.initvalsfile)

    return events
Example #18
# Centroid analysis
# This function performs image differencing between in- and out-of-transit frames
# Result is a PSF at the true spatial location of the transit source
# Amplitude equals the transit depth times the image intensity for target
# Centroid of direct image - centroid of difference image should be ~0
# 3*formal error (3 sigma) on PSF fit of differenced image = arcsec limit on background eclipsing binaries.
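# A minimal numpy sketch of that test (illustrative only; the in-/out-of-transit
# index arrays and the flux-weighted centroid are assumptions, not pipeline code):
#   diffim = data[outtr].mean(axis=0) - data[intr].mean(axis=0)
#   yy, xx = np.indices(diffim.shape)
#   xcen = np.sum(xx * diffim) / np.sum(diffim)
#   ycen = np.sum(yy * diffim) / np.sum(diffim)
#   offset = np.hypot(xcen - xdirect, ycen - ydirect)  # should be ~0 for the target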

import sys, os
r = os.getcwd().split("/")
maindir = "/".join(r[:r.index("run")])
sys.path.append(maindir + '/lib/')

import manageevent  as me
import centerdriver as cd
tempevent = me.loadevent('../../gj436cp22_ctr', load=['data','uncd','mask'])
event[0].data = tempevent.data
del tempevent

'''
hist2d, xedges, yedges = np.histogram2d(event.fp.x[0,ipretr[0]:ipretr[1]], event.fp.y[0,ipretr[0]:ipretr[1]],20)
plt.figure(2)
plt.clf()
plt.suptitle('Pre-Transit')
a=plt.subplot(111)
a.yaxis.set_major_formatter(plt.matplotlib.ticker.FormatStrFormatter('%0.2f'))
plt.imshow(hist2d.T,extent=(xedges[0],xedges[-1],yedges[0],yedges[-1]), cmap=cm.gray_r, 
                            aspect='auto', origin='lower')
plt.colorbar()

hist2d, xedges, yedges = np.histogram2d(event.fp.x[0,iintr[0]:iintr[1]], event.fp.y[0,iintr[0]:iintr[1]],20)
Example #19
def badpix(eventname, control=None):
    tini = time.time()

    # Load the event
    event = me.loadevent(eventname)
    # Load the data
    me.updateevent(event, eventname, event.loadnext)

    # Create a new log starting from the old one.
    oldlogname = event.logname
    logname = event.eventname + ".log"
    log = le.Logedit(logname, oldlogname)
    event.logname = logname
    log.writelog('\nMARK: ' + time.ctime() + ': Starting poet_2badpix.')

    # ccampo 3/18/2011: do this in p5
    # Julian observation date
    #event.fp.juldat = event.jdjf80 + event.fp.time / 86400.0

    # ::::::::::::::::::::::: UNCERTAINTIES ::::::::::::::::::::::::::::::::
    # IRAC subarray data come with bogus uncertainties that are not linearly
    # related to photon noise.  We scale them later, using the reduced chi
    # squared from the model fit.

    # ::::::::::::::::::::::: FLUX CONVERSION :::::::::::::::::::::::::::::
    # Do we want flux (uJy/pix) or surface brightness (MJy/sr) units?  If
    # doing photometry, convert to flux.  Since we care about relative
    # numbers, it doesn't really matter.
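    # For reference, the scaling is roughly
    #   flux [uJy/pix] = SB [MJy/sr] * srperas * xscale * yscale * 1e12,
    # with xscale, yscale the pixel scales in arcsec/pix (the same factor is
    # applied to estbg below; poet_bright2flux may differ in detail).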

    # Convert from surface brightness (MJy/sr) to flux units (uJy/pix)
    if event.fluxunits:
        log.writelog('Converting surface brightness to flux')
        event.data, event.uncd = btf.poet_bright2flux(event.data, event.uncd,
                                                      event.posscl)
        if event.havecalaor:
            event.predata, event.preuncd = btf.poet_bright2flux(
                event.predata, event.preuncd, event.posscl)
            event.postdata, event.postuncd = btf.poet_bright2flux(
                event.postdata, event.postuncd, event.posscl)
    else:
        log.writelog('Did not convert bright to flux.')

    # Mean Background Estimate, from zodi model
    event.estbg = (np.mean(event.fp.zodi[np.where(event.fp.exist)]) +
                   np.mean(event.fp.ism[np.where(event.fp.exist)]) +
                   np.mean(event.fp.cib[np.where(event.fp.exist)]))

    if event.fluxunits:
        event.estbg *= (event.srperas * 1e12 * np.mean(event.posscl[0, :]) *
                        np.mean(event.posscl[1, :]))

    # Bad Pixel Masking
    log.writelog('Find and fix bad pixels')

    # Get permanent bad pixel mask.
    if not event.ispmask[0]:
        log.writelog('\nPermanent Bad pixel mask not found!')
    else:
        hdu = pf.open(str(event.pmaskfile[0].decode('utf-8')))
        if hdu[0].header['bitpix'] == -32:  # if data type is float
            hdu[0].scale(type='int16')  # cast it down to int16
        event.pmask = hdu[0].data

    # IRS FIX:
    # IRS data contain only the blue-peak subarray while the pmask covers
    # the whole array, so crop the pmask (hard-coded):
    if event.photchan == 5:
        event.pmask = event.pmask[3:59, 86:127]

    # Do NOT define sigma, we have a different scheme for finding baddies
    # adds Spitzer rejects: fp.nsstrej  &  our rejects: fp.nsigrej
    event.mask = pbm.poet_badmask(event.data,
                                  event.uncd,
                                  event.pmask,
                                  event.inst.pcrit,
                                  event.bdmskd,
                                  event.inst.dcrit,
                                  event.fp,
                                  nimpos=event.nimpos)

    # User rejected pixels:
    if event.userrej is not None:
        for i in np.arange(np.shape(event.userrej)[0]):
            event.mask[:, event.userrej[i, 0], event.userrej[i, 1], :] = 0
        event.fp.userrej = np.sum(np.sum(1 - event.mask, axis=1), axis=1)
        event.fp.userrej = np.transpose(event.fp.userrej) - event.fp.nsstrej
    else:
        event.fp.userrej = np.zeros((int(event.npos), int(event.maxnimpos)),
                                    dtype=int)

    # define sigma here.
    # adds median sky: fp.medsky
    event.meanim = pcb.poet_chunkbad(event.data, event.uncd, event.mask,
                                     event.nimpos, event.sigma, event.szchunk,
                                     event.fp, event.nscyc)

    log.writelog('Masks combined')

    if event.havecalaor:
        event.premask = pbm.poet_badmask(event.predata,
                                         event.preuncd,
                                         event.pmask,
                                         event.inst.pcrit,
                                         event.prebdmskd,
                                         event.inst.dcrit,
                                         event.prefp,
                                         nimpos=event.calnimpos)

        event.premeanim = pcb.poet_chunkbad(event.predata, event.preuncd,
                                            event.premask, event.calnimpos,
                                            event.sigma, event.szchunk,
                                            event.prefp, event.nscyc)

        event.postmask = pbm.poet_badmask(event.postdata,
                                          event.postuncd,
                                          event.pmask,
                                          event.inst.pcrit,
                                          event.postbdmskd,
                                          event.inst.dcrit,
                                          event.postfp,
                                          nimpos=event.calnimpos)

        event.postmeanim = pcb.poet_chunkbad(event.postdata, event.postuncd,
                                             event.postmask, event.calnimpos,
                                             event.sigma, event.szchunk,
                                             event.postfp, event.nscyc)

    # Save the data

    if event.instrument == 'mips':
        todel = ['bdmskd', 'brmskd']  # what to delete
    else:
        todel = ['bdmskd']

    me.saveevent(event,
                 event.eventname + "_bpm",
                 save=['data', 'uncd', 'mask'],
                 delete=todel)

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_bpm.dat")
    log.writelog(" " + cwd + event.eventname + "_bpm.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose('\nBad pixel masking time (h:m:s):  %s ' % dt)
Example #20
def photometry(event, pcf, photdir, mute):
    tini = time.time()

    # Create photometry log
    logname = event.logname
    log = le.Logedit(photdir + "/" + logname, logname)
    log.writelog("\nStart " + photdir + " photometry: " + time.ctime())

    parentdir = os.getcwd() + "/"
    os.chdir(photdir)

    # copy photom.pcf in photdir
    pcf.make_file("photom.pcf")

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        setattr(event, key, attrib.get(key).get())

    maxnimpos, npos = event.maxnimpos, event.npos
    # allocating frame parameters:
    event.fp.aplev = np.zeros((npos, maxnimpos))  # aperture flux
    event.fp.aperr = np.zeros((npos, maxnimpos))  # aperture error
    event.fp.nappix = np.zeros((npos, maxnimpos))  # number of aperture  pixels
    event.fp.skylev = np.zeros((npos, maxnimpos))  # background sky flux level
    event.fp.skyerr = np.zeros((npos, maxnimpos))  # sky error
    event.fp.nskypix = np.zeros((npos, maxnimpos))  # number of sky pixels
    event.fp.nskyideal = np.zeros(
        (npos, maxnimpos))  # ideal number of sky pixels
    event.fp.status = np.zeros((npos, maxnimpos))  # apphot return status
    event.fp.good = np.zeros((npos, maxnimpos))  # good flag

    # Aperture photometry:
    if not event.dooptimal or event.from_aper is None:

        # Multiprocess setup:
        # Shared memory arrays allow only 1D Arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))
        aperr = Array("d", np.zeros(npos * maxnimpos))
        nappix = Array("d", np.zeros(npos * maxnimpos))
        skylev = Array("d", np.zeros(npos * maxnimpos))
        skyerr = Array("d", np.zeros(npos * maxnimpos))
        nskypix = Array("d", np.zeros(npos * maxnimpos))
        nskyideal = Array("d", np.zeros(npos * maxnimpos))
        status = Array("d", np.zeros(npos * maxnimpos))
        good = Array("d", np.zeros(npos * maxnimpos))
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1  # integer division: used as indices below

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in np.arange(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in np.arange(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in np.arange(npos):
            for i in np.arange(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.from_aper is not None:
        # Load previous aperture photometry if required for optimal:
        evt = me.loadevent(parentdir + event.from_aper + "/" +
                           event.eventname + "_pht")
        event.fp.aplev = evt.fp.aplev
        event.fp.aperr = evt.fp.aperr
        event.fp.nappix = evt.fp.nappix
        event.fp.skylev = evt.fp.skylev
        event.fp.skyerr = evt.fp.skyerr
        event.fp.nskypix = evt.fp.nskypix
        event.fp.nskyideal = evt.fp.nskyideal
        event.fp.status = evt.fp.status
        event.fp.good = evt.fp.good
        event.fp.apraw = evt.fp.apraw

    if event.dooptimal:
        ofp, psf = do.dooptphot(event.data,
                                event.uncd,
                                event.mask,
                                event.fp,
                                event.srcest,
                                event.nimpos,
                                rejlim=[10.45, 1000, 1.5],
                                order=1,
                                resize=event.oresize,
                                norm=1,
                                trim=event.otrim,
                                log=log)
        event.fp = ofp
        event.psf = psf

    elif event.ispsf:
        # PSF aperture correction:
        log.writelog('Calculating PSF aperture:')
        event.aperfrac,    event.psfnappix,    event.psfskylev, \
         event.psfnskypix, event.psfnskyideal, event.psfstatus  \
         = ap.apphot(event.psfim, event.psfctr,
                     event.photap * event.psfexpand,
                     event.skyin  * event.psfexpand,
                     event.skyout * event.psfexpand,
                     med    = event.skymed,
                     expand = event.apscale,
                     nappix  = True, skylev    = True,
                     nskypix = True, nskyideal = True,
                     status  = True)

        event.aperfrac += event.psfskylev * event.psfnappix
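        # aperfrac is now the fraction of the PSF enclosed by the aperture
        # (sky level added back in); dividing the measured flux and error by
        # it corrects the photometry to total source flux.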

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    # For running pixel-level decorrelation (pld)
    if event.ispld and event.npos == 1:
        event.apdata = pld.pld_box(event.data, event.targpos, event.pldhw,
                                   event.fp.skylev)
        log.writelog(
            "Created " + str(event.pldhw * 2 + 1) + "x" +
            str(event.pldhw * 2 + 1) +
            " box around centroid for pixel-level decorrelation and normalized it in time."
        )
    elif event.ispld and event.npos != 1:
        log.writelog(
            "Could not perform pixel-level decorrelation because there is more than 1 nod position."
        )

    # save
    print("\nSaving ...")
    me.saveevent(event,
                 event.eventname + "_pht",
                 delete=['data', 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files (" + event.photdir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_pht.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Photometry. Time (h:m:s):  %s " % dt + "  (" +
                   photdir + ")")
    print("--------------  ------------\n")
Example #21
def run_photometry(eventname, cwd):
    """
  Load the event.
  Read the control file.
  Launch a thread for each centering run.
  """

    owd = os.getcwd()
    os.chdir(cwd)
    config = os.path.basename(eventname)[:-4] + '.pcf'
    pcfs = rd.read_pcf(config, 'photometry')

    if len(pcfs) == 1:  #, I may be in photdir to re-run:
        pcf = pcfs[0]
        if pcf.offset < 0:
            sign = '-'
        else:
            sign = '+'
        # Get name of photometry dir:
        if pcf.phottype == "psffit":
            photdir = 'psffit'
        elif pcf.phottype == "optimal":
            photdir = 'optimal'
        elif pcf.phottype == "var":
            photdir = ('va%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))
        elif pcf.phottype == "ell":
            photdir = ('el%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))
        else:  # pcf[0].phottype == "aper":
            photdir = ('ap%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))
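        # e.g. phottype='aper', photap=2.50, offset=0.0  ->  photdir = 'ap250+000'
        # (illustrative values only)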

        # Append suffix to folder if supplied:
        if pcf.pcfname is not None:
            photdir += "_" + str(pcf.pcfname)

        # If I am in the photometry dir already:
        if cwd[-len(photdir):] == photdir:
            # Go to dir where poet3 files were saved.
            cwd = cwd[:-len(photdir)]
            os.chdir(cwd)

        mute = False  # print to screen

    else:
        mute = True  # do not print to screen

    # Load the event data:
    if pcfs[0].denphot:  # Use denoised data if requested:
        readdata = 'dendata'
    else:
        readdata = 'data'
    event = me.loadevent(eventname, load=[readdata, 'uncd', 'mask'])

    # Loop over each run:
    for pcf in pcfs:

        # Make a copy of the event:
        this_event = copy.copy(event)

        if pcf.offset < 0:
            sign = '-'
        else:
            sign = '+'

        # Get name of photometry dir:
        if pcf.phottype == "psffit":
            photdir = 'psffit'
        elif pcf.phottype == "optimal":
            photdir = 'optimal'
        elif pcf.phottype == "var":
            photdir = ('va%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))
        elif pcf.phottype == "ell":
            photdir = ('el%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))
        else:
            photdir = ('ap%03d' % (pcf.photap * 100) + sign + '%03d' %
                       (np.abs(pcf.offset * 100)))

        # Append suffix to folder if supplied:
        if pcf.pcfname is not None:
            photdir += "_" + str(pcf.pcfname)
        this_event.photdir = photdir

        # Create the photometry directory if it doesn't exist:
        if not os.path.exists(photdir):
            os.mkdir(photdir)

        # copy photom.pcf in photdir
        pcf.make_file(photdir + '/' + config, 'photometry')

        # Launch the thread:
        p = Process(target=photometry,
                    args=(this_event, pcf, photdir, mute, owd))
        p.start()

    os.chdir(owd)
Example #22
# For use in interactive mode

import sys, os
r = os.getcwd().split("/")
maindir = "/".join(r[:r.index("run")])
sys.path.append(maindir + '/lib/')
import manageevent as me

# Load to see and check the results:
# change evtname to the name of your event.
event = me.loadevent('evtname_ini', load=['data', 'uncd', 'bdmskd'])
event = me.loadevent('evtname_bpm', load=['data', 'uncd', 'mask'])
event = me.loadevent('evtname_den', load=['data', 'uncd', 'mask'])
event = me.loadevent('evtname_ctr', load=['data', 'uncd', 'mask'])
event = me.loadevent('evtname_pht')

# Check visually the centering results:
import frameviewer as fv
event = me.loadevent('evtname_pht')
fv.frameviewer(event, zoom=True)  # zoom-in around the target.
fv.frameviewer(event, zoom=False)

# If by any chance you want to run the pipeline from an interactive session:
import poet_1event as p1
import poet_2badpix as p2
import poet_3center as p3
import poet_4photom as p4
import poet_5checks as p5
import poet_denoise as pd

# Change evtname to match your event name.
Example #23
def lcWFC3(eventname,
           eventdir,
           nchan,
           madVariable,
           madVarSet,
           wmin=1.125,
           wmax=1.65,
           expand=1,
           smooth_len=None,
           correctDrift=True,
           isplots=True):
    '''
    Compute photometric flux over specified range of wavelengths

    Parameters
    ----------
    eventname   : Unique identifier for these data
    eventdir    : Location of save file
    nchan       : Number of spectrophotometric channels
    madVariable : Name of the variable recorded by the commented-out MAD
                  diagnostics (otherwise unused)
    madVarSet   : Value of that variable for the MAD diagnostics (otherwise unused)
    wmin        : Minimum wavelength
    wmax        : Maximum wavelength
    expand      : Expansion factor for the spectra
    smooth_len  : Window length for optional spectral smoothing (None to skip)
    correctDrift: Set True to apply the 1D drift correction
    isplots     : Set True to produce plots

    Returns
    -------
    ev          : The updated event object

    History
    -------
    Written by Kevin Stevenson      June 2012

    '''

    # Load saved data
    print("Loading saved data...")
    try:
        ev = me.loadevent(eventdir + '/d-' + eventname + '-w2')
        print('W2 data loaded\n')
    except:
        ev = me.loadevent(eventdir + '/d-' + eventname + '-w0')
        print('W0 data loaded\n')
    aux = me.loadevent(eventdir + '/d-' + eventname + '-data')
    ev.spectra = aux.spectra
    specerr = aux.specerr
    data_mhdr = aux.data_mhdr

    #Replace NaNs with zero
    ev.spectra[np.where(np.isnan(ev.spectra))] = 0

    # Determine wavelength bins
    binsize = (wmax - wmin) / nchan
    wave_low = np.round(np.linspace(wmin, wmax - binsize, nchan), 3)
    wave_hi = np.round(np.linspace(wmin + binsize, wmax, nchan), 3)
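    # e.g. with the default wmin=1.125 and wmax=1.65, and (say) nchan=10, each
    # channel is 0.0525 um wide (nchan has no default; 10 is illustrative).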
    #binwave     = (wave_low + wave_hi)/2.

    # Increase resolution of spectra
    nx = ev.spectra.shape[-1]
    if expand > 1:
        print("Increasing spectra resolution...")
        #hdspectra = np.zeros((ev.n_files,ev.n_reads-1,expand*nx))
        #hdspecerr = np.zeros((ev.n_files,ev.n_reads-1,expand*nx))
        hdspectra = spni.zoom(ev.spectra, zoom=[1, 1, expand])
        hdspecerr = spni.zoom(specerr, zoom=[1, 1, expand]) * np.sqrt(expand)
        hdwave = np.zeros((ev.n_img, ev.n_spec, expand * nx))
        for j in range(ev.n_img):
            hdwave[j] = spni.zoom(ev.wave[j][0], zoom=expand)
        ev.spectra = hdspectra
        specerr = hdspecerr
        ev.wave = hdwave
        nx *= expand

    # Smooth spectra
    if smooth_len is not None:
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                ev.spectra[m, n] = smooth.smooth(ev.spectra[m, n], smooth_len,
                                                 'flat')
    """
    # First read is bad for IMA files
    if ev.n_reads > 2:
        print('WARNING: Marking all first reads as bad.')
        istart = 1
    else:
        print('Using first reads.')
        istart = 0
    """
    print('Using first reads.')
    istart = 0

    if correctDrift == True:
        #Shift 1D spectra
        #Calculate drift over all frames and non-destructive reads
        print('Applying drift correction...')
        ev.drift, ev.goodmask = hst.drift_fit2(ev,
                                               preclip=0,
                                               postclip=None,
                                               width=5 * expand,
                                               deg=2,
                                               validRange=11 * expand,
                                               istart=istart,
                                               iref=ev.iref[0])
        # Correct for drift
        for m in range(ev.n_files):
            for n in range(istart, ev.n_reads - 1):
                spline = spi.UnivariateSpline(np.arange(nx),
                                              ev.spectra[m, n],
                                              k=3,
                                              s=0)
                #ev.spectra[m,n,p] = spline(np.arange(nx)+ev.drift_model[n,m,p])
                #if m==13:
                #    ev.drift[n,m,p] -= 0.476
                #Using measured drift, not model fit
                ev.spectra[m, n] = spline(np.arange(nx) + ev.drift[m, n])
    '''
    # Look for bad columns
    igoodcol    = np.ones(nx)
    normspec    = ev.spectra/np.mean(ev.spectra,axis=2)[:,:,np.newaxis]
    sumspec     = np.sum(normspec,axis=1)/(ev.n_reads-istart-1)
    stdsumspec  = np.std(sumspec, axis=0)
    igoodcol[np.where(stdsumspec > 0.007)] = 0  #FINDME: hard coded
    '''

    print("Generating light curves...")
    ev.eventname2 = ev.eventname
    for i in range(nchan):
        ev.wave_low = wave_low[i]
        ev.wave_hi = wave_hi[i]
        print("Bandpass = %.3f - %.3f" % (ev.wave_low, ev.wave_hi))
        # Calculate photometric flux for each spectrum
        ev.photflux = np.zeros(
            (ev.n_spec, ev.n_files, np.max((1, ev.n_reads - 1 - istart))))
        ev.photfluxerr = np.zeros(
            (ev.n_spec, ev.n_files, np.max((1, ev.n_reads - 1 - istart))))
        #ev.wave         = []
        if ev.detector == 'IR':
            # Compute common wavelength and indices to apply over all observations
            wave = np.zeros(nx)
            for j in range(ev.n_img):
                wave += ev.wave[j][0]
            wave /= ev.n_img
            #index = np.where(np.bitwise_and(wave >= wave_low[i], wave <= wave_hi[i]))[0]
            index = np.where((wave >= wave_low[i]) * (wave <= wave_hi[i]))[0]
            #define numgoodcol, totcol
        else:
            # UVIS: Use all pixels for aperture photometry
            index = range(len(ev.spectra[0, 0, 0]))
        for m in range(ev.n_files):
            '''
            #Select appropriate orbit-dependent wavelength
            if ev.n_img == (np.max(ev.orbitnum)+1):
                j = int(ev.orbitnum[m])
            else:
                j = 0
            #Method 1
            ev.wave.append(np.mean(ev.wavegrid[j][n],axis=0))
            index = np.where(np.bitwise_and(ev.wave[n] >= wave_low, ev.wave[n] <= wave_hi))[0]
            #Method 2
            index = np.where(np.bitwise_and(ev.wave[j][n] >= wave_low, ev.wave[j][n] <= wave_hi))[0]
            '''
            ev.photflux[0, m] = np.sum(ev.spectra[m, istart:, index], axis=0)
            ev.photfluxerr[0, m] = np.sqrt(
                np.sum(specerr[m, istart:, index]**2, axis=0))

        # Save results
        ev.eventname = ev.eventname2 + '_' + str(int(
            ev.wave_low * 1e3)) + '_' + str(int(ev.wave_hi * 1e3))
        #me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3', delete=['data_mhdr', 'spectra', 'specerr'])
        me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3')

        # Produce plot
        if isplots == True:
            plt.figure(3000 + i, figsize=(10, 8))
            plt.clf()
            plt.suptitle('Wavelength range: ' + str(wave_low[i]) + '-' +
                         str(wave_hi[i]))
            ax = plt.subplot(111)
            #for n in range(ev.n_spec):
            #plt.subplot(ev.n_spec,1,1)
            #plt.title('Star ' + str(n))
            #igood   = np.where(ev.goodmask[0])[0]
            iscan0 = np.where(ev.scandir == 0)[0]
            iscan1 = np.where(ev.scandir == 1)[0]
            mjd = np.floor(ev.bjdtdb[0])
            flux0 = np.sum(ev.photflux[0][iscan0], axis=1) / np.sum(
                ev.photflux[0, [iscan0[-1]]])  # forward scan
            #err  = np.sqrt(1 / np.sum(1/ev.photfluxerr[0]**2,axis=1))/np.sum(ev.photflux[0,-1])
            try:
                err0 = np.sqrt(np.sum(ev.photfluxerr[0][iscan0]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
            except:
                err0 = 0
                #err1    = 0
            plt.errorbar(ev.bjdtdb[iscan0] - mjd, flux0, err0, fmt='bo')
            plt.text(
                0.05,
                0.1,
                "MAD = " +
                str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux0))))) +
                " ppm",
                transform=ax.transAxes,
                color='b')
            #print(len(iscan1))
            flux1 = 0

            if len(iscan1) > 0:
                flux1 = np.sum(ev.photflux[0][iscan1], axis=1) / np.sum(
                    ev.photflux[0, [iscan0[-1]]])  # reverse scan
                err1 = np.sqrt(np.sum(ev.photfluxerr[0][iscan1]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
                plt.errorbar(ev.bjdtdb[iscan1] - mjd, flux1, err1, fmt='ro')
                plt.text(
                    0.05,
                    0.05,
                    "MAD = " +
                    str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux1))))) +
                    " ppm",
                    transform=ax.transAxes,
                    color='r')
            plt.ylabel('Normalized Flux')
            plt.xlabel('Time [MJD + ' + str(mjd) + ']')

            plt.subplots_adjust(left=0.10,
                                right=0.95,
                                bottom=0.10,
                                top=0.90,
                                hspace=0.20,
                                wspace=0.3)
            plt.savefig(eventdir + '/figs/Fig' + str(3000 + i) + '-' +
                        ev.eventname + '.png')
            #plt.pause(0.1)

            # f = open('2017-07-15-w1_spec_width_20/W5_MAD_'+ev.madVarStr+'_1D.txt','a+')
            # fooTemp = getattr(ev,madVariable)
            # print('W5: ' + ev.madVarStr + ' = ' + str(fooTemp))
            # f.write(str(fooTemp) + ',' + str(np.round(1e6*np.median(np.abs(np.ediff1d(flux0))))) + ',' + str(np.round(1e6*np.median(np.abs(np.ediff1d(flux1))))) +'\n')
            # f.close()
            # print('W5_MAD_'+ ev.madVarStr +'_1D.txt saved\n')

    if (isplots >= 1) and (ev.detector == 'IR'):
        # Drift
        plt.figure(3100, figsize=(10, 8))
        plt.clf()
        plt.subplot(211)
        for j in range(istart, ev.n_reads - 1):
            plt.plot(ev.drift2D[:, j, 1], '.')
        plt.ylabel('Spectrum Drift Along y')
        plt.subplot(212)
        for j in range(istart, ev.n_reads - 1):
            plt.plot(ev.drift2D[:, j, 0] + ev.drift[:, j], '.')
        plt.ylabel('Spectrum Drift Along x')
        plt.xlabel('Frame Number')
        plt.savefig(eventdir + '/figs/fig3100-Drift.png')

        # 2D light curve with drift correction
        plt.figure(3200, figsize=(7.85, ev.n_files / 20. + 0.8))
        plt.clf()
        vmin = 0.98
        vmax = 1.01
        #FINDME
        normspec = np.zeros((ev.n_files, ev.spectra.shape[2]))
        for p in range(2):
            iscan = np.where(ev.scandir == p)[0]
            if len(iscan) > 0:
                normspec[iscan] = np.mean(ev.spectra[iscan],axis=1)/ \
                                  np.mean(ev.spectra[iscan[ev.inormspec[0]:ev.inormspec[1]]],axis=(0,1))
                #normspec[iscan] = np.mean(ev.spectra[iscan],axis=1)/np.mean(ev.spectra[ev.iref[p]],axis=0)
        #normspec    = np.mean(ev.spectra[:,istart:],axis=1)/np.mean(ev.spectra[ev.inormspec[0]:ev.inormspec[1],istart:],axis=(0,1))
        ediff = np.zeros(ev.n_files)
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        for m in range(ev.n_files):
            ediff[m] = 1e6 * np.median(
                np.abs(np.ediff1d(normspec[m, iwmin:iwmax])))
            plt.scatter(ev.wave[0][0],
                        np.zeros(normspec.shape[-1]) + m,
                        c=normspec[m],
                        s=14,
                        linewidths=0,
                        vmin=vmin,
                        vmax=vmax,
                        marker='s',
                        cmap=plt.cm.RdYlBu_r)
        plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
        plt.xlim(wmin, wmax)
        if nchan > 1:
            xticks = np.round([i for i in np.linspace(wmin, wmax, nchan + 1)],
                              3)
            plt.xticks(xticks, xticks)
            plt.vlines(xticks, 0, ev.n_files, 'k', 'dashed')
        plt.ylim(0, ev.n_files)
        plt.ylabel('Frame Number')
        plt.xlabel(r'Wavelength ($\mu m$)')
        plt.xticks(rotation=30)
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(eventdir + '/figs/fig3200-' + str(nchan) + '-2D_LC.png')
        #plt.savefig(eventdir+'/figs/fig3200-'+str(nchan)+'-2D_LC_'+madVariable+'_'+str(madVarSet)+'.png')

        #ev.mad5 = np.round(np.mean(ediff),0)
        # f = open('2017-07-15-w1_spec_width_20/W5_MAD_'+ev.madVarStr+'.txt','a+')
        # fooTemp = getattr(ev,madVariable)
        # print('W5: ' + ev.madVarStr + ' = ' + str(fooTemp))
        # f.write(str(fooTemp) + ',' + str(np.round(np.mean(ediff),0)) + '\n')
        # f.close()
        # print('W5_MAD_'+ ev.madVarStr +'.txt saved\n')

    if (isplots >= 3) and (ev.detector == 'IR'):
        # Plot individual non-destructive reads
        vmin = 0.97
        vmax = 1.03
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        #FINDME
        normspec = ev.spectra[:, istart:] / np.mean(
            ev.spectra[ev.inormspec[0]:ev.inormspec[1], istart:], axis=0)
        for n in range(ev.n_reads - 1):
            plt.figure(3300 + n, figsize=(8, ev.n_files / 20. + 0.8))
            plt.clf()
            ediff = np.zeros(ev.n_files)
            for m in range(ev.n_files):
                ediff[m] = 1e6 * np.median(
                    np.abs(np.ediff1d(normspec[m, n, iwmin:iwmax])))
                plt.scatter(ev.wave[0][0],
                            np.zeros(normspec.shape[-1]) + m,
                            c=normspec[m, n],
                            s=14,
                            linewidths=0,
                            vmin=vmin,
                            vmax=vmax,
                            marker='s',
                            cmap=plt.cm.RdYlBu_r)
            plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
            plt.xlim(wmin, wmax)
            plt.ylim(0, ev.n_files)
            plt.ylabel('Frame Number')
            plt.xlabel(r'Wavelength ($\mu m$)')
            plt.colorbar()
            plt.tight_layout()
            plt.savefig(ev.eventdir + '/figs/fig' + str(3300 + n) +
                        '-2D_LC.png')
        """
        # Aligned 1D spectra
        plt.figure(3300, figsize=(8,6.5))
        plt.clf()
        #istart=0
        #normspec    = ev.spectra[:,istart:]/np.mean(ev.spectra[:,istart:],axis=2)[:,:,np.newaxis]
        normspec    = ev.spectra[:,:,1:]/np.mean(ev.spectra[:,:,1:],axis=2)[:,:,np.newaxis]
        wave        = ev.wave[0][0][1:]
        sumspec     = np.sum(normspec,axis=1)/(ev.n_reads-istart-1)
        for m in range(10,16):
            plt.plot(wave,sumspec[m],'r-')
        for m in range(7,10):
            plt.plot(wave,sumspec[m],'.k-')
        """

    return ev
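
# Example call (a sketch only; 'evtname', the directory, and nchan=10 are
# placeholders, not values from this pipeline):
#   ev = lcWFC3('evtname', './evtname_dir', nchan=10,
#               madVariable=None, madVarSet=None, correctDrift=True, isplots=True)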