Example #1
import os
import sys

import manageevent as me  # assumed name of the pipeline's event save/load module


def poetSave(event, filedir='..', topdir=None):
    # Append the pipeline library directory to the system path
    if topdir is None:
        r = os.getcwd().split("/")
        topdir = "/".join(r[:r.index("run")])
    sys.path.append(topdir + '/lib/')
    me.saveevent(event, filedir + "/" + event.eventname + "_p5c")
Example #2
def reduceWFC3(eventname,
               eventdir,
               madVariable=False,
               madVarSet=False,
               isplots=False):
    '''
    Reduces data images and calculates optimal spectra.

    Parameters
    ----------
    eventname   : Event name; the <eventname>_params file is imported to
                  initialize the event object
    eventdir    : Directory in which outputs and figures are written
    madVariable : (Optional) Name of an event attribute to override for MAD tests
    madVarSet   : (Optional) Value assigned to madVariable
    isplots     : Set True (or an integer plotting level) to produce plots

    Returns
    -------
    ev : Event object containing the reduced data and extracted spectra
         (returns None if the auxiliary object cannot be initialized)

    Remarks
    -------
    Requires eventname_params file to initialize event object

    Steps
    -----
    1.  Read in all data frames and header info
    2.  Record JD, scan direction, etc
    3.  Group images by frame, batch, and orbit number
    4.  Calculate centroid of direct image(s)
    5.  Calculate trace and 1D+2D wavelength solutions
    6.  Make flats, apply flat field correction
    7.  Manually mask regions
    8.  Apply light-time correction
    9.  Compute difference frames
    10. Compute scan length
    11. Perform outlier rejection of BG region
    12. Background subtraction
    13. Compute 2D drift, apply rough (integer-pixel) correction
    14. Full-frame outlier rejection for time-series stack of NDRs
    15. Apply sub-pixel 2D drift correction
    16. Extract spectrum through summation
    17. Compute median frame
    18. Optimal spectral extraction
    19. Save results, plot figures

    History
    -------
    Written by Kevin Stevenson      January 2017

    '''
    evpname = eventname + '_params'
    #exec 'import ' + evpname + ' as evp' in locals()
    #exec('import ' + evpname + ' as evp', locals())
    exec('import ' + evpname + ' as evp', globals())
    reload(evp)  # Note: in Python 3, reload must be imported from importlib

    t0 = time.time()
    # Initialize event object
    # All parameters are specified in this file
    ev = evp.event_init()
    try:
        aux = evp.aux_init()
    except:
        print("Need to update event file to include auxiliary object.")
        return

    ev.eventdir = eventdir

    # Create directories
    if not os.path.exists(ev.eventdir):
        os.makedirs(ev.eventdir)
    if not os.path.exists(ev.eventdir + "/figs"):
        os.makedirs(ev.eventdir + "/figs")

    # Copy ev_params file
    shutil.copyfile(evpname + '.py', ev.eventdir + '/' + evpname + '.py')

    # Reset attribute for MAD variable (added by K. Showalter)
    if madVariable:
        setattr(ev, madVariable, madVarSet)
        ev.madVarStr = madVariable
        ev.madVariable = madVarSet

    # Object
    ev.obj_list = []  #Do not rename ev.obj_list!
    if ev.objfile is None:
        #Retrieve files within specified range
        for i in range(ev.objstart, ev.objend):
            ev.obj_list.append(ev.loc_sci + ev.filebase + str(i).zfill(4) +
                               ".fits")
    elif ev.objfile == 'all':
        #Retrieve all files from science directory
        for fname in os.listdir(ev.loc_sci):
            ev.obj_list.append(ev.loc_sci + '/' + fname)
        ev.obj_list = sn.sort_nicely(ev.obj_list)
    else:
        #Retrieve filenames from list
        files = np.genfromtxt(ev.objfile, dtype=str, comments='#')
        for fname in files:
            ev.obj_list.append(ev.loc_sci + '/' + fname)
        # handle = open(ev.objfile)
        # for line in handle:
        #     print(line)
        #     ev.obj_list.append(ev.loc_sci + line)
        # handle.close()
    ev.n_files = len(ev.obj_list)

    #Determine image size and filter/grism
    hdulist = pf.open(ev.obj_list[0].rstrip())
    nx = hdulist['SCI', 1].header['NAXIS1']
    ny = hdulist['SCI', 1].header['NAXIS2']
    ev.grism = hdulist[0].header['FILTER']
    ev.detector = hdulist[0].header['DETECTOR']
    ev.flatoffset = [[
        -1 * hdulist['SCI', 1].header['LTV2'],
        -1 * hdulist['SCI', 1].header['LTV1']
    ]]
    n_reads = hdulist['SCI', 1].header['SAMPNUM']
    hdulist.close()

    # Record JD and exposure times
    print('Reading data & headers, recording JD and exposure times...')
    ywindow = ev.ywindow[0]
    xwindow = ev.xwindow[0]
    subny = ywindow[1] - ywindow[0]
    subnx = xwindow[1] - xwindow[0]
    subdata = np.zeros((ev.n_files, n_reads, subny, subnx))
    suberr = np.zeros((ev.n_files, n_reads, subny, subnx))
    data_mhdr = []
    data_hdr = []
    ev.jd = np.zeros(ev.n_files)
    ev.exptime = np.zeros(ev.n_files)
    for m in range(ev.n_files):
        data, err, hdr, mhdr = hst.read(ev.obj_list[m].rstrip())
        subdata[m] = data[0, :, ywindow[0]:ywindow[1], xwindow[0]:xwindow[1]]
        suberr[m] = err[0, :, ywindow[0]:ywindow[1], xwindow[0]:xwindow[1]]
        data_mhdr.append(mhdr[0])
        data_hdr.append(hdr[0])
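        # EXPSTART/EXPEND are in MJD (UTC); adding 2400000.5 converts the
        # mid-exposure time to JD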
        ev.jd[m] = 2400000.5 + 0.5 * (data_mhdr[m]['EXPSTART'] +
                                      data_mhdr[m]['EXPEND'])
        ev.exptime[m] = data_mhdr[m]['EXPTIME']

    # Assign scan direction
    ev.scandir = np.zeros(ev.n_files, dtype=int)
    ev.n_scan0 = 0
    ev.n_scan1 = 0
    try:
        scan0 = data_mhdr[0]['POSTARG2']
        scan1 = data_mhdr[1]['POSTARG2']
        for m in range(ev.n_files):
            if data_mhdr[m]['POSTARG2'] == scan0:
                ev.n_scan0 += 1
            elif data_mhdr[m]['POSTARG2'] == scan1:
                ev.scandir[m] = 1
                ev.n_scan1 += 1
            else:
                print('WARNING: Unknown scan direction for file ' + str(m) +
                      '.')
        print("# of files in scan direction 0: " + str(ev.n_scan0))
        print("# of files in scan direction 1: " + str(ev.n_scan1))
    except:
        ev.n_scan0 = ev.n_files
        print("Unable to determine scan direction, assuming unidirectional.")

    # Group frames into frame, batch, and orbit number
    ev.framenum, ev.batchnum, ev.orbitnum = hst.groupFrames(ev.jd)

    # Determine read noise and gain
    ev.readNoise = np.mean(
        (data_mhdr[0]['READNSEA'], data_mhdr[0]['READNSEB'],
         data_mhdr[0]['READNSEC'], data_mhdr[0]['READNSED']))
    print('Read noise: ' + str(ev.readNoise))
    print('Gain: ' + str(ev.gain))
    #ev.v0 = (ev.readNoise/ev.gain)**2     #Units of ADU
    ev.v0 = ev.readNoise**2  #Units of electrons
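    # v0 (zero-flux variance) is later augmented with the background variance
    # and used in the noise model for optimal spectral extraction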

    # Calculate centroid of direct image(s)
    ev.img_list = []
    if isinstance(ev.directfile, str) and ev.directfile.endswith('.fits'):
        ev.img_list.append(ev.loc_cal + ev.directfile)
    else:
        #Retrieve filenames from list
        handle = open(ev.directfile)
        for line in handle:
            ev.img_list.append(ev.loc_cal + line)
        handle.close()
    ev.n_img = len(ev.img_list)
    ev.centroid, ev.directim = hst.imageCentroid(ev.img_list, ev.centroidguess,
                                                 ev.centroidtrim, ny,
                                                 ev.obj_list[0])
    """
    # Calculate theoretical centroids along spatial scan direction
    ev.centroids = []
    for j in range(ev.n_img):
        ev.centroids.append([])
        for i in range(ev.n_spec):
            # Can assume that scan direction is only in y direction (no x component)
            # because we will apply drift correction to make it so
            ev.centroids[j].append([np.zeros(subny)+ev.centroid[j][0],ev.centroid[j][1]])

    # Calculate trace
    print("Calculating 2D trace and wavelength assuming " + ev.grism + " filter/grism...")
    ev.xrange   = []
    for i in range(ev.n_spec):
        ev.xrange.append(np.arange(ev.xwindow[i][0],ev.xwindow[i][1]))
    ev.trace2d  = []
    ev.wave2d   = []
    for j in range(ev.n_img):
        ev.trace2d.append([])
        ev.wave2d.append([])
        for i in range(ev.n_spec):
            ev.trace2d[j].append(hst.calcTrace(ev.xrange[i], ev.centroids[j][i], ev.grism))
            ev.wave2d[j].append(hst.calibrateLambda(ev.xrange[i], ev.centroids[j][i], ev.grism)/1e4)     #wavelength in microns

    if ev.detector == 'IR':
        print("Calculating slit shift values using last frame...")
        i = 0   #Use first spectrum
        j = -1  #Use last image
        #spectrum    = subdata[j]
        spectrum    = pf.getdata(ev.obj_list[j])
        ev.slitshift, ev.shift_values, ev.yfit = hst.calc_slitshift2(spectrum, ev.xrange[i], ev.ywindow[i], ev.xwindow[i])
        ev.wavegrid  = ev.wave2d
        ev.wave = []
        for j in range(ev.n_img):
            ev.wave.append([])
            for i in range(ev.n_spec):
                ev.wave[j].append(np.mean(ev.wavegrid[j][i],axis=0))
    else:
        # Assume no slitshift for UVIS
        ev.yfit         = range(ev.ywindow[0][1] - ev.ywindow[0][0])
        ev.slitshift    = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
        ev.shift_values = np.zeros(len(ev.yfit))

    # Make list of master flat field frames
    subflat     = np.ones((ev.n_img,ev.n_spec,subny,subnx))
    flatmask    = np.ones((ev.n_img,ev.n_spec,subny,subnx))
    if ev.flatfile == None:
        print('No flat frames found.')
        flat_hdr    = None
        flat_mhdr   = None
    else:
        print('Loading flat frames...')
        for j in range(ev.n_img):
            tempflat, tempmask = hst.makeflats(ev.flatfile, ev.wavegrid[j], ev.xwindow, ev.ywindow, ev.flatoffset, ev.n_spec, ny, nx, sigma=ev.flatsigma)
            for i in range(ev.n_spec):
                subflat[j][i]   = tempflat[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
                flatmask[j][i]  = tempmask[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]

    # Manually mask regions [specnum, colstart, colend]
    if hasattr(ev, 'manmask'):
        print("\rMasking manually identified bad pixels.")
        for j in range(ev.n_img):
            for i in range(len(ev.manmask)):
                ind, colstart, colend, rowstart, rowend = ev.manmask[i]
                n = ind % ev.n_spec
                flatmask[j][n][rowstart:rowend,colstart:colend] = 0 #ev.window[:,ind][0]:ev.window[:,ind][1]

    # Calculate reduced image
    for m in range(ev.n_files):
        #Select appropriate flat, mask, and slitshift
        if ev.n_img == (np.max(ev.orbitnum)+1):
            j = int(ev.orbitnum[m])
        else:
            j = 0
        for n in range(n_reads):
            subdata[m][n] /= subflat[j][0]

    """
    # Read in drift2D from previous iteration
    # np.save("drift2D.npy",ev.drift2D)
    #try:
    #    drift2D = np.load("drift2D.npy")
    #except:
    #    print("drift2D.npy not found.")
    drift2D = np.zeros((ev.n_files, n_reads - 1, 2))
    # Calculate centroids for each grism frame
    ev.centroids = np.zeros((ev.n_files, n_reads - 1, 2))
    for m in range(ev.n_files):
        for n in range(n_reads - 1):
            ev.centroids[m, n] = np.array([
                ev.centroid[0][0] + drift2D[m, n, 0],
                ev.centroid[0][1] + drift2D[m, n, 1]
            ])
            #ev.centroids[m,n] = np.array([np.zeros(subny)+ev.centroid[0][0]+drift2D[m,n,0],
            #                              np.zeros(subnx)+ev.centroid[0][1]+drift2D[m,n,1]])

    # Calculate trace
    print("Calculating 2D trace and wavelength assuming " + ev.grism +
          " filter/grism...")
    ev.xrange = np.arange(ev.xwindow[0][0], ev.xwindow[0][1])
    trace2d = np.zeros((ev.n_files, n_reads - 1, subny, subnx))
    wave2d = np.zeros((ev.n_files, n_reads - 1, subny, subnx))
    for m in range(ev.n_files):
        for n in range(n_reads - 1):
            trace2d[m, n] = hst.calcTrace(ev.xrange, ev.centroids[m, n],
                                          ev.grism)
            wave2d[m, n] = hst.calibrateLambda(
                ev.xrange, ev.centroids[m, n],
                ev.grism) / 1e4  #wavelength in microns

    # Assume no slitshift
    ev.yfit = range(ev.ywindow[0][1] - ev.ywindow[0][0])
    ev.slitshift = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
    ev.shift_values = np.zeros(len(ev.yfit))
    ev.wave = np.mean(wave2d, axis=2)
    print("Wavelength Range: %.3f - %.3f" % (np.min(ev.wave), np.max(ev.wave)))
    #iwmax       = np.where(ev.wave[0][0]>1.65)[0][0]
    #print(ev.wave[0,0])
    #print(ev.wave[0,1])
    #print(ev.centroids)

    # Make list of master flat field frames
    subflat = np.ones((ev.n_files, subny, subnx))
    flatmask = np.ones((ev.n_files, subny, subnx))
    if ev.flatfile is None:
        print('No flat frames found.')
        flat_hdr = None
        flat_mhdr = None
    else:
        print('Loading flat frames...')
        print(ev.flatfile)
        for m in range(ev.n_files):
            tempflat, tempmask = hst.makeflats(ev.flatfile,
                                               [np.mean(wave2d[m], axis=0)],
                                               ev.xwindow,
                                               ev.ywindow,
                                               ev.flatoffset,
                                               ev.n_spec,
                                               ny,
                                               nx,
                                               sigma=ev.flatsigma)
            #tempflat    = [pf.getdata(ev.flatfile)]
            #tempmask    = [np.ones(tempflat[0].shape)]
            subflat[m] = tempflat[0][ywindow[0]:ywindow[1],
                                     xwindow[0]:xwindow[1]]
            flatmask[m] = tempmask[0][ywindow[0]:ywindow[1],
                                      xwindow[0]:xwindow[1]]

    # Manually mask regions [specnum, colstart, colend]
    if hasattr(ev, 'manmask'):
        print("\rMasking manually identified bad pixels.")
        for m in range(ev.n_files):
            for i in range(len(ev.manmask)):
                ind, colstart, colend, rowstart, rowend = ev.manmask[i]
                flatmask[m][rowstart:rowend, colstart:colend] = 0

    #FINDME: Change flat field
    #subflat[:,:,28] /= 1.015
    #subflat[:,:,50] /= 1.015
    #subflat[:,:,70] *= 1.01
    """
    plt.figure(2)
    plt.clf()
    plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
                vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
    plt.ylim(65,95)
    plt.show()
    """
    # Calculate reduced image
    subdata /= subflat[:, np.newaxis]
    #subdata /= np.mean(subflat,axis=0)[np.newaxis,np.newaxis]
    """
    # FINDME
    # Perform self flat field calibration
    # drift2D_int  = np.round(edrift2D,0)
    # Identify frames outside SAA
    iNoSAA      = np.where(np.round(drift2D[:,0,0],0)==0)[0]
    # Select subregion with lots of photons
    normdata    = np.copy(subdata[iNoSAA,-1,69:91,15:147])
    normmask    = flatmask[iNoSAA,69:91,15:147]
    normdata[np.where(normmask==0)] = 0
    # Normalize flux in each row to remove ramp/transit/variable scan rate
    normdata   /= np.sum(normdata,axis=2)[:,:,np.newaxis]
    # Divide by mean spectrum to remove wavelength dependence
    normdata   /= np.mean(normdata,axis=(0,1))[np.newaxis,np.newaxis,:]
    # Average frames to get flat-field correction
    flat_norm   = np.mean(normdata,axis=0)
    flat_norm[np.where(np.mean(normmask,axis=0)<1)] = 1
    '''
    normdata   /= np.mean(normdata,axis=(1,2))[:,np.newaxis,np.newaxis]
    flat_window = np.median(normdata,axis=0)
    medflat     = np.median(flat_window, axis=0)
    flat_window /= medflat
    flat_window /= np.median(flat_window,axis=1)[:,np.newaxis]
    flat_norm   = flat_window/np.mean(flat_window)
    '''
    plt.figure(3)
    plt.clf()
    plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
                vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
    plt.ylim(65,95)

    ff      = np.load('ff.npy')
    subff   = ff[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]

    #subdata[:,:,69:91,15:147] /= flat_norm
    subdata /= subff

    plt.figure(4)
    plt.clf()
    plt.imshow(subdata[10,-1],origin='lower',aspect='auto',vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
    plt.ylim(65,95)

    plt.figure(1)
    plt.clf()
    plt.imshow(flat_norm,origin='lower',aspect='auto')
    plt.colorbar()
    plt.tight_layout()
    plt.pause(0.1)

    ev.flat_norm = flat_norm
    return ev
    """
    """
    if isplots:
        # Plot normalized flat fields
        plt.figure(1000, figsize=(12,8))
        plt.clf()
        plt.suptitle('Master Flat Frames')
        for i in range(ev.n_spec):
            for j in range(ev.n_img):
                #plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
                plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
                plt.title(str(j) +','+ str(i))
                plt.imshow(subflat[j][i], origin='lower')
        plt.tight_layout()
        plt.savefig(ev.eventdir + '/figs/fig1000-Flats.png')
        # Plot masks
        plt.figure(1001, figsize=(12,8))
        plt.clf()
        plt.suptitle('Mask Frames')
        for i in range(ev.n_spec):
            for j in range(ev.n_img):
                #plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
                plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
                plt.title(str(j) +','+ str(i))
                plt.imshow(flatmask[j][i], origin='lower')
        plt.tight_layout()
        plt.savefig(ev.eventdir + '/figs/fig1001-Masks.png')
        if ev.detector == 'IR':
            # Plot Slit shift
            plt.figure(1004, figsize=(12,8))
            plt.clf()
            plt.suptitle('Model Slit Tilts/Shifts')
            plt.plot(ev.shift_values, ev.yfit, '.')
            plt.plot(ev.slitshift, range(ev.ywindow[0][0],ev.ywindow[0][1]), 'r-', lw=2)
            plt.xlim(-1,1)
            plt.savefig(ev.eventdir + '/figs/fig1004-SlitTilt.png')
        plt.pause(0.1)
    """

    ev.ra = data_mhdr[0]['RA_TARG'] * np.pi / 180
    ev.dec = data_mhdr[0]['DEC_TARG'] * np.pi / 180
    if ev.horizonsfile is not None:
        # Apply light-time correction, convert to BJD_TDB
        # Horizons file created for HST around time of observations
        print("Converting times to BJD_TDB...")
        ev.bjd_corr = suntimecorr.suntimecorr(ev.ra, ev.dec, ev.jd,
                                              ev.horizonsfile)
        bjdutc = ev.jd + ev.bjd_corr / 86400.
        ev.bjdtdb = utc_tt.utc_tt(bjdutc, ev.leapdir)
        print('BJD_corr range: ' + str(ev.bjd_corr[0]) + ', ' +
              str(ev.bjd_corr[-1]))
    else:
        print("No Horizons file found.")
        ev.bjdtdb = ev.jd

    if n_reads > 1:
        ev.n_reads = n_reads
        # Subtract pairs of subframes
        diffdata = np.zeros((ev.n_files, ev.n_reads - 1, subny, subnx))
        differr = np.zeros((ev.n_files, ev.n_reads - 1, subny, subnx))
        for m in range(ev.n_files):
            for n in range(n_reads - 1):
                #diffmask[m,n] = np.copy(flatmask[j][0])
                #diffmask[m,n][np.where(suberr[m,n  ] > diffthresh*np.std(suberr[m,n  ]))] = 0
                #diffmask[m,n][np.where(suberr[m,n+1] > diffthresh*np.std(suberr[m,n+1]))] = 0
                diffdata[m, n] = subdata[m, n + 1] - subdata[m, n]
                differr[m, n] = np.sqrt(suberr[m, n + 1]**2 + suberr[m, n]**2)
    else:
        # FLT data has already been differenced
        # FLT files subtract first from last, 2 reads
        ev.n_reads = 2
        diffdata = subdata
        differr = suberr

    diffmask = np.zeros((ev.n_files, ev.n_reads - 1, subny, subnx))
    guess = np.zeros((ev.n_files, ev.n_reads - 1), dtype=int)
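    # 'guess' stores the estimated row (y) position of the spectrum in each
    # differenced read, taken as the median row of the bright pixels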
    for m in range(ev.n_files):
        #Select appropriate mask
        #if ev.n_img == (np.max(ev.orbitnum)+1):
        #    j = int(ev.orbitnum[m])
        #else:
        #    j = 0
        for n in range(n_reads - 1):
            diffmask[m, n] = np.copy(flatmask[m][0])
            try:
                diffmask[m, n][np.where(
                    differr[m, n] > ev.diffthresh *
                    np.median(differr[m, n], axis=1)[:, np.newaxis])] = 0
                #diffdata[m,n] *= diffmask[m,n]
            except:
                # May fail for FLT files
                print("Diffthresh failed.")

            foo = diffdata[m, n] * diffmask[m, n]
            guess[m, n] = np.median(np.where(foo > np.mean(foo))[0]).astype(int)
        # Guess may be skewed if first file is zeros
        if guess[m, 0] < 0 or guess[m, 0] > subny:
            guess[m, 0] = guess[m, 1]

    # Compute full scan length
    ev.scanHeight = np.zeros(ev.n_files)
    for m in range(ev.n_files):
        scannedData = np.sum(subdata[m, -1], axis=1)
        xmin = np.min(guess[m])
        xmax = np.max(guess[m])
        scannedData /= np.median(scannedData[xmin:xmax + 1])
        scannedData -= 0.5
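        # After normalizing and subtracting 0.5, the spline's roots mark the
        # half-maximum crossings, i.e., the top and bottom edges of the scan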
        #leftEdge    = np.where(scannedData > 0)/2)[0][0]
        #rightEdge   = np.where(scannedData > 0)/2)[0][-1]
        #yrng        = range(leftEdge-5, leftEdge+5, 1)
        yrng = range(subny)
        spline = spi.UnivariateSpline(yrng, scannedData[yrng], k=3, s=0)
        roots = spline.roots()
        try:
            ev.scanHeight[m] = roots[1] - roots[0]
        except:
            pass

    #Outlier rejection of sky background along time axis
    print("Performing background outlier rejection...")
    import sigrej, optspex
    for p in range(2):
        iscan = np.where(ev.scandir == p)[0]
        if len(iscan) > 0:
            for n in range(ev.n_reads - 1):
                # Set limits on the sky background
                x1 = (guess[iscan, n].min() - ev.fitbghw).astype(int)
                x2 = (guess[iscan, n].max() + ev.fitbghw).astype(int)
                bgdata1 = diffdata[iscan, n, :x1]
                bgmask1 = diffmask[iscan, n, :x1]
                bgdata2 = diffdata[iscan, n, x2:]
                bgmask2 = diffmask[iscan, n, x2:]
                bgerr1 = np.median(suberr[iscan, n, :x1])
                bgerr2 = np.median(suberr[iscan, n, x2:])
                estsig1 = [bgerr1 for j in range(len(ev.sigthresh))]
                estsig2 = [bgerr2 for j in range(len(ev.sigthresh))]
                diffmask[iscan, n, :x1] = sigrej.sigrej(bgdata1, ev.sigthresh,
                                                        bgmask1, estsig1)
                diffmask[iscan, n, x2:] = sigrej.sigrej(bgdata2, ev.sigthresh,
                                                        bgmask2, estsig2)

    # Write background
    #global bg, diffmask
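    # Callback for pool.apply_async: stores each fitted background and updated mask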
    def writeBG(arg):
        background, mask, m, n = arg
        bg[m, n] = background
        diffmask[m, n] = mask
        return

    # STEP 3: Fit sky background with out-of-spectra data
    # FINDME: parallelize bg subtraction
    print("Performing background subtraction...")
    x1 = np.zeros((ev.n_files, ev.n_reads - 1), dtype=int)
    x2 = np.zeros((ev.n_files, ev.n_reads - 1), dtype=int)
    bg = np.zeros((diffdata.shape))
    if ev.ncpu == 1:
        # Only 1 CPU
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                x1[m, n] = (guess[m, n] - ev.fitbghw).astype(int)
                x2[m, n] = (guess[m, n] + ev.fitbghw).astype(int)
                writeBG(
                    hst.fitbg(diffdata[m, n], diffmask[m, n], x1[m, n],
                              x2[m, n], ev.bgdeg, ev.p3thresh, isplots, m, n,
                              ev.n_files))
    else:
        # Multiple CPUs
        pool = mp.Pool(ev.ncpu)
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                x1[m, n] = (guess[m, n] - ev.fitbghw).astype(int)
                x2[m, n] = (guess[m, n] + ev.fitbghw).astype(int)
                res = pool.apply_async(hst.fitbg,
                                       args=(diffdata[m, n], diffmask[m, n],
                                             x1[m, n], x2[m, n], ev.bgdeg,
                                             ev.p3thresh, isplots, m, n,
                                             ev.n_files),
                                       callback=writeBG)
        pool.close()
        pool.join()
        res.wait()
    print(" Done.")

    # STEP 2: Calculate variance
    bgerr = np.std(bg, axis=2) / np.sqrt(np.sum(diffmask, axis=2))
    bgerr[np.where(np.isnan(bgerr))] = 0.
    ev.v0 += np.mean(bgerr**2)
    variance = abs(diffdata) / ev.gain + ev.v0
    #variance    = abs(subdata*submask) / gain + v0
    # Perform background subtraction
    diffdata -= bg

    #
    '''
    foo = np.sum(diffdata*diffmask, axis=2)
    guess = []
    for i in range(nreads-1):
        guess.append(np.median(np.where(foo[i] > np.mean(foo[i]))[0]).astype(int))
    guess   = np.array(guess)
    # Guess may be skewed if first file is zeros
    if guess[0] < 0 or guess[0] > subnx:
        guess[0] = guess[1]
    '''

    # Write drift2D
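    # Callback for pool.apply_async: stores the 2D drift of frame m, read n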
    def writeDrift2D(arg):
        drift2D, m, n = arg
        # Assign to array of spectra and uncertainties
        ev.drift2D[m, n] = drift2D
        return

    '''
    # Calulate drift2D
    def calcDrift2D():#im1, im2, m, n):
        print("test")
        drift2D = imr.chi2_shift(im1, im2, boundary='constant', nthreads=4,
                                 zeromean=False, return_error=False)
        return (drift2D, m, n)
    '''
    print("Calculating 2D drift...")
    #FINDME: instead of calculating scanHeight, consider fitting stretch factor
    ev.drift2D = np.zeros((ev.n_files, ev.n_reads - 1, 2))
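    # Drift is measured relative to the reference frame ev.iref[p] of the
    # corresponding scan direction p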
    if ev.ncpu == 1:
        # Only 1 CPU
        for m in range(ev.n_files):
            p = int(ev.scandir[m])
            for n in range(ev.n_reads - 1):
                writeDrift2D(
                    hst.calcDrift2D(
                        diffdata[ev.iref[p], n] * diffmask[ev.iref[p], n],
                        diffdata[m, n] * diffmask[m, n], m, n, ev.n_files))
    else:
        # Multiple CPUs
        pool = mp.Pool(ev.ncpu)
        for m in range(ev.n_files):
            p = int(ev.scandir[m])
            for n in range(ev.n_reads - 1):
                #res = pool.apply_async(hst.calcDrift2D)
                res = pool.apply_async(
                    hst.calcDrift2D,
                    args=(diffdata[ev.iref[p], n] * diffmask[ev.iref[p], n],
                          diffdata[m, n] * diffmask[m, n], m, n, ev.n_files),
                    callback=writeDrift2D)
        pool.close()
        pool.join()
        res.wait()
    print(" Done.")
    #np.save("drift2D.npy",ev.drift2D)

    #global shiftdata, shiftmask
    print("Performing rough, pixel-scale drift correction...")
    import scipy.ndimage.interpolation as spni
    ev.drift2D_int = np.round(ev.drift2D, 0)
    shiftdata = np.zeros(diffdata.shape)
    shiftmask = np.zeros(diffmask.shape)
    shiftvar = np.zeros(diffdata.shape)
    shiftbg = np.zeros(diffdata.shape)
    # Correct for drift by integer pixel numbers, no interpolation
    for m in range(ev.n_files):
        for n in range(ev.n_reads - 1):
            shiftdata[m, n] = spni.shift(diffdata[m, n],
                                         -1 * ev.drift2D_int[m, n, ::-1],
                                         order=0,
                                         mode='constant',
                                         cval=0)
            shiftmask[m, n] = spni.shift(diffmask[m, n],
                                         -1 * ev.drift2D_int[m, n, ::-1],
                                         order=0,
                                         mode='constant',
                                         cval=0)
            shiftvar[m, n] = spni.shift(variance[m, n],
                                        -1 * ev.drift2D_int[m, n, ::-1],
                                        order=0,
                                        mode='constant',
                                        cval=0)
            shiftbg[m, n] = spni.shift(bg[m, n],
                                       -1 * ev.drift2D_int[m, n, ::-1],
                                       order=0,
                                       mode='constant',
                                       cval=0)
            """
            # spni.shift does not handle constant boundaries correctly
            if ev.drift2D_int[m,n,0] > 0:
                shiftdata[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
                shiftmask[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
                shiftvar [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
                shiftbg  [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
            elif ev.drift2D_int[m,n,0] < 0:
                #print(m,n,-1*ev.drift2D_int[m,n,0])
                shiftdata[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
                shiftmask[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
                shiftvar [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
                shiftbg  [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
            """

    # Outlier rejection of full frame along time axis
    print("Performing full-frame outlier rejection...")
    for p in range(2):
        iscan = np.where(ev.scandir == p)[0]
        if len(iscan) > 0:
            for n in range(ev.n_reads - 1):
                #y1  = guess[ev.iref,n] - ev.spec_width
                #y2  = guess[ev.iref,n] + ev.spec_width
                #estsig      = [differr[ev.iref,n,y1:y2] for j in range(len(ev.sigthresh))]
                shiftmask[iscan, n] = sigrej.sigrej(shiftdata[iscan, n],
                                                    ev.sigthresh,
                                                    shiftmask[iscan, n])  #, estsig)
    """
    # Replace bad pixels using 2D Gaussian kernal along x and time axes
    def writeReplacePixels(arg):
        shift, m, n, i, j   = arg
        shiftdata[m,n,i,j]  = shift
        return

    #import smoothing
    #reload(smoothing)
    ny, nx, sy, sx = (2,2,1,1)
    wherebad    = np.array(np.where(shiftmask==0)).T
    #smdata      = np.copy(shiftdata)
    print("Replacing " + str(len(wherebad)) + " bad pixels...")
    k       = 0
    ktot    = len(wherebad)
    #FINDME: multiple CPUs is inefficient
    if ev.ncpu >= 1:
        # Only 1 CPU
        for m,n,i,j in wherebad:
            #sys.stdout.write('\r'+str(k+1)+'/'+str(len(wherebad)))
            #sys.stdout.flush()
            writeReplacePixels(hst.replacePixels(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx))
            #Pad image initially with zeros
            #newim = np.zeros(np.array(shiftdata[:,n,:,j].shape) + 2*np.array((ny, nx)))
            #newim[ny:-ny, nx:-nx] = shiftdata[:,n,:,j]
            #Calculate kernel
            #gk = smoothing.gauss_kernel_mask2((ny,nx), (sy,sx), (m,i), shiftmask[:,n,:,j])
            #shiftdata[m,n,i,j] = np.sum(gk * newim[m:m+2*ny+1, i:i+2*nx+1])
            k += 1
    else:
        # Multiple CPUs
        pool = mp.Pool(ev.ncpu)
        for m,n,i,j in wherebad:
            res = pool.apply_async(hst.replacePixels, args=(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx), callback=writeReplacePixels)
            k += 1
        pool.close()
        pool.join()
        res.wait()
    print(" Done.")
    """
    if isplots >= 3:
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                plt.figure(1010)
                plt.clf()
                plt.suptitle(str(m) + "," + str(n))
                plt.subplot(211)
                plt.imshow(shiftdata[m, n] * shiftmask[m, n],
                           origin='lower',
                           aspect='auto',
                           vmin=0,
                           vmax=500)
                plt.subplot(212)
                #plt.imshow(submask[i], origin='lower', aspect='auto', vmax=1)
                mean = np.median(shiftbg[m, n])
                std = np.std(shiftbg[m, n])
                plt.imshow(shiftbg[m, n],
                           origin='lower',
                           aspect='auto',
                           vmin=mean - 3 * std,
                           vmax=mean + 3 * std)
                plt.savefig(ev.eventdir + '/figs/fig1010-' + str(m) + '-' +
                            str(n) + '-Image+Background.png')
                #plt.pause(0.1)
    """
    apdata  = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    apmask  = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    apvar   = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    apbg    = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    for n in range(ev.n_reads-1):
        y1  = guess[ev.iref,n] - ev.spec_width
        y2  = guess[ev.iref,n] + ev.spec_width
        apdata[:,n] = shiftdata[:,n,y1:y2]
        apmask[:,n] = shiftmask[:,n,y1:y2]
        apvar [:,n] = shiftvar [:,n,y1:y2]
        apbg  [:,n] = shiftbg  [:,n,y1:y2]
    """
    print("Performing sub-pixel drift correction...")
    istart = 0
    #corrdata    = np.zeros(diffdata.shape)
    #corrmask    = np.zeros(diffdata.shape)
    # Select aperture data
    apdata = np.zeros((ev.n_files, ev.n_reads - 1, ev.spec_width * 2, subnx))
    apmask = np.zeros((ev.n_files, ev.n_reads - 1, ev.spec_width * 2, subnx))
    apvar = np.zeros((ev.n_files, ev.n_reads - 1, ev.spec_width * 2, subnx))
    apbg = np.zeros((ev.n_files, ev.n_reads - 1, ev.spec_width * 2, subnx))
    iy, ix = np.indices((ev.spec_width * 2, subnx))
    iyy, ixx = np.indices((subny, subnx))
    #FINDME: should be using (3,3)
    kx, ky = (1, 1)
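    # kx = ky = 1: bilinear spline interpolation when evaluating each read at
    # the drift-shifted coordinates below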
    # Correct for drift
    for n in range(ev.n_reads - 1):
        #FINDME: change below to handle case of single scan direction
        y1 = [
            guess[ev.iref[0], n] - ev.spec_width,
            guess[ev.iref[1], n] - ev.spec_width
        ]
        #y2  = guess[ev.iref,n] + ev.spec_width
        for m in range(ev.n_files):
            p = int(ev.scandir[m])
            # Data
            spline = spi.RectBivariateSpline(range(subny),
                                             range(subnx),
                                             shiftdata[m, n],
                                             kx=kx,
                                             ky=ky,
                                             s=0)
            apdata[m, n] = spline.ev(
                (iy + y1[p] + ev.drift2D[m, n, 1] - ev.drift2D_int[m, n, 1]).flatten(),
                (ix + ev.drift2D[m, n, 0] - ev.drift2D_int[m, n, 0]).flatten()
            ).reshape(ev.spec_width * 2, subnx)
            #spni.shift works identically to spi.RectBivariateSpline
            #set prefilter=False and order=3 to enable spline filtering (smoothing)
            #apdata[m,n] = spni.shift(shiftdata[m,n], ev.drift2D_int[m,n,::-1]-ev.drift2D[m,n,::-1], order=1,
            #                         mode='constant', cval=0, prefilter=True)[y1:y2]
            # Mask
            spline = spi.RectBivariateSpline(range(subny),
                                             range(subnx),
                                             shiftmask[m, n],
                                             kx=kx,
                                             ky=ky,
                                             s=0)
            apmask[m, n] = np.round(spline.ev(
                (iy + y1[p] + ev.drift2D[m, n, 1] - ev.drift2D_int[m, n, 1]).flatten(),
                (ix + ev.drift2D[m, n, 0] - ev.drift2D_int[m, n, 0]).flatten()
            ).reshape(ev.spec_width * 2, subnx), 0).astype(int)
            # Variance
            spline = spi.RectBivariateSpline(range(subny),
                                             range(subnx),
                                             shiftvar[m, n],
                                             kx=kx,
                                             ky=ky,
                                             s=0)
            apvar[m, n] = spline.ev(
                (iy + y1[p] + ev.drift2D[m, n, 1] - ev.drift2D_int[m, n, 1]).flatten(),
                (ix + ev.drift2D[m, n, 0] - ev.drift2D_int[m, n, 0]).flatten()
            ).reshape(ev.spec_width * 2, subnx)
            # Background
            spline = spi.RectBivariateSpline(range(subny),
                                             range(subnx),
                                             shiftbg[m, n],
                                             kx=kx,
                                             ky=ky,
                                             s=0)
            apbg[m, n] = spline.ev(
                (iy + y1[p] + ev.drift2D[m, n, 1] - ev.drift2D_int[m, n, 1]).flatten(),
                (ix + ev.drift2D[m, n, 0] - ev.drift2D_int[m, n, 0]).flatten()
            ).reshape(ev.spec_width * 2, subnx)
    """
    #Outlier rejection of aperture along time axis
    print("Performing aperture outlier rejection...")
    for n in range(ev.n_reads-1):
        #y1  = guess[ev.iref,n] - ev.spec_width
        #y2  = guess[ev.iref,n] + ev.spec_width
        #estsig      = [differr[ev.iref,n,y1:y2] for j in range(len(ev.sigthresh))]
        apmask[:,n] = sigrej.sigrej(apdata[:,n], ev.sigthresh, apmask[:,n])#, estsig)
    """
    # STEP 4: Extract standard spectrum and its variance
    #stdspec     = np.zeros((ev.n_files,subnx))
    #stdvar      = np.zeros((ev.n_files,subnx))
    #stdbg       = np.zeros((ev.n_files,subnx))
    #fracMaskReg = np.zeros(nreads-1)
    #for n in range(nreads-1):
    #stdspec     = np.sum((apdata*apmask), axis=2)
    #stdvar      = np.sum((apvar *apmask), axis=2)
    stdspec = np.sum(apdata, axis=2)
    stdvar = np.sum(apvar, axis=2)
    #stdbg       = np.sum((bg      *apmask), axis=2)
    # Compute fraction of masked pixels within regular spectral extraction window
    numpixels = 2. * ev.spec_width * subnx
    fracMaskReg = (numpixels - np.sum(apmask, axis=(2, 3))) / numpixels

    # Compute median frame
    ev.meddata = np.median(apdata, axis=0)

    # Extract optimal spectrum with uncertainties
    print("Performing optimal spectral extraction...")
    spectra = np.zeros((stdspec.shape))
    specerr = np.zeros((stdspec.shape))
    fracMaskOpt = np.zeros((ev.n_files, ev.n_reads - 1))
    #tempmask    = np.ones((ev.spec_width*2,subnx))
    for m in range(ev.n_files):
        sys.stdout.write('\r' + str(m + 1) + '/' + str(ev.n_files))
        sys.stdout.flush()
        for n in range(ev.n_reads - 1):
            #smoothspec  = smooth.medfilt(stdspec[i], window_len)
            spectra[m, n], specerr[m, n], mask = optspex.optimize(
                apdata[m, n],
                apmask[m, n],
                apbg[m, n],
                stdspec[m, n],
                ev.gain,
                ev.v0,
                p5thresh=ev.p5thresh,
                p7thresh=ev.p7thresh,
                fittype=ev.fittype,
                window_len=ev.window_len,
                deg=ev.deg,
                n=m,
                iread=n,
                isplots=isplots,
                eventdir=ev.eventdir,
                meddata=ev.meddata[n])
            # Compute fraction of masked pixels within optimal spectral extraction window
            numpixels = 1. * mask.size
            fracMaskOpt[m, n] = (np.sum(apmask[m, n]) - np.sum(mask)) / numpixels
    print(" Done.")

    if isplots >= 3:
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                plt.figure(1011)
                plt.clf()
                plt.suptitle(str(m) + "," + str(n))
                #plt.errorbar(ev.wave[m], stdspec, np.sqrt(stdvar), fmt='-')
                plt.errorbar(range(subnx),
                             stdspec[m, n],
                             np.sqrt(stdvar[m, n]),
                             fmt='b-')
                plt.errorbar(range(subnx),
                             spectra[m, n],
                             specerr[m, n],
                             fmt='g-')
                plt.savefig(ev.eventdir + '/figs/fig1011-' + str(m) + '-' +
                            str(n) + '-Spectrum.png')
                #plt.pause(0.1)

    # Calculate total time
    total = (time.time() - t0) / 60.
    print('\nTotal time (min): ' + str(np.round(total, 2)))

    ev.guess = guess

    # Save results
    print('Saving results...')
    aux.spectra = spectra
    aux.specerr = specerr
    #aux.specbg      = specbg
    aux.fracMaskReg = fracMaskReg
    aux.fracMaskOpt = fracMaskOpt
    aux.data_hdr = data_hdr
    aux.data_mhdr = data_mhdr
    aux.mask = mask
    #aux.trace2d     = trace2d
    #aux.wave2d      = wave2d
    #aux.bias_mhdr   = bias_mhdr
    aux.subflat = subflat
    me.saveevent(aux, ev.eventdir + '/d-' + ev.eventname + '-data')
    me.saveevent(ev, ev.eventdir + '/d-' + ev.eventname +
                 '-w2')  #, delete=['flat_mhdr'])

    if isplots:
        # 2D light curve without drift correction
        plt.figure(1012, figsize=(8, ev.n_files / 20. + 0.8))
        plt.clf()
        if ev.grism == 'G102':
            wmin = 0.82
            wmax = 1.22
        else:  #G141
            wmin = 1.125
            wmax = 1.65
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        vmin = 0.97
        vmax = 1.03
        #FINDME
        normspec = np.zeros((ev.n_files, subnx))
        for p in range(2):
            iscan = np.where(ev.scandir == p)[0]
            if len(iscan) > 0:
                normspec[iscan] = (np.mean(spectra[iscan], axis=1) /
                                   np.mean(spectra[iscan[ev.inormspec[0]:ev.inormspec[1]]],
                                           axis=(0, 1)))
        #normspec    = np.mean(spectra,axis=1)/np.mean(spectra[ev.inormspec[0]:ev.inormspec[1]],axis=(0,1))
        #normspec    = np.mean(spectra,axis=1)/np.mean(spectra[-6:],axis=(0,1))
        #normspec    = np.mean(ev.stdspec,axis=1)/np.mean(ev.stdspec[-6:],axis=(0,1))
        ediff = np.zeros(ev.n_files)
        for m in range(ev.n_files):
            ediff[m] = 1e6 * np.median(
                np.abs(np.ediff1d(normspec[m, iwmin:iwmax])))
            plt.scatter(ev.wave[0][0],
                        np.zeros(subnx) + m,
                        c=normspec[m],
                        s=14,
                        linewidths=0,
                        vmin=vmin,
                        vmax=vmax,
                        marker='s',
                        cmap=plt.cm.RdYlBu_r)
        plt.title("MAD = " + str(np.round(np.mean(ediff), 0).astype(int)) +
                  " ppm")
        plt.xlim(wmin, wmax)
        plt.ylim(0, ev.n_files)
        plt.ylabel('Frame Number')
        plt.xlabel(r'Wavelength ($\mu m$)')
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(ev.eventdir + '/figs/fig1012-2D_LC.png')
        '''
        # Plot individual non-destructive reads
        vmin        = 0.97
        vmax        = 1.03
        iwmin       = np.where(ev.wave[0][0]>wmin)[0][0]
        iwmax       = np.where(ev.wave[0][0]>wmax)[0][0]
        normspec    = spectra[:,istart:]/np.mean(spectra[-6:,istart:],axis=0)
        for n in range(ev.n_reads-1):
            plt.figure(1100+n, figsize=(8,6.5))
            plt.clf()
            ediff       = np.zeros(ev.n_files)
            for m in range(ev.n_files):
                ediff[m]    = 1e6*np.median(np.abs(np.ediff1d(normspec[m,n,iwmin:iwmax])))
                plt.scatter(ev.wave[0][0], np.zeros(normspec.shape[-1])+m, c=normspec[m,n],
                            s=14,linewidths=0,vmin=vmin,vmax=vmax,marker='s',cmap=plt.cm.RdYlBu_r)
            plt.title("MAD = "+str(np.round(np.mean(ediff),0)) + " ppm")
            plt.xlim(wmin,wmax)
            plt.ylim(0,ev.n_files)
            plt.ylabel('Frame Number')
            plt.xlabel('Wavelength ($\mu m$)')
            plt.colorbar()
            plt.tight_layout()
            plt.savefig(ev.eventdir+'/figs/fig'+str(1100+n)+'-2D_LC.png')
        '''

    #FINDME
    ev.spectra = spectra
    ev.subflat = subflat
    ev.subdata = subdata
    ev.suberr = suberr
    ev.diffdata = diffdata
    ev.differr = differr
    ev.diffmask = diffmask
    ev.shiftdata = shiftdata
    ev.shiftmask = shiftmask
    ev.bg = bg
    ev.apdata = apdata
    ev.apmask = apmask
    ev.stdspec = stdspec
    '''
    #ev.mad2 = np.round(np.mean(ediff),0).astype(int)
    f = open('W2_MAD_'+ madVariable +'.txt','a+')
    f.write(str(madVarSet) + ',' + str(np.round(np.mean(ediff),0).astype(int)) + '\n')
    f.close()
    print('W2_MAD_'+ madVariable +'.txt saved\n')
    '''

    return ev
Example #3
def photometry(event, pcf, photdir, mute):
    tini = time.time()

    # Create photometry log
    logname = event.logname
    log = le.Logedit(photdir + "/" + logname, logname)
    log.writelog("\nStart " + photdir + " photometry: " + time.ctime())

    parentdir = os.getcwd() + "/"
    os.chdir(photdir)

    # Copy photom.pcf into photdir
    pcf.make_file("photom.pcf")

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        setattr(event, key, attrib.get(key).get())

    maxnimpos, npos = event.maxnimpos, event.npos
    # allocating frame parameters:
    event.fp.aplev = np.zeros((npos, maxnimpos))  # aperture flux
    event.fp.aperr = np.zeros((npos, maxnimpos))  # aperture error
    event.fp.nappix = np.zeros((npos, maxnimpos))  # number of aperture  pixels
    event.fp.skylev = np.zeros((npos, maxnimpos))  # background sky flux level
    event.fp.skyerr = np.zeros((npos, maxnimpos))  # sky error
    event.fp.nskypix = np.zeros((npos, maxnimpos))  # number of sky pixels
    event.fp.nskyideal = np.zeros(
        (npos, maxnimpos))  # ideal number of sky pixels
    event.fp.status = np.zeros((npos, maxnimpos))  # apphot return status
    event.fp.good = np.zeros((npos, maxnimpos))  # good flag

    # Aperture photometry:
    if not event.dooptimal or event.from_aper is None:

        # Multi-process setup:
        # Shared memory arrays allow only 1D Arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))
        aperr = Array("d", np.zeros(npos * maxnimpos))
        nappix = Array("d", np.zeros(npos * maxnimpos))
        skylev = Array("d", np.zeros(npos * maxnimpos))
        skyerr = Array("d", np.zeros(npos * maxnimpos))
        nskypix = Array("d", np.zeros(npos * maxnimpos))
        nskyideal = Array("d", np.zeros(npos * maxnimpos))
        status = Array("d", np.zeros(npos * maxnimpos))
        good = Array("d", np.zeros(npos * maxnimpos))
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1
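        # (+1 rounds up so the final chunk covers any remainder frames)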

        print("Number of cores: " + str(event.ncores))
        # Start multi-process:
        processes = []
        for nc in np.arange(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in np.arange(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in np.arange(npos):
            for i in np.arange(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.from_aper is not None:
        # Load previous aperture photometry if required for optimal:
        evt = me.loadevent(parentdir + event.from_aper + "/" +
                           event.eventname + "_pht")
        event.fp.aplev = evt.fp.aplev
        event.fp.aperr = evt.fp.aperr
        event.fp.nappix = evt.fp.nappix
        event.fp.skylev = evt.fp.skylev
        event.fp.skyerr = evt.fp.skyerr
        event.fp.nskypix = evt.fp.nskypix
        event.fp.nskyideal = evt.fp.nskyideal
        event.fp.status = evt.fp.status
        event.fp.good = evt.fp.good
        event.fp.apraw = evt.fp.apraw

    if event.dooptimal:
        ofp, psf = do.dooptphot(event.data,
                                event.uncd,
                                event.mask,
                                event.fp,
                                event.srcest,
                                event.nimpos,
                                rejlim=[10.45, 1000, 1.5],
                                order=1,
                                resize=event.oresize,
                                norm=1,
                                trim=event.otrim,
                                log=log)
        event.fp = ofp
        event.psf = psf

    elif event.ispsf:
        # PSF aperture correction:
        log.writelog('Calculating PSF aperture:')
        (event.aperfrac, event.psfnappix, event.psfskylev,
         event.psfnskypix, event.psfnskyideal, event.psfstatus) = ap.apphot(
             event.psfim, event.psfctr,
             event.photap * event.psfexpand,
             event.skyin * event.psfexpand,
             event.skyout * event.psfexpand,
             med=event.skymed,
             expand=event.apscale,
             nappix=True, skylev=True,
             nskypix=True, nskyideal=True,
             status=True)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    # For running pixel-level decorrelation (pld)
    if event.ispld and event.npos == 1:
        event.apdata = pld.pld_box(event.data, event.targpos, event.pldhw,
                                   event.fp.skylev)
        log.writelog(
            "Created " + str(event.pldhw * 2 + 1) + "x" +
            str(event.pldhw * 2 + 1) +
            " box around centroid for pixel-level decorrelation and normalized it in time."
        )
    elif event.ispld and event.npos != 1:
        log.writelog(
            "Could not perform pixel-level decorrelation because there is more than 1 nod position."
        )

    # save
    print("\nSaving ...")
    me.saveevent(event,
                 event.eventname + "_pht",
                 delete=['data', 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files (" + event.photdir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_pht.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Photometry. Time (h:m:s):  %s " % dt + "  (" +
                   photdir + ")")
    print("--------------  ------------\n")
Example #4
def centering(event, pcf, centerdir, owd):

    os.chdir(centerdir)

    tini = time.time()

    # Create centering log
    log = le.Logedit(event.logname, event.logname)
    log.writelog("\nStart " + centerdir + " centering: " + time.ctime())

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        setattr(event, key, attrib.get(key))

    # Check least asym parameters work:
    if event.method in ['lac', 'lag']:
        if event.ctrim < (event.cradius + event.csize) and event.ctrim != 0:
            event.ctrim = event.cradius + event.csize + 1
            log.writelog('Trim radius is too small, changed to: %i' %
                         event.ctrim)
        if event.psfctrim < (event.psfcrad +
                             event.psfcsize) and event.psfctrim != 0:
            event.psfctrim = event.psfcrad + event.psfcsize + 1
            log.writelog('PSF Trim radius is too small, changed to: %i' %
                         event.psfctrim)

    # Centering bad pixel mask:
    centermask = np.ones((event.ny, event.nx))
    if event.ymask is not None:
        ymask = np.asarray(event.ymask, int)
        xmask = np.asarray(event.xmask, int)
        for i in range(len(ymask)):
            centermask[ymask[i], xmask[i]] = 0

    # PSF:
    # Re-evaluate if a PSF has been redefined:
    if event.newpsf is not None:
        event.ispsf = os.path.isfile(event.newpsf)
        if event.ispsf:
            event.psffile = event.newpsf
            log.writelog('The PSF file has been redefined!')
            log.writelog("PSF:     " + event.psffile)

    # PSF Centering:
    if event.ispsf:
        event.psfim = fits.getdata(event.psffile)
        # Guess of the center of the PSF (center of psfim)
        psfctrguess = np.asarray(np.shape(event.psfim)) // 2
        # Do not find center of PSF:
        if event.nopsfctr:
            event.psfctr = psfctrguess
        # Find center of PSF:
        else:
            if event.method == "bpf" or event.method == "ipf":
                method = "fgc"
            else:
                method = event.method
            event.psfctr, extra = cd.centerdriver(method,
                                                  event.psfim,
                                                  psfctrguess,
                                                  event.psfctrim,
                                                  event.psfcrad,
                                                  event.psfcsize,
                                                  npskyrad=(event.npskyin,
                                                            event.npskyout))
        log.writelog('PSF center found.')
    else:
        event.psfim = None
        event.psfctr = None
        log.writelog('No PSF supplied.')
    # Find center of the mean Image:
    event.targpos = np.zeros((2, event.npos))

    # Override target position estimate if specified
    if pcf.srcesty is not None and pcf.srcestx is not None:
        srcesty = str(pcf.srcesty).split(',')
        srcestx = str(pcf.srcestx).split(',')

        if len(srcestx) != len(srcesty):
            print("WARNING: Lengths of srcesty and srcestx do not match!")
        if len(srcestx) != event.npos or len(srcesty) != event.npos:
            print("WARNING: Length of srcest inputs does not match npos!")
        if len(srcestx) > 1 or len(srcesty) > 1:
            print(
                "Verify that srcest override order matches telescope pos order."
            )

        for pos in range(event.npos):
            event.srcest[0, pos] = srcesty[pos]
            event.srcest[1, pos] = srcestx[pos]

    for pos in range(event.npos):
        print("Fitting mean image at pos: " + str(pos))
        meanim = event.meanim[:, :, pos]
        guess = event.srcest[:, pos]
        targpos, extra = cd.centerdriver(event.method,
                                         meanim,
                                         guess,
                                         event.ctrim,
                                         event.cradius,
                                         event.csize,
                                         fitbg=event.fitbg,
                                         psf=event.psfim,
                                         psfctr=event.psfctr,
                                         expand=event.expand,
                                         npskyrad=(event.npskyin,
                                                   event.npskyout))

        event.targpos[:, pos] = targpos
    log.writelog("Center position(s) of the mean Image(s):\n" +
                 str(np.transpose(event.targpos)))

    # Inclusion ::::::::
    # Multi-process setup:
    # Shared memory arrays allow only 1D Arrays :(
    x = Array("d", np.zeros(event.npos * event.maxnimpos))
    y = Array("d", np.zeros(event.npos * event.maxnimpos))
    xerr = Array("d", np.zeros(event.npos * event.maxnimpos))
    yerr = Array("d", np.zeros(event.npos * event.maxnimpos))
    xsig = Array("d", np.zeros(event.npos * event.maxnimpos))
    ysig = Array("d", np.zeros(event.npos * event.maxnimpos))
    rot = Array("d", np.zeros(event.npos * event.maxnimpos))
    noisepix = Array("d", np.zeros(event.npos * event.maxnimpos))
    flux = Array("d", np.zeros(event.npos * event.maxnimpos))
    sky = Array("d", np.zeros(event.npos * event.maxnimpos))
    goodfit = Array("d", np.zeros(event.npos * event.maxnimpos))

    # Size of chunk of data each core will process:
    chunksize = event.maxnimpos // event.ccores + 1
    print("Number of cores: " + str(event.ccores))

    # Start multiprocessing: ::::::::::::::::::::::::::::::::::::::
    processes = []
    for nc in range(event.ccores):
        start = nc * chunksize  # Starting index to process
        end = (nc + 1) * chunksize  # Ending   index to process
        proc = Process(target=do_center,
                       args=(start, end, event, centermask, log, x, y, flux,
                             sky, goodfit, xerr, yerr, xsig, ysig, noisepix,
                             rot))
        processes.append(proc)
        proc.start()
    # Make sure all processes finish their work:
    for nc in range(event.ccores):
        processes[nc].join()

    # Put the results in the event. I need to reshape them:
    event.fp.x = np.asarray(x).reshape(event.npos, event.maxnimpos)
    event.fp.y = np.asarray(y).reshape(event.npos, event.maxnimpos)
    event.fp.xerr = np.asarray(xerr).reshape(event.npos, event.maxnimpos)
    event.fp.yerr = np.asarray(yerr).reshape(event.npos, event.maxnimpos)
    event.fp.noisepix = np.asarray(noisepix).reshape(event.npos,
                                                     event.maxnimpos)
    # If Gaussian fit:
    if event.method == 'fgc' or event.method == 'rfgc':
        event.fp.xsig = np.asarray(xsig).reshape(event.npos, event.maxnimpos)
        event.fp.ysig = np.asarray(ysig).reshape(event.npos, event.maxnimpos)
        event.fp.rot = np.asarray(rot).reshape(event.npos, event.maxnimpos)
    # If PSF fit:
    if event.method in ["ipf", "bpf"]:
        event.fp.flux = np.asarray(flux).reshape(event.npos, event.maxnimpos)
        event.fp.psfsky = np.asarray(sky).reshape(event.npos, event.maxnimpos)
        event.fp.goodfit = np.asarray(goodfit).reshape(event.npos,
                                                       event.maxnimpos)
    # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

    # Pixel R position:
    event.fp.r = np.sqrt((event.fp.x % 1.0 - 0.5)**2.0 +
                         (event.fp.y % 1.0 - 0.5)**2.0)

    log.writelog("End frames centering.")

    # Save
    print("\nSaving")
    if event.denoised:
        me.saveevent(event,
                     event.eventname + "_ctr",
                     save=['dendata', 'data', 'uncd', 'mask'])
    else:
        me.saveevent(event,
                     event.eventname + "_ctr",
                     save=['data', 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd()
    log.writelog("Output files (" + event.centerdir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + '/' + event.eventname + "_ctr.dat")
    log.writelog(" " + cwd + '/' + event.eventname + "_ctr.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + '/' + event.logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Centering. Time (h:m:s):  %s" % dt + "  (" +
                   event.centerdir + ")")
    print("-------------  ------------\n")

    os.chdir(owd)

    if event.runp4:
        os.system("python3 poet.py p4 %s" % event.centerdir)
Example #5
def checks(eventname, period=None, ephtime=None, cwd=None):
    if cwd is None:
        cwd = os.getcwd()
    os.chdir(cwd)

    # Load the Event
    event = me.loadevent(eventname)

    # Create a log
    oldlogname = event.logname
    logname = event.eventname + "_p5.log"
    log = le.Logedit(logname, oldlogname)
    log.writelog('\nStart Checks: ' + time.ctime())

    # If p5 is run right after p3, we are using the PSF-fit results:
    if not hasattr(event, "phottype"):
        event.phottype = "psffit"
        try:
            os.mkdir("psffit/")
        except:
            pass
        os.chdir("psffit/")

    # Move frame parameters to fit Kevin's syntax:
    # event.fp.param --> event.param
    event.filenames = event.fp.filename
    event.x = event.fp.x
    event.y = event.fp.y
    event.sx = event.fp.sx
    event.sy = event.fp.sy
    event.time = event.fp.time
    event.pos = event.fp.pos
    event.frmvis = event.fp.frmvis
    event.filename = event.eventname

    if event.phottype == "aper":
        event.good = event.fp.good
        event.aplev = event.fp.aplev
        event.aperr = event.fp.aperr
        event.background = event.fp.skylev
        log.writelog('Photometry method is APERTURE')
    elif event.phottype == "psffit":
        event.aplev = event.fp.psfflux
        event.background = event.fp.psfsky
        # FINDME: do something with aperr and good
        event.aperr = 0.0025 * np.mean(
            event.fp.psfflux) * (event.aplev * 0 + 1)
        event.good = np.ones(np.shape(event.aplev))
        log.writelog('Photometry method is PSF FITTING')
    elif event.phottype == "optimal":
        event.good = event.fp.ogood
        event.aplev = event.fp.ophotlev
        event.aperr = event.fp.ophoterr
        # FINDME: Background from optimal?
        event.background = event.fp.psfsky
        log.writelog('Photometry method is OPTIMAL')

    # UPDATE period AND ephtime
    if period is not None:
        event.period = period[0]
        event.perioderr = period[1]
    if ephtime is not None:
        event.ephtime = ephtime[0]
        event.ephtimeerr = ephtime[1]

    log.writelog("\nCurrent event = " + event.eventname)
    log.writelog("Kurucz file     = " + event.kuruczfile)
    log.writelog("Filter file     = " + event.filtfile)

    # Light-time correction to BJD:

    # Julian observation date
    #event.juldat = event.jdjf80 + event.fp.time / 86400.0
    event.juldat = event.fp.juldat = event.j2kjd + event.fp.time / 86400.0

    if not event.ishorvec:
        log.writeclose('\nHorizon file not found!')
        return
    print("Calculating BJD correction...")
    event.fp.bjdcor = stc.suntimecorr(event.ra, event.dec, event.fp.juldat,
                                      event.horvecfile)

    # Get bjd times:
    event.bjdcor = event.fp.bjdcor
    #event.bjddat = event.fp.juldat + event.fp.bjdcor / 86400.0
    event.bjdutc = event.fp.juldat + event.fp.bjdcor / 86400.0  # utc bjd date
    event.bjdtdb = np.empty(event.bjdutc.shape)
    for i in range(event.bjdtdb.shape[0]):
        event.bjdtdb[i] = utc_tt.utc_tdb(
            event.bjdutc[i])  # terrestrial BJD date

    # ccampo 3/18/2011: check which units phase should be in
    try:
        if event.tep.ttrans.unit == "BJDTDB":
            event.timestd = "tdb"
            event.fp.phase = tp.time2phase(event.bjdtdb, event.ephtime,
                                           event.period, event.ecltype)
        else:
            event.timestd = "utc"
            event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                           event.period, event.ecltype)
    except:
        event.timestd = "utc"
        event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                       event.period, event.ecltype)

    # assign phase variable
    event.phase = event.fp.phase

    # ccampo 3/18/2011: moved this above
    # Eclipse phase, BJD
    #event.fp.phase = tp.time2phase(event.fp.juldat + event.fp.bjdcor / 86400.0,
    #                               event.ephtime, event.period, event.ecltype)

    # verify leapsecond correction
    hfile = event.filenames[0, 0]
    try:
        image, event.header = pf.getdata(hfile.decode('utf-8'), header=True)
        dt = ((event.bjdtdb - event.bjdutc) * 86400.0)[0, 0]
        dt2 = event.header['ET_OBS'] - event.header['UTCS_OBS']
        log.writelog('Leap second correction : ' + str(dt) + ' = ' + str(dt2))
    except:
        log.writelog('Could not verify leap-second correction.')

    log.writelog('Min and Max light-time correction: ' +
                 str(np.amin(event.fp.bjdcor)) + ', ' +
                 str(np.amax(event.fp.bjdcor)) + ' seconds')

    # Verify light-time correction
    try:
        image, event.header = pf.getdata(hfile.decode('utf-8'), header=True)
        try:
            log.writelog('BJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['BMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
        except:
            log.writelog('HJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['HMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
    except:
        log.writelog('Could not verify light-time correction.')

    # Number of good frames should be > 95%
    log.writelog("Good Frames = %7.3f" % (np.mean(event.good) * 100) + " %")

    log.writelog('\nCentering:     X mean     X stddev  Y mean     Y stddev')
    for pos in np.arange(event.npos):
        log.writelog(
            'position %2d:' % pos +
            ' %10.5f' % np.mean(event.x[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.x[pos, np.where(event.good[pos])]) +
            ' %10.5f' % np.mean(event.y[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.y[pos, np.where(event.good[pos])]))

    # COMPUTE RMS POSITION CONSISTENCY
    event.xprecision = np.sqrt(np.median(np.ediff1d(event.x)**2))
    event.yprecision = np.sqrt(np.median(np.ediff1d(event.y)**2))

    log.writelog('RMS of x precision = ' + str(np.round(event.xprecision, 4)) +
                 ' pixels.')
    log.writelog('RMS of y precision = ' + str(np.round(event.yprecision, 4)) +
                 ' pixels.')
    if event.phottype == "aper":
        log.writelog('\nCenter & photometry half-width/aperture sizes = ' +
                     str(event.ctrim) + ', ' + str(event.photap) + ' pixels.')
    log.writelog('Period = ' + str(event.period) + ' +/- ' +
                 str(event.perioderr) + ' days')
    log.writelog('Ephemeris = ' + str(event.ephtime) + ' +/- ' +
                 str(event.ephtimeerr) + ' JD')

    fmt1 = [
        'C0o', 'C1o', 'C2o', 'ro', 'ko', 'co', 'mo', 'bs', 'gs', 'ys', 'rs',
        'ks', 'cs', 'ms'
    ]
    fmt2 = ['b,', 'g,', 'y,', 'r,']

    plt.figure(501)
    plt.clf()
    plt.figure(502, figsize=(8, 12))
    plt.clf()
    plt.figure(503)
    plt.clf()
    plt.figure(504)
    plt.clf()
    plt.figure(505)
    plt.clf()
    plt.figure(506)
    plt.clf()

    for pos in np.arange(event.npos):
        wheregood = np.where(event.good[pos, :])
        # CHOOSE ONLY GOOD FRAMES FOR PLOTTING
        phase = event.phase[pos, :][wheregood]
        aplev = event.aplev[pos, :][wheregood]
        jdtime = event.bjdutc[pos, :][wheregood]
        background = event.background[pos, :][wheregood]
        # COMPUTE X AND Y PIXEL LOCATION RELATIVE TO ...
        if event.npos > 1:
            # CENTER OF EACH PIXEL
            y = (event.y[pos, :] - np.round(event.y[pos, :]))[wheregood]
            x = (event.x[pos, :] - np.round(event.x[pos, :]))[wheregood]
        else:
            # CENTER OF MEDIAN PIXEL
            y = (event.y[pos, :] - np.round(np.median(event.y)))[wheregood]
            x = (event.x[pos, :] - np.round(np.median(event.x)))[wheregood]

        # SORT aplev BY x, y AND radial POSITIONS
        rad = np.sqrt(x**2 + y**2)
        xx = np.sort(x)
        yy = np.sort(y)
        sxx = np.sort(event.sx[0])
        syy = np.sort(event.sy[0])
        rr = np.sort(rad)
        xaplev = aplev[np.argsort(x)]
        yaplev = aplev[np.argsort(y)]
        raplev = aplev[np.argsort(rad)]

        # BIN RESULTS FOR PLOTTING POSITION SENSITIVITY EFFECT
        nobj = aplev.size
        nbins = int(120 / event.npos)
        binxx = np.zeros(nbins)
        binyy = np.zeros(nbins)
        binsxx = np.zeros(nbins)
        binsyy = np.zeros(nbins)
        binrr = np.zeros(nbins)
        binxaplev = np.zeros(nbins)
        binyaplev = np.zeros(nbins)
        binraplev = np.zeros(nbins)
        binxapstd = np.zeros(nbins)
        binyapstd = np.zeros(nbins)
        binrapstd = np.zeros(nbins)
        binphase = np.zeros(nbins)
        binaplev = np.zeros(nbins)
        binapstd = np.zeros(nbins)
        for i in range(nbins):
            start = int(1. * i * nobj / nbins)
            end = int(1. * (i + 1) * nobj / nbins)
            binxx[i] = np.mean(xx[start:end])
            binyy[i] = np.mean(yy[start:end])
            binsxx[i] = np.mean(sxx[start:end])
            binsyy[i] = np.mean(syy[start:end])
            binrr[i] = np.mean(rr[start:end])
            binxaplev[i] = np.median(xaplev[start:end])
            binyaplev[i] = np.median(yaplev[start:end])
            binraplev[i] = np.median(raplev[start:end])
            binxapstd[i] = np.std(xaplev[start:end]) / np.sqrt(end - start)
            binyapstd[i] = np.std(yaplev[start:end]) / np.sqrt(end - start)
            binrapstd[i] = np.std(raplev[start:end]) / np.sqrt(end - start)
            binphase[i] = np.mean(phase[start:end])
            binaplev[i] = np.median(aplev[start:end])
            binapstd[i] = np.std(aplev[start:end]) / np.sqrt(end - start)

        # PLOT 1: flux
        plt.figure(501)
        plt.errorbar(binphase,
                     binaplev,
                     binapstd,
                     fmt=fmt1[pos],
                     linewidth=1,
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Phase vs. Binned Flux')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # PLOT 2: position-flux
        plt.figure(502)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Position vs. Binned Flux')
        plt.errorbar(binyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('pos %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('pos %i x' % (pos)))
        plt.xlabel('Pixel Position')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        #PLOT 3: position-phase
        plt.figure(503)

        plt.plot(phase, x, 'b,')
        plt.plot(phase, y, 'r,')
        plt.title(event.planetname + ' Phase vs. Position')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Pixel Position')
        plt.legend('xy')

        #PLOT 4: flux-radial distance
        plt.figure(504)
        plt.errorbar(binrr,
                     binraplev,
                     binrapstd,
                     fmt=fmt1[pos],
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Radial Distance vs. Flux')
        plt.xlabel('Distance From Center of Pixel')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # ::::::::::: Background setting :::::::::::::::::
        if np.size(background) != 0:
            # number of points per bin:
            npoints = 42
            nbins = int(np.size(background) / npoints)
            medianbg = np.zeros(nbins)
            bphase = np.zeros(nbins)  # background bin phase
            bintime = np.zeros(nbins)  # background bin JD time
            for i in range(nbins):
                start = int(1.0 * i * npoints)
                end = int(1.0 * (i + 1) * npoints)
                medianbg[i] = np.median(background[start:end])
                bphase[i] = np.mean(phase[start:end])
                bintime[i] = np.mean(jdtime[start:end])

            # PLOT 5: background-phase
            day = int(np.floor(np.amin(jdtime)))
            timeunits1 = jdtime - day
            timeunits2 = bintime - day
            xlabel = 'JD - ' + str(day)
            if event.ecltype == 's':
                timeunits1 = phase
                timeunits2 = bphase
                xlabel = 'Phase'

            plt.figure(505)
            plt.plot(timeunits1,
                     background,
                     color='0.45',
                     linestyle='None',
                     marker=',')
            if np.size(background) > 10000:
                plt.plot(timeunits2, medianbg, fmt2[pos], label='median bins')
            plt.title(event.planetname + ' Background level')
            plt.xlabel(xlabel)
            plt.ylabel('Flux')

        # PLOT 6: width-flux
        plt.figure(506)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Gaussian Width vs. Binned Flux')
        plt.errorbar(binsyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('width %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binsxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('width %i x' % (pos)))
        plt.xlabel('Gaussian Width')
        plt.ylabel('Flux')
        plt.legend(loc='best')

    figname1 = str(event.eventname) + "-fig501.png"
    figname2 = str(event.eventname) + "-fig502.png"
    figname3 = str(event.eventname) + "-fig503.png"
    figname4 = str(event.eventname) + "-fig504.png"
    figname5 = str(event.eventname) + "-fig505.png"
    figname6 = str(event.eventname) + "-fig506.png"

    plt.figure(501)
    plt.savefig(figname1)
    plt.figure(502)
    plt.savefig(figname2)
    plt.figure(503)
    plt.savefig(figname3)
    plt.figure(504)
    plt.savefig(figname4)
    plt.figure(505)
    plt.plot(timeunits1[0],
             background[0],
             color='0.45',
             linestyle='None',
             marker=',',
             label='all points')
    plt.legend(loc='best')
    plt.savefig(figname5)
    plt.figure(506)
    plt.savefig(figname6)

    # Saving
    me.saveevent(event, event.eventname + "_p5c")

    cwd = os.getcwd() + "/"
    # Print outputs, end-time, and close log.
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_p5c.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)
    log.writelog("Figures:")
    log.writelog(" " + cwd + figname1)
    log.writelog(" " + cwd + figname2)
    log.writelog(" " + cwd + figname3)
    log.writelog(" " + cwd + figname4)
    log.writelog(" " + cwd + figname5)
    log.writelog(" " + cwd + figname6)
    log.writeclose('\nEnd Checks: ' + time.ctime())

    return event
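Both checks() above and checks1() below bin the photometry into equal-count bins after sorting by phase or position, and quote the standard error within each bin. A standalone sketch of that binning scheme, with synthetic data standing in for aplev:

import numpy as np


def bin_equal_counts(x, flux, nbins):
    """Equal-count binning ordered by x: returns the mean x, the median
    flux, and the standard error of the flux in each bin."""
    order = np.argsort(x)
    xs, fs = x[order], flux[order]
    nobj = flux.size
    binx = np.zeros(nbins)
    binf = np.zeros(nbins)
    binerr = np.zeros(nbins)
    for i in range(nbins):
        start = i * nobj // nbins
        end = (i + 1) * nobj // nbins
        binx[i] = np.mean(xs[start:end])
        binf[i] = np.median(fs[start:end])
        binerr[i] = np.std(fs[start:end]) / np.sqrt(end - start)
    return binx, binf, binerr


# Synthetic light curve: a weak linear position trend plus noise.
rng = np.random.default_rng(0)
xpos = rng.uniform(-0.5, 0.5, 2000)
aplev = 1.0 + 0.02 * xpos + 0.01 * rng.standard_normal(2000)
binx, binflux, binerr = bin_equal_counts(xpos, aplev, 40)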
Example #6
def checks1(eventname, cwd, period=None, ephtime=None):

    owd = os.getcwd()
    os.chdir(cwd)

    # Load the Event
    event = me.loadevent(eventname)

    # Create a log
    oldlogname = event.logname
    logname = event.eventname + "_p5.log"
    log = le.Logedit(logname, oldlogname)
    log.writelog('\nStart Checks: ' + time.ctime())

    # If p5 is run right after p3, we are using the PSF-fit results:
    if not hasattr(event, "phottype"):
        event.phottype = "psffit"
        try:
            os.mkdir("psffit/")
        except:
            pass
        os.chdir("psffit/")

    # Move frame parameters to fit Kevin's syntax:
    # event.fp.param --> event.param
    event.filenames = event.fp.filename
    event.x = event.fp.x
    event.y = event.fp.y
    event.time = event.fp.time
    event.pos = event.fp.pos
    event.frmvis = event.fp.frmvis
    event.filename = event.eventname

    event.aplev = event.fp.aplev
    event.background = event.fp.skylev
    event.good = event.fp.good

    if event.phottype == "aper":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is APERTURE')
    elif event.phottype == "var":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is VARIABLE APERTURE')
    elif event.phottype == "ell":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is ELLIPTICAL APERTURE')
    elif event.phottype == "psffit":
        # FINDME: do something with aperr
        event.aperr = .0025 * np.mean(event.aplev) * np.ones(
            np.shape(event.aplev))
        log.writelog('Photometry method is PSF FITTING')
    elif event.phottype == "optimal":
        event.aperr = event.fp.aperr
        log.writelog('Photometry method is OPTIMAL')

    # UPDATE period AND ephtime
    if period is not None:
        event.period = period[0]
        event.perioderr = period[1]
    if ephtime is not None:
        event.ephtime = ephtime[0]
        event.ephtimeerr = ephtime[1]

    log.writelog("\nCurrent event = " + event.eventname)
    log.writelog("Kurucz file     = " + event.kuruczfile)
    log.writelog("Filter file     = " + event.filtfile)

    # Light-time correction to BJD:

    # Julian observation date
    #event.juldat = event.jdjf80 + event.fp.time / 86400.0
    event.juldat = event.fp.juldat = event.j2kjd + event.fp.time / 86400.0

    if not event.ishorvec:
        log.writeclose('\nHorizon file not found!')
        return
    print("Calculating BJD correction...")

    event.fp.bjdcor = np.zeros(event.fp.juldat.shape)
    # Sometimes bad files are simply missing files, in which case they have
    # times of 0, which causes problems in the interpolation below. So
    # we must mask out these files. We do not use the event.fp.good mask
    # because we may still want the BJD of bad images.

    nonzero = np.where(event.fp.time != 0.0)
    event.fp.bjdcor[nonzero] = stc.suntimecorr(event.ra, event.dec,
                                               event.fp.juldat[nonzero],
                                               event.horvecfile)

    # Get bjd times:
    event.bjdcor = event.fp.bjdcor
    #event.bjddat = event.fp.juldat + event.fp.bjdcor / 86400.0
    event.bjdutc = event.fp.juldat + event.fp.bjdcor / 86400.0  # utc bjd date
    event.bjdtdb = np.empty(event.bjdutc.shape)
    for i in range(event.bjdtdb.shape[0]):
        event.bjdtdb[i] = utc_tt.utc_tdb(event.bjdutc[i], event.topdir + '/' +
                                         event.leapdir)  # terrestrial BJD date

    # ccampo 3/18/2011: check which units phase should be in
    try:
        if event.tep.ttrans.unit == "BJDTDB":
            event.timestd = "tdb"
            event.fp.phase = tp.time2phase(event.bjdtdb, event.ephtime,
                                           event.period, event.ecltype)
        else:
            event.timestd = "utc"
            event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                           event.period, event.ecltype)
    except:
        event.timestd = "utc"
        event.fp.phase = tp.time2phase(event.bjdutc, event.ephtime,
                                       event.period, event.ecltype)

    # assign phase variable
    event.phase = event.fp.phase

    # ccampo 3/18/2011: moved this above
    # Eclipse phase, BJD
    #event.fp.phase = tp.time2phase(event.fp.juldat + event.fp.bjdcor / 86400.0,
    #                               event.ephtime, event.period, event.ecltype)

    # verify leapsecond correction
    hfile = event.filenames[0, 0]
    try:
        image, event.header = fits.getdata(hfile, header=True)
        dt = ((event.bjdtdb - event.bjdutc) * 86400.0)[0, 0]
        dt2 = event.header['ET_OBS'] - event.header['UTCS_OBS']
        log.writelog('Leap second correction : ' + str(dt) + ' = ' + str(dt2))
    except:
        log.writelog('Could not verify leap-second correction.')

    log.writelog('Min and Max light-time correction: ' +
                 str(np.amin(event.fp.bjdcor)) + ', ' +
                 str(np.amax(event.fp.bjdcor)) + ' seconds')

    # Verify light-time correction
    try:
        image, event.header = fits.getdata(hfile, header=True)
        try:
            log.writelog('BJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['BMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
        except:
            log.writelog('HJD Light-time correction: ' +
                         str(event.bjdcor[0, 0]) + ' = ' +
                         str((event.header['HMJD_OBS'] -
                              event.header['MJD_OBS']) * 86400))
    except:
        log.writelog('Could not verify light-time correction.')

    # Number of good frames should be > 95%
    log.writelog("Good Frames = %7.3f" % (np.mean(event.good) * 100) + " %")

    log.writelog('\nCentering:     X mean     X stddev  Y mean     Y stddev')
    for pos in range(event.npos):
        log.writelog(
            'position %2d:' % pos +
            ' %10.5f' % np.mean(event.x[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.x[pos, np.where(event.good[pos])]) +
            ' %10.5f' % np.mean(event.y[pos, np.where(event.good[pos])]) +
            ' %9.5f' % np.std(event.y[pos, np.where(event.good[pos])]))

    # COMPUTE RMS POSITION CONSISTENCY
    event.xprecision = np.sqrt(np.mean(np.ediff1d(event.x)**2))
    event.yprecision = np.sqrt(np.mean(np.ediff1d(event.y)**2))

    log.writelog('RMS of x precision = ' + str(np.round(event.xprecision, 4)) +
                 ' pixels.')
    log.writelog('RMS of y precision = ' + str(np.round(event.yprecision, 4)) +
                 ' pixels.')
    if event.phottype == "aper":
        log.writelog('\nCenter & photometry half-width/aperture sizes = ' +
                     str(event.ctrim) + ', ' + str(event.photap) + ' pixels.')
    log.writelog('Period = ' + str(event.period) + ' +/- ' +
                 str(event.perioderr) + ' days')
    log.writelog('Ephemeris = ' + str(event.ephtime) + ' +/- ' +
                 str(event.ephtimeerr) + ' JD')

    # Compute elliptical area if gaussian centering
    if event.method == 'fgc' or event.method == 'rfgc':
        event.fp.ellarea = np.pi * (3 * event.fp.xsig) * (3 * event.fp.ysig)

    fmt1 = [
        'bo', 'go', 'yo', 'ro', 'ko', 'co', 'mo', 'bs', 'gs', 'ys', 'rs', 'ks',
        'cs', 'ms'
    ]
    fmt2 = ['b,', 'g,', 'y,', 'r,']
    fmt3 = ['b.', 'g.', 'y.', 'r.']

    plt.figure(501)
    plt.clf()
    plt.figure(502, figsize=(8, 12))
    plt.clf()
    plt.figure(503)
    plt.clf()
    plt.figure(504)
    plt.clf()
    plt.figure(505)
    plt.clf()

    for pos in range(event.npos):
        wheregood = np.where(event.good[pos, :])
        # CHOOSE ONLY GOOD FRAMES FOR PLOTTING
        phase = event.phase[pos, :][wheregood]
        aplev = event.aplev[pos, :][wheregood]
        jdtime = event.bjdutc[pos, :][wheregood]
        background = event.background[pos, :][wheregood]
        noisepix = event.fp.noisepix[pos, :][wheregood]
        if event.method == "fgc" or event.method == "rfgc":
            ellarea = event.fp.ellarea[pos, :][wheregood]
            rot = event.fp.rot[pos, :][wheregood]
        # COMPUTE X AND Y PIXEL LOCATION RELATIVE TO ...
        if event.npos > 1:
            # CENTER OF EACH PIXEL
            y = (event.y[pos, :] - np.round(event.y[pos, :]))[wheregood]
            x = (event.x[pos, :] - np.round(event.x[pos, :]))[wheregood]
        else:
            # CENTER OF MEDIAN PIXEL
            y = (event.y[pos, :] - np.round(np.median(event.y)))[wheregood]
            x = (event.x[pos, :] - np.round(np.median(event.x)))[wheregood]

        # SORT aplev BY x, y AND radial POSITIONS
        rad = np.sqrt(x**2 + y**2)
        xx = np.sort(x)
        yy = np.sort(y)
        rr = np.sort(rad)
        xaplev = aplev[np.argsort(x)]
        yaplev = aplev[np.argsort(y)]
        raplev = aplev[np.argsort(rad)]

        # BIN RESULTS FOR PLOTTING POSITION SENSITIVITY EFFECT
        nobj = aplev.size
        nbins = 120 // event.npos
        binxx = np.zeros(nbins)
        binyy = np.zeros(nbins)
        binrr = np.zeros(nbins)
        binxaplev = np.zeros(nbins)
        binyaplev = np.zeros(nbins)
        binraplev = np.zeros(nbins)
        binxapstd = np.zeros(nbins)
        binyapstd = np.zeros(nbins)
        binrapstd = np.zeros(nbins)
        binphase = np.zeros(nbins)
        binaplev = np.zeros(nbins)
        binapstd = np.zeros(nbins)
        binnpix = np.zeros(nbins)
        for i in range(nbins):
            start = int(1. * i * nobj / nbins)
            end = int(1. * (i + 1) * nobj / nbins)
            binxx[i] = np.mean(xx[start:end])
            binyy[i] = np.mean(yy[start:end])
            binrr[i] = np.mean(rr[start:end])
            binxaplev[i] = np.median(xaplev[start:end])
            binyaplev[i] = np.median(yaplev[start:end])
            binraplev[i] = np.median(raplev[start:end])
            binxapstd[i] = np.std(xaplev[start:end]) / np.sqrt(end - start)
            binyapstd[i] = np.std(yaplev[start:end]) / np.sqrt(end - start)
            binrapstd[i] = np.std(raplev[start:end]) / np.sqrt(end - start)
            binphase[i] = np.mean(phase[start:end])
            binaplev[i] = np.median(aplev[start:end])
            binapstd[i] = np.std(aplev[start:end]) / np.sqrt(end - start)
            binnpix[i] = np.mean(noisepix[start:end])

        # PLOT 1: flux
        plt.figure(501)
        plt.errorbar(binphase,
                     binaplev,
                     binapstd,
                     fmt=fmt1[pos],
                     linewidth=1,
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Phase vs. Binned Flux')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # PLOT 2: position-flux
        plt.figure(502)
        plt.subplot(2, 1, 1)
        plt.title(event.planetname + ' Position vs. Binned Flux')
        plt.errorbar(binyy,
                     binyaplev,
                     binyapstd,
                     fmt=fmt1[pos],
                     label=('pos %i y' % (pos)))
        plt.ylabel('Flux')
        plt.legend(loc='best')
        plt.subplot(2, 1, 2)
        plt.errorbar(binxx,
                     binxaplev,
                     binxapstd,
                     fmt=fmt1[pos],
                     label=('pos %i x' % (pos)))
        plt.xlabel('Pixel Position')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        #PLOT 3: position-phase
        plt.figure(503)

        plt.plot(phase, x, 'b,')
        plt.plot(phase, y, 'r,')
        plt.title(event.planetname + ' Phase vs. Position')
        plt.xlabel('Orbital Phase')
        plt.ylabel('Pixel Position')
        plt.legend('xy')

        #PLOT 4: flux-radial distance
        plt.figure(504)
        plt.errorbar(binrr,
                     binraplev,
                     binrapstd,
                     fmt=fmt1[pos],
                     label=('pos %i' % (pos)))
        plt.title(event.planetname + ' Radial Distance vs. Flux')
        plt.xlabel('Distance From Center of Pixel')
        plt.ylabel('Flux')
        plt.legend(loc='best')

        # ::::::::::: Background setting :::::::::::::::::
        if np.size(background) != 0:
            # number of points per bin:
            npoints = 42
            nbins = int(np.size(background) // npoints)
            medianbg = np.zeros(nbins)
            bphase = np.zeros(nbins)  # background bin phase
            bintime = np.zeros(nbins)  # background bin JD time
            for i in range(nbins):
                start = int(1.0 * i * npoints)
                end = int(1.0 * (i + 1) * npoints)
                medianbg[i] = np.median(background[start:end])
                bphase[i] = np.mean(phase[start:end])
                bintime[i] = np.mean(jdtime[start:end])

            # PLOT 5: background-phase
            day = int(np.floor(np.amin(jdtime)))
            timeunits1 = jdtime - day
            timeunits2 = bintime - day
            xlabel = 'JD - ' + str(day)
            if event.ecltype == 's':
                timeunits1 = phase
                timeunits2 = bphase
                xlabel = 'Phase'

            plt.figure(505)
            plt.plot(timeunits1,
                     background,
                     color='0.45',
                     linestyle='None',
                     marker=',')
            if np.size(background) > 10000:
                plt.plot(timeunits2, medianbg, fmt2[pos], label='median bins')
            plt.title(event.planetname + ' Background level')
            plt.xlabel(xlabel)
            plt.ylabel('Flux')
            plt.plot(timeunits1[0],
                     background[0],
                     color='0.45',
                     linestyle='None',
                     marker=',',
                     label='all points')
            plt.legend(loc='best')

        else:
            print("WARNING: background has zero size.")

        #PLOT 7: Noise Pixels Binned
        plt.figure(507)
        plt.scatter(binphase, binnpix)
        plt.xlabel("Orbital Phase")
        plt.ylabel("Noise Pixels")
        plt.title(event.planetname + " Binned Noise Pixels")

        #PLOT 8: Noise Pixel Variance
        plt.figure(508)
        npixvar = bd.subarnvar(noisepix, event)
        subarnbinphase = bd.subarnbin(phase, event)
        plt.scatter(subarnbinphase, npixvar, s=1)
        plt.xlabel("Orbital Phase")
        plt.ylabel("Noise Pixel Variance")
        plt.title(event.planetname + " Noise Pixels Variance")

        #PLOT 9 and 10: Elliptical Area and Variance
        if event.method == 'fgc' or event.method == 'rfgc':
            plt.figure(509)
            plt.scatter(phase, ellarea, s=0.1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Elliptical Area")
            plt.title(event.planetname + " Gaussian Centering Elliptical Area")

            plt.figure(510)
            ellareavar = bd.subarnvar(ellarea, event)
            plt.scatter(subarnbinphase, ellareavar, s=1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Elliptical Area Variance")
            plt.title(event.planetname + " Elliptical Area Variance")

        if event.method == 'rfgc':
            plt.figure(511)
            plt.scatter(phase, rot % (np.pi / 2) * 180 / np.pi, s=1)
            plt.xlabel("Orbital Phase")
            plt.ylabel("Rotation (deg)")
            plt.title(event.planetname + " Gaussian Centering Rotation")

    #PLOT 6: Preflash
    if event.havepreflash:
        plt.figure(506)
        plt.errorbar((event.prefp.time[0] - event.prefp.time[0, 0]) / 60.,
                     event.prefp.aplev[0],
                     yerr=event.prefp.aperr[0],
                     fmt="o")
        plt.xlabel("Time since start of preflash  (minutes)")
        plt.ylabel("Flux")
        plt.title(event.planetname + " Preflash")

    figname1 = str(event.eventname) + "-fig501.png"
    figname2 = str(event.eventname) + "-fig502.png"
    figname3 = str(event.eventname) + "-fig503.png"
    figname4 = str(event.eventname) + "-fig504.png"
    figname5 = str(event.eventname) + "-fig505.png"
    figname6 = str(event.eventname) + "-fig506.png"
    figname7 = str(event.eventname) + "-fig507.png"
    figname8 = str(event.eventname) + "-fig508.png"
    figname9 = str(event.eventname) + "-fig509.png"
    figname10 = str(event.eventname) + "-fig510.png"
    figname11 = str(event.eventname) + "-fig511.png"

    plt.figure(501)
    plt.savefig(figname1)
    plt.figure(502)
    plt.savefig(figname2)
    plt.figure(503)
    plt.savefig(figname3)
    plt.figure(504)
    plt.savefig(figname4)
    plt.figure(505)
    plt.savefig(figname5)

    plt.figure(506)
    if event.havepreflash:
        plt.savefig(figname6)

    plt.figure(507)
    plt.savefig(figname7)
    plt.figure(508)
    plt.savefig(figname8)

    if event.method == 'fgc' or event.method == 'rfgc':
        plt.figure(509)
        plt.savefig(figname9)
        plt.figure(510)
        plt.savefig(figname10)

    if event.method == 'rfgc':
        plt.figure(511)
        plt.savefig(figname11)

    # Saving
    me.saveevent(event, event.eventname + "_p5c")

    cwd += "/"
    # Print outputs, end-time, and close log.
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_p5c.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)
    log.writelog("Figures:")
    log.writelog(" " + cwd + figname1)
    log.writelog(" " + cwd + figname2)
    log.writelog(" " + cwd + figname3)
    log.writelog(" " + cwd + figname4)
    log.writelog(" " + cwd + figname5)
    if event.havepreflash:
        log.writelog(" " + cwd + figname6)
    log.writelog(" " + cwd + figname7)
    log.writelog(" " + cwd + figname8)
    if event.method == 'fgc' or event.method == 'rfgc':
        log.writelog(" " + cwd + figname9)
        log.writelog(" " + cwd + figname10)
    if event.method == 'rfgc':
        log.writelog(" " + cwd + figname11)
    log.writeclose('\nEnd Checks: ' + time.ctime())

    os.chdir(owd)

    return event
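checks1() above plots event.fp.noisepix, the per-frame "noise pixels" statistic commonly used in Spitzer photometry; it is conventionally defined as beta = (sum I)^2 / sum(I^2) over a background-subtracted stamp. A small sketch of that definition (the actual values are computed upstream in the centering/photometry steps, not here):

import numpy as np


def noise_pixels(stamp):
    """beta = (sum I)^2 / sum(I^2): the effective number of pixels the
    source flux is spread over; larger beta means a broader PSF."""
    stamp = np.asarray(stamp, dtype=float)
    return stamp.sum()**2 / np.sum(stamp**2)


# For a Gaussian PSF, beta approaches 4*pi*sigma^2:
y, x = np.mgrid[:21, :21]
psf = np.exp(-((x - 10.0)**2 + (y - 10.0)**2) / (2.0 * 1.5**2))
print(noise_pixels(psf))        # ~28.3 for sigma = 1.5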
Example #7
def centering(event, pcf, centerdir):
  tini = time.time()

  # Create centering log
  logname = event.logname
  log = le.Logedit(centerdir + "/" + logname, logname)
  log.writelog("\nStart " + centerdir + " centering: " + time.ctime())

  os.chdir(centerdir)

  # copy center.pcf in centerdir
  pcf.make_file("center.pcf")

  # Parse the attributes from the control file to the event:
  attrib = vars(pcf)
  keys = attrib.keys()
  for key in keys:
    setattr(event, key, attrib.get(key).get())

  # Check least asym parameters work:
  if event.method in ['lac', 'lag']:
    if event.ctrim < (event.cradius + event.csize) and event.ctrim != 0:
      event.ctrim = event.cradius + event.csize + 1
      log.writelog('Trim radius is too small, changed to: %i' % event.ctrim)
    if event.psfctrim < (event.psfcrad + event.psfcsize) and event.psfctrim != 0:
      event.psfctrim = event.psfcrad + event.psfcsize + 1
      log.writelog('PSF Trim radius is too small, changed to: %i'
                   % event.psfctrim)

  # Centering bad pixel mask:
  centermask = np.ones((event.ny, event.nx))
  if event.ymask is not None:
    ymask = np.asarray(event.ymask, int)
    xmask = np.asarray(event.xmask, int)
    for i in np.arange(len(ymask)):
      centermask[ymask[i], xmask[i]] = 0

  # PSF:
  # Re-evaluate if a PSF has been redefined:
  if event.newpsf is not None:
    event.ispsf = os.path.isfile(event.newpsf)
    if event.ispsf:
      event.psffile = event.newpsf
      log.writelog('The PSF file has been redefined!')
      log.writelog("PSF:     " + event.psffile)

  # PSF Centering:
  if event.ispsf:
    event.psfim = pf.getdata(event.psffile)
    # Guess of the center of the PSF (center of psfim)
    psfctrguess = np.asarray(np.shape(event.psfim)) // 2
    # Do not find center of PSF:
    if event.nopsfctr:
      event.psfctr = psfctrguess
    # Find center of PSF:
    else:
      '''
      if event.method == "bpf" or event.method == "ipf":
        method = "fgc"
      else:
        method = event.method
      event.psfctr, extra = cd.centerdriver(method, event.psfim, psfctrguess,
                                 event.psfctrim, event.psfcrad, event.psfcsize)
      '''
      # Always use 'fgc' on PSF, for testing
      event.psfctr, extra = cd.centerdriver("fgc", event.psfim, psfctrguess,
                                 event.psfctrim, event.psfcrad, event.psfcsize) #FINDME
    log.writelog('PSF center found.')
    print(event.psfctr) #FINDME
  else:
    event.psfim  = None
    event.psfctr = None
    log.writelog('No PSF supplied.')

  # Find center of the mean Image:
  event.targpos = np.zeros((2, event.npos))
  for pos in np.arange(event.npos):
    meanim = event.meanim[:,:,pos]
    guess  = event.srcest[:, pos]
    targpos, extra = cd.centerdriver(event.method, meanim,
                                     guess, event.ctrim,
                                     event.cradius, event.csize,
                                     fitbg=event.fitbg, psf=event.psfim,
                                     psfctr=event.psfctr, expand=event.expand)
    event.targpos[:,pos] = targpos
  log.writelog("Center position(s) of the mean Image(s):\n" +
               str(np.transpose(event.targpos)))
  
  # Inclusion ::::::::
  # Multi-process setup:
  # Shared memory arrays allow only 1D Arrays :(
  event.maxnimpos = int(event.maxnimpos)
  event.npos = int(event.npos)
  x       = Array("d", np.zeros(event.npos * event.maxnimpos))
  y       = Array("d", np.zeros(event.npos * event.maxnimpos))
  sx      = Array("d", np.zeros(event.npos * event.maxnimpos))
  sy      = Array("d", np.zeros(event.npos * event.maxnimpos))
  flux    = Array("d", np.zeros(event.npos * event.maxnimpos))
  sky     = Array("d", np.zeros(event.npos * event.maxnimpos))
  goodfit = Array("d", np.zeros(event.npos * event.maxnimpos))

  # Size of chunk of data each core will process:
  chunksize = event.maxnimpos // event.ccores + 1
  print("Number of cores: " + str(event.ccores))

  # Start multiprocessing: ::::::::::::::::::::::::::::::::::::::
  processes = []
  for nc in np.arange(event.ccores):
    start =  nc    * chunksize # Starting index to process
    end   = (nc+1) * chunksize # Ending   index to process
    proc = Process(target=do_center, args=(start, end, event, centermask, log,
                                      x, y, sx, sy, flux, sky, goodfit))
    processes.append(proc)
    proc.start()
  # Make sure all processes finish their work:
  for nc in np.arange(event.ccores):
    processes[nc].join()

  # Put the results in the event. I need to reshape them:
  event.fp.x        = np.asarray(x      ).reshape(event.npos,event.maxnimpos)
  event.fp.y        = np.asarray(y      ).reshape(event.npos,event.maxnimpos)
  event.fp.sx       = np.asarray(sx     ).reshape(event.npos,event.maxnimpos)
  event.fp.sy       = np.asarray(sy     ).reshape(event.npos,event.maxnimpos)
  # If PSF fit:
  if event.method in ["ipf", "bpf"]:
    event.fp.flux    = np.asarray(flux   ).reshape(event.npos,event.maxnimpos)
    event.fp.psfsky  = np.asarray(sky    ).reshape(event.npos,event.maxnimpos)
    event.fp.goodfit = np.asarray(goodfit).reshape(event.npos,event.maxnimpos)
  # ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

  # Pixel R position:
  event.fp.r = np.sqrt((event.fp.x % 1.0 - 0.5)**2.0 +
                       (event.fp.y % 1.0 - 0.5)**2.0 )

  log.writelog("End frames centering.")

  # Save
  print("\nSaving")
  if event.denoised:
    me.saveevent(event, event.eventname + "_ctr", save=['dendata', 'data',
                                                        'uncd', 'mask'])
  else:
    me.saveevent(event, event.eventname + "_ctr", save=['data', 'uncd', 'mask'])

  # Print time elapsed and close log:
  cwd = os.getcwd() + "/"
  log.writelog("Output files (" + event.centerdir + "):")
  log.writelog("Data:")
  log.writelog(" " + cwd + event.eventname + "_ctr.dat")
  log.writelog(" " + cwd + event.eventname + "_ctr.h5")
  log.writelog("Log:")
  log.writelog(" " + cwd + event.logname)

  dt = t.hms_time(time.time()-tini)
  log.writeclose("\nEnd Centering. Time (h:m:s):  %s"%dt  +
                 "  (" + event.centerdir + ")")
  print("-------------  ------------\n")

  if hasattr(event, 'runp4') and event.runp4:
    os.chdir(event.eventdir)
    os.system("poet.py p4 %s/"%event.centerdir)
Example #8
def badpix(eventname, cwd):
    """
  Modification History:
  ---------------------
  2010-??-??  patricio  Initial Python implementation
  2014-08-13  garland   switched the pyfits package to astropy.io.fits
  	              [email protected] 
  2017-06-20  zacchaeus Fixed None comparisons
                        [email protected]
  """

    owd = os.getcwd()
    os.chdir(cwd)
    tini = time.time()

    # Load the event
    event = me.loadevent(eventname)
    # Load the data
    me.updateevent(event, eventname, event.loadnext)

    # Create a new log starting from the old one.
    oldlogname = event.logname
    logname = event.eventname + ".log"
    log = le.Logedit(logname, oldlogname)
    event.logname = logname
    log.writelog('\nMARK: ' + time.ctime() + ': Starting p2badpix.')

    # ccampo 3/18/2011: do this in p5
    # Julian observation date
    #event.fp.juldat = event.jdjf80 + event.fp.time / 86400.0

    # ::::::::::::::::::::::: UNCERTAINTIES ::::::::::::::::::::::::::::::::
    # IRAC subarray data come with bogus uncertainties that are not linearly
    # related to photon noise.  We scale them later, using the reduced chi
    # squared from the model fit.

    # ::::::::::::::::::::::: FLUX CONVERSION :::::::::::::::::::::::::::::
    # Do we want flux (uJy/pix) or surface brightness (MJy/sr) units?  If
    # doing photometry, convert to flux.  Since we care about relative
    # numbers, it doesn't really matter.

    # Convert from surface brightness (MJy/sr) to flux units (uJy/pix)
    if event.fluxunits:
        log.writelog('Converting surface brightness to flux')
        event.data, event.uncd = btf.poet_bright2flux(event.data, event.uncd,
                                                      event.posscl)
        if event.havepreflash:
            event.predata, event.preuncd = btf.poet_bright2flux(
                event.predata, event.preuncd, event.posscl)
        if event.havepostcal:
            event.postdata, event.postuncd = btf.poet_bright2flux(
                event.postdata, event.postuncd, event.posscl)

    else:
        log.writelog('Did not convert bright to flux.')

    # Mean Background Estimate, from zodi model
    event.estbg = (np.mean(event.fp.zodi[np.where(event.fp.exist)]) +
                   np.mean(event.fp.ism[np.where(event.fp.exist)]) +
                   np.mean(event.fp.cib[np.where(event.fp.exist)]))

    if event.fluxunits:
        event.estbg *= (event.srperas * 1e12 * np.mean(event.posscl[0, :]) *
                        np.mean(event.posscl[1, :]))

    # Bad Pixel Masking
    log.writelog('Find and fix bad pixels')

    # Get permanent bad pixel mask.
    if not event.ispmask[0]:
        log.writelog('\nPermanent Bad pixel mask not found!')
    else:
        hdu = fits.open(event.pmaskfile[0])
        if hdu[0].header['bitpix'] == -32:  # if data type is float
            hdu[0].scale(type='int16')  # cast it down to int16
        event.pmask = hdu[0].data

    # IRS FIX:
    # IRS data contains the blue peak subarray while its pmask contains
    # the whole array (Hard coding)
    if event.photchan == 5:
        event.pmask = event.pmask[3:59, 86:127]

    # Do NOT define sigma, we have a different scheme for finding baddies
    # adds Spitzer rejects: fp.nsstrej  &  our rejects: fp.nsigrej
    event.mask = pbm.poet_badmask(event.data,
                                  event.uncd,
                                  event.pmask,
                                  event.inst.pcrit,
                                  event.bdmskd,
                                  event.inst.dcrit,
                                  event.fp,
                                  nimpos=event.nimpos)

    # User rejected pixels:
    if event.userrej is not None:
        for i in range(np.shape(event.userrej)[0]):
            event.mask[:, event.userrej[i, 0], event.userrej[i, 1], :] = 0
        event.fp.userrej = np.sum(np.sum(1 - event.mask, axis=1), axis=1)
        event.fp.userrej = np.transpose(event.fp.userrej) - event.fp.nsstrej
    else:
        event.fp.userrej = np.zeros((event.npos, event.maxnimpos))

    # define sigma here.
    # adds median sky: fp.medsky
    event.meanim = pcb.poet_chunkbad(event.data, event.uncd, event.mask,
                                     event.nimpos, event.sigma, event.szchunk,
                                     event.fp, event.nscyc)

    log.writelog('Masks combined')

    # Repeat procedure for preflash and postcal data:
    if event.havepreflash:
        event.premask = pbm.poet_badmask(event.predata,
                                         event.preuncd,
                                         event.pmask,
                                         event.inst.pcrit,
                                         event.prebdmskd,
                                         event.inst.dcrit,
                                         event.prefp,
                                         nimpos=event.prenimpos)
        if event.userrej is not None:
            for i in range(np.shape(event.userrej)[0]):
                event.premask[:, event.userrej[i, 0],
                              event.userrej[i, 1], :] = 0
            event.prefp.userrej = np.sum(np.sum(1 - event.premask, axis=1),
                                         axis=1)
            event.prefp.userrej = np.transpose(
                event.prefp.userrej) - event.prefp.nsstrej
        else:
            event.prefp.userrej = np.zeros((event.npos, event.premaxnimpos))

        event.premeanim = pcb.poet_chunkbad(event.predata, event.preuncd,
                                            event.premask, event.prenimpos,
                                            event.sigma, event.szchunk,
                                            event.prefp, event.nscyc)
    if event.havepostcal:
        event.postmask = pbm.poet_badmask(event.postdata,
                                          event.postuncd,
                                          event.pmask,
                                          event.inst.pcrit,
                                          event.postbdmskd,
                                          event.inst.dcrit,
                                          event.postfp,
                                          nimpos=event.postnimpos)

        if event.userrej is not None:
            for i in range(np.shape(event.userrej)[0]):
                event.postmask[:, event.userrej[i, 0],
                               event.userrej[i, 1], :] = 0
            event.postfp.userrej = np.sum(np.sum(1 - event.postmask, axis=1),
                                          axis=1)
            event.postfp.userrej = np.transpose(event.postfp.userrej) - \
                                   event.postfp.nsstrej
        else:
            event.postfp.userrej = np.zeros((event.npos, event.postmaxnimpos))

        event.postmeanim = pcb.poet_chunkbad(event.postdata, event.postuncd,
                                             event.postmask, event.postnimpos,
                                             event.sigma, event.szchunk,
                                             event.postfp, event.nscyc)
        for pos in range(event.npos):
            fits.writeto(event.eventname + "_medpostcal.fits",
                         event.postmeanim[:, :, pos],
                         overwrite=True)

        # Delete post calibration data:
        event.havepostcal = False
        del (event.postdata)
        del (event.postmask)
        del (event.postuncd)
        del (event.postbdmskd)

    # Save the data
    if event.instrument == 'mips':
        todel = ['bdmskd', 'brmskd']  # what to delete
    else:
        todel = ['bdmskd']

    me.saveevent(event,
                 event.eventname + "_bpm",
                 save=['data', 'uncd', 'mask'],
                 delete=todel)

    # Print time elapsed and close log:
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + '/' + event.eventname + "_bpm.dat")
    log.writelog(" " + cwd + '/' + event.eventname + "_bpm.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + '/' + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose('\nBad pixel masking time (h:m:s):  %s ' % dt)

    os.chdir(owd)

    if event.runp3:
        #poet.p(3)
        os.system("python3 poet.py p3")
Example #9
def badpix(eventname, control=None):
    tini = time.time()

    # Load the event
    event = me.loadevent(eventname)
    # Load the data
    me.updateevent(event, eventname, event.loadnext)

    # Create a new log starting from the old one.
    oldlogname = event.logname
    logname = event.eventname + ".log"
    log = le.Logedit(logname, oldlogname)
    event.logname = logname
    log.writelog('\nMARK: ' + time.ctime() + ': Starting poet_2badpix.')

    # ccampo 3/18/2011: do this in p5
    # Julian observation date
    #event.fp.juldat = event.jdjf80 + event.fp.time / 86400.0

    # ::::::::::::::::::::::: UNCERTAINTIES ::::::::::::::::::::::::::::::::
    # IRAC subarray data come with bogus uncertainties that are not linearly
    # related to photon noise.  We scale them later, using the reduced chi
    # squared from the model fit.

    # ::::::::::::::::::::::: FLUX CONVERSION :::::::::::::::::::::::::::::
    # Do we want flux (uJy/pix) or surface brightness (MJy/sr) units?  If
    # doing photometry, convert to flux.  Since we care about relative
    # numbers, it doesn't really matter.

    # Convert from surface brightness (MJy/sr) to flux units (uJy/pix)
    if event.fluxunits:
        log.writelog('Converting surface brightness to flux')
        event.data, event.uncd = btf.poet_bright2flux(event.data, event.uncd,
                                                      event.posscl)
        if event.havecalaor:
            event.predata, event.preuncd = btf.poet_bright2flux(
                event.predata, event.preuncd, event.posscl)
            event.postdata, event.postuncd = btf.poet_bright2flux(
                event.postdata, event.postuncd, event.posscl)
    else:
        log.writelog('Did not convert bright to flux.')

    # Mean Background Estimate, from zodi model
    event.estbg = (np.mean(event.fp.zodi[np.where(event.fp.exist)]) +
                   np.mean(event.fp.ism[np.where(event.fp.exist)]) +
                   np.mean(event.fp.cib[np.where(event.fp.exist)]))

    if event.fluxunits:
        event.estbg *= (event.srperas * 1e12 * np.mean(event.posscl[0, :]) *
                        np.mean(event.posscl[1, :]))

    # Bad Pixel Masking
    log.writelog('Find and fix bad pixels')

    # Get permanent bad pixel mask.
    if not event.ispmask[0]:
        log.writelog('\nPermanent Bad pixel mask not found!')
    else:
        hdu = pf.open(str(event.pmaskfile[0].decode('utf-8')))
        if hdu[0].header['bitpix'] == -32:  # if data type is float
            hdu[0].scale(type='int16')  # cast it down to int16
        event.pmask = hdu[0].data

    # IRS FIX:
    # IRS data contains the blue peak subarray while its pmask contains
    # the whole array (Hard coding)
    if event.photchan == 5:
        event.pmask = event.pmask[3:59, 86:127]

    # Do NOT define sigma, we have a different scheme for finding baddies
    # adds Spitzer rejects: fp.nsstrej  &  our rejects: fp.nsigrej
    event.mask = pbm.poet_badmask(event.data,
                                  event.uncd,
                                  event.pmask,
                                  event.inst.pcrit,
                                  event.bdmskd,
                                  event.inst.dcrit,
                                  event.fp,
                                  nimpos=event.nimpos)

    # User rejected pixels:
    if event.userrej is not None:
        for i in np.arange(np.shape(event.userrej)[0]):
            event.mask[:, event.userrej[i, 0], event.userrej[i, 1], :] = 0
        event.fp.userrej = np.sum(np.sum(1 - event.mask, axis=1), axis=1)
        event.fp.userrej = np.transpose(event.fp.userrej) - event.fp.nsstrej
    else:
        event.fp.userrej = np.zeros((int(event.npos), int(event.maxnimpos)),
                                    dtype=int)

    # define sigma here.
    # adds median sky: fp.medsky
    event.meanim = pcb.poet_chunkbad(event.data, event.uncd, event.mask,
                                     event.nimpos, event.sigma, event.szchunk,
                                     event.fp, event.nscyc)

    log.writelog('Masks combined')

    if event.havecalaor:
        event.premask = pbm.poet_badmask(event.predata,
                                         event.preuncd,
                                         event.pmask,
                                         event.inst.pcrit,
                                         event.prebdmskd,
                                         event.inst.dcrit,
                                         event.prefp,
                                         nimpos=event.calnimpos)

        event.premeanim = pcb.poet_chunkbad(event.predata, event.preuncd,
                                            event.premask, event.calnimpos,
                                            event.sigma, event.szchunk,
                                            event.prefp, event.nscyc)

        event.postmask = pbm.poet_badmask(event.postdata,
                                          event.postuncd,
                                          event.pmask,
                                          event.inst.pcrit,
                                          event.postbdmskd,
                                          event.inst.dcrit,
                                          event.postfp,
                                          nimpos=event.calnimpos)

        event.postmeanim = pcb.poet_chunkbad(event.postdata, event.postuncd,
                                             event.postmask, event.calnimpos,
                                             event.sigma, event.szchunk,
                                             event.postfp, event.nscyc)

    # Save the data

    if event.instrument == 'mips':
        todel = ['bdmskd', 'brmskd']  # what to delete
    else:
        todel = ['bdmskd']

    me.saveevent(event,
                 event.eventname + "_bpm",
                 save=['data', 'uncd', 'mask'],
                 delete=todel)

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files:")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_bpm.dat")
    log.writelog(" " + cwd + event.eventname + "_bpm.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose('\nBad pixel masking time (h:m:s):  %s ' % dt)
Example #10
def photometry(event, pcf, photdir, mute, owd):

    tini = time.time()

    # Create photometry log
    logname = event.logname
    log = le.Logedit(photdir + "/" + logname, logname)
    log.writelog("\nStart " + photdir + " photometry: " + time.ctime())

    parentdir = os.getcwd() + "/"
    os.chdir(photdir)

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        setattr(event, key, attrib.get(key))

    maxnimpos, npos = event.maxnimpos, event.npos
    # allocating frame parameters:
    event.fp.aplev = np.zeros((npos, maxnimpos))
    event.fp.aperr = np.zeros((npos, maxnimpos))
    event.fp.nappix = np.zeros((npos, maxnimpos))
    event.fp.skylev = np.zeros((npos, maxnimpos))
    event.fp.skyerr = np.zeros((npos, maxnimpos))
    event.fp.nskypix = np.zeros((npos, maxnimpos))
    event.fp.nskyideal = np.zeros((npos, maxnimpos))
    event.fp.status = np.zeros((npos, maxnimpos))
    event.fp.good = np.zeros((npos, maxnimpos))

    # For interpolated aperture photometry, we need to "interpolate" the
    # mask, which requires float values. Thus, we convert the mask to
    # floats (this needs to be done before processes are spawned or memory
    # usage balloons).
    if event.mask.dtype != float:
        event.mask = event.mask.astype(float)

    # Aperture photometry:
    if event.phottype == "aper":  # not event.dooptimal or event.from_aper is None:

        # Multiprocessing setup:
        # Shared memory arrays allow only 1D Arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        event.aparr = np.ones(npos * maxnimpos) * event.photap + event.offset

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "var":  # variable aperture radius

        # Multiprocessing setup:
        # Shared memory arrays allow only 1D Arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        event.aparr = event.fp.noisepix[0]**.5 * event.photap + event.offset
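        # Here the per-frame aperture radius scales with the square root of the
        # measured noise-pixel parameter, times photap, plus a fixed offset.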

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "ell":  # elliptical
        # Multiprocessing setup:
        # Shared memory arrays allow only 1D Arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "psffit":
        event.fp.aplev = event.fp.flux
        event.fp.skylev = event.fp.psfsky
        event.fp.good = np.zeros((event.npos, event.maxnimpos))
        for pos in range(event.npos):
            event.fp.good[pos, 0:event.nimpos[pos]] = 1

    elif event.phottype == "optimal":
        # utils for profile construction:
        pshape = np.array([2 * event.otrim + 1, 2 * event.otrim + 1])
        subpsf = np.zeros(np.asarray(pshape, int) * event.expand)
        x = np.indices(pshape)

        clock = t.Timer(np.sum(event.nimpos),
                        progress=np.array([0.05, 0.1, 0.25, 0.5, 0.75, 1.1]))

        for pos in range(npos):
            for i in range(event.nimpos[pos]):

                # Nearest-integer center of the subimage:
                cen = np.rint([event.fp.y[pos, i], event.fp.x[pos, i]])
                # Center in the trimmed image:
                loc = (event.otrim, event.otrim)
                # Do the trim:
                img, msk, err = ie.trimimage(event.data[i, :, :, pos],
                                             *cen,
                                             *loc,
                                             mask=event.mask[i, :, :, pos],
                                             uncd=event.uncd[i, :, :, pos])

                # Center of star in the subimage:
                ctr = (event.fp.y[pos, i] - cen[0] + event.otrim,
                       event.fp.x[pos, i] - cen[1] + event.otrim)

                # Make profile:
                # Index of the position in the supersampled PSF:
                pix = pf.pos2index(ctr, event.expand)
                profile, pctr = pf.make_psf_binning(event.psfim, pshape,
                                                    event.expand,
                                                    [pix[0], pix[1], 1.0, 0.0],
                                                    event.psfctr, subpsf)

                #subtract the sky level:
                img -= event.fp.psfsky[pos, i]
                # optimal photometry calculation:
                immean, uncert, good = op.optphot(img,
                                                  profile,
                                                  var=err**2.0,
                                                  mask=msk)

                event.fp.aplev[pos, i] = immean
                event.fp.aperr[pos, i] = uncert
                event.fp.skylev[pos, i] = event.fp.psfsky[pos, i]
                event.fp.good[pos, i] = good

                # Report progress:
                clock.check(np.sum(event.nimpos[0:pos]) + i,
                            name=event.centerdir)

    # START PREFLASH EDIT :::::::::::::::::::::::::::::::::::::

    # Do aperture on preflash data:
    if event.havepreflash:
        print("\nStart preflash photometry:")
        premaxnimpos = event.premaxnimpos
        preaplev = Array("d", np.zeros(npos * premaxnimpos))
        preaperr = Array("d", np.zeros(npos * premaxnimpos))
        prenappix = Array("d", np.zeros(npos * premaxnimpos))
        preskylev = Array("d", np.zeros(npos * premaxnimpos))
        preskyerr = Array("d", np.zeros(npos * premaxnimpos))
        preskynpix = Array("d", np.zeros(npos * premaxnimpos))
        preskyideal = Array("d", np.zeros(npos * premaxnimpos))
        prestatus = Array("d", np.zeros(npos * premaxnimpos))
        pregood = Array("d", np.zeros(npos * premaxnimpos))

        # Start process:
        mute = False
        proc = Process(target=do_aphot,
                       args=(0, event.prenimpos[0], event, log, mute, preaplev,
                             preaperr, prenappix, preskylev, preskyerr,
                             preskynpix, preskyideal, prestatus, pregood, 1))
        proc.start()
        proc.join()

        # Put the results in the event. I need to reshape them:
        event.prefp.aplev = np.asarray(preaplev).reshape(npos, premaxnimpos)
        event.prefp.aperr = np.asarray(preaperr).reshape(npos, premaxnimpos)
        event.prefp.nappix = np.asarray(prenappix).reshape(npos, premaxnimpos)
        event.prefp.status = np.asarray(prestatus).reshape(npos, premaxnimpos)
        event.prefp.skylev = np.asarray(preskylev).reshape(npos, premaxnimpos)
        event.prefp.good = np.asarray(pregood).reshape(npos, premaxnimpos)

        # raw photometry (no sky subtraction):
        event.prefp.aplev = (event.prefp.aplev +
                             (event.prefp.skylev * event.prefp.nappix))
        # END PREFLASH EDIT :::::::::::::::::::::::::::::::::::::::

    if event.method in ["bpf"]:
        event.ispsf = False

    # PSF aperture correction:
    if event.ispsf and event.phottype == "aper":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1
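        # apphot_c below measures how much of the supersampled PSF image falls
        # inside the same aperture (radii scaled by psfexpand to match the
        # supersampled PSF); dividing aplev and aperr by that fraction applies
        # the aperture correction.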

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.apphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     event.photap * event.psfexpand,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    if event.ispsf and event.phottype == "var":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1

        avgap = np.mean(event.aparr)

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.apphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     avgap        * event.psfexpand,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    if event.ispsf and event.phottype == "ell":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1

        avgxwid = np.mean(event.fp.xsig * event.photap)
        avgywid = np.mean(event.fp.ysig * event.photap)
        avgrot = np.mean(event.fp.rot)

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.elphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     avgxwid * event.psfexpand,
                                     avgywid * event.psfexpand,
                                     avgrot,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    # Sadly we must do photometry for every aperture used
    # Possibly use a range and interpolate? Might be an option
    # for the future to speed this up.
    # This is commented out, as it seems to just remove the corrections
    # made by variable or elliptical photometry
    # if event.ispsf and (event.phottype == "var" or event.phottype == "ell"):
    #   log.writelog('Calculating PSF aperture. This may take some time.')
    #   event.psfim = event.psfim.astype(np.float64)

    #   imerr = np.ones(np.shape(event.psfim))
    #   imask = np.ones(np.shape(event.psfim))
    #   skyfrac = 0.1

    #   aperfrac     = Array("d", np.zeros(npos*maxnimpos))# psf flux
    #   aperfracerr  = Array("d", np.zeros(npos*maxnimpos))# psf flux error
    #   psfnappix    = Array("d", np.zeros(npos*maxnimpos))# psf aperture pix num
    #   psfsky       = Array("d", np.zeros(npos*maxnimpos))# psf sky level
    #   psfskyerr    = Array("d", np.zeros(npos*maxnimpos))# psf sky error
    #   psfnskypix   = Array("d", np.zeros(npos*maxnimpos))# psf sky pix num
    #   psfnskyideal = Array("d", np.zeros(npos*maxnimpos))# psf ideal sky pix num
    #   psfstatus    = Array("d", np.zeros(npos*maxnimpos))# psf return status
    #   psfgood      = Array("d", np.zeros(npos*maxnimpos))# psf good flag

    #   processes=[]
    #   for nc in range(event.ncores):
    #     start =  nc    * chunksize
    #     end   = (nc+1) * chunksize
    #     proc = Process(target=do_aphot_psf, args=(start, end, event, log, mute,
    #                                               aperfrac, aperfracerr,
    #                                               psfnappix,
    #                                               psfsky, psfskyerr,
    #                                               psfnskypix, psfnskyideal,
    #                                               psfstatus, psfgood))

    #     processes.append(proc)
    #     proc.start()

    #   for nc in range(event.ncores):
    #     processes[nc].join()

    #   # Reshape
    #   event.aperfrac     = np.asarray(aperfrac    ).reshape(npos,maxnimpos)
    #   event.aperfracerr  = np.asarray(aperfracerr ).reshape(npos,maxnimpos)
    #   event.psfnappix    = np.asarray(psfnappix   ).reshape(npos,maxnimpos)
    #   event.psfsky       = np.asarray(psfsky      ).reshape(npos,maxnimpos)
    #   event.psfskyerr    = np.asarray(psfskyerr   ).reshape(npos,maxnimpos)
    #   event.psfnskypix   = np.asarray(psfnskypix  ).reshape(npos,maxnimpos)
    #   event.psfnskyideal = np.asarray(psfnskyideal).reshape(npos,maxnimpos)
    #   event.psfstatus    = np.asarray(psfstatus   ).reshape(npos,maxnimpos)
    #   event.psfgood      = np.asarray(psfgood     ).reshape(npos,maxnimpos)

    #   event.aperfrac += event.psfsky * event.psfnappix

    #   event.fp.aplev /= event.aperfrac
    #   event.fp.aperr /= event.aperfrac

    #   log.writelog('Aperture contains average %f of PSF.'%np.mean(event.aperfrac))

    # save
    print("\nSaving ...")
    # denoised data:
    if event.denphot:
        killdata = 'dendata'
    else:
        killdata = 'data'
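    # Drop the largest arrays (the image cube in use, plus uncd and mask) from
    # the pickled event so the _pht save stays small.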
    me.saveevent(event,
                 event.eventname + "_pht",
                 delete=[killdata, 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files (" + event.photdir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_pht.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Photometry. Time (h:m:s):  %s " % dt + "  (" +
                   photdir + ")")
    print("--------------  ------------\n")

    os.chdir(owd)

    if event.runp5:
        os.system("python3 poet.py p5 %s/%s" %
                  (event.centerdir, event.photdir))
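
The three aperture branches above repeat one pattern: multiprocessing.Array only
exposes flat 1D buffers, so each worker fills its chunk of frames in the flat
arrays and the parent reshapes them to (npos, maxnimpos) after join(). Below is
a minimal, self-contained sketch of that pattern; toy_worker and the sizes are
illustrative only, not pipeline code.

import numpy as np
from multiprocessing import Array, Process


def toy_worker(start, end, aplev, npos, maxnimpos):
    # Fill the flat shared buffer for frames [start, end) at every position.
    for pos in range(npos):
        for i in range(start, min(end, maxnimpos)):
            aplev[pos * maxnimpos + i] = 100.0 * pos + i  # stand-in for an apphot result


if __name__ == "__main__":
    npos, maxnimpos, ncores = 2, 10, 4
    aplev = Array("d", np.zeros(npos * maxnimpos))   # flat shared buffer
    chunksize = maxnimpos // ncores + 1              # frames per core, as above
    processes = []
    for nc in range(ncores):
        proc = Process(target=toy_worker,
                       args=(nc * chunksize, (nc + 1) * chunksize,
                             aplev, npos, maxnimpos))
        processes.append(proc)
        proc.start()
    for proc in processes:
        proc.join()
    # Same recovery step as the pipeline: flat buffer -> (npos, maxnimpos)
    print(np.asarray(aplev).reshape(npos, maxnimpos))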
Example #11
def poetSave(event, directory='../'):
    me.saveevent(event, directory + "/" + event.eventname + "_p5c")
Example #12
def denoise(event, pcf, denoisedir):
    tini = time.time()

    # Create denoising log
    logname = event.logname
    log = le.Logedit(denoisedir + "/" + logname, logname)
    log.writelog("\nStart " + denoisedir + " denoising: " + time.ctime())

    os.chdir(denoisedir)

    # copy denoise.pcf in denoisedir
    pcf.make_file("denoise.pcf")

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        if key != 'srcest':
            setattr(event, key, attrib.get(key).get())

    for pos in range(event.npos):
        # Plot histogram of noisy wavelet coefficients
        ylim = histwc(event,
                      event.wavelet,
                      event.numlvls + 1,
                      pos,
                      log=log,
                      denoised=False)
        # Plot first 'length' frames of noisy lightcurve at pixel srcest
        plotlc(event, pos, length=200, denoised=False)
        '''
        maxlvls  = pywt.dwt_max_level(event.nimpos[pos], pywt.Wavelet(event.wavelet))
        # Determine the number of levels to denoise
        for i in range(1,maxlvls+1):
            if (2**i)*event.framtime < event.maxtime:
                numlvls = i
            else:
                break
        '''
        log.writelog("Denoising will occur on the lowest " +
                     str(event.numlvls) + " levels at position " + str(pos) +
                     ".")
        # Determine the time resolution of the highest denoised level
        timeres = 2**(event.numlvls) * event.framtime
        log.writelog("Time resolution for position " + str(pos) + ", level " +
                     str(event.numlvls) + " is " + str(timeres) + " seconds.")

        # Assess presence of NaNs and Infs in masked data
        print("Checking for NaNs and Infs.")
        data = (event.data[:, :, :, pos])[np.where(event.mask[:, :, :, pos])]
        if (np.sum(np.isnan(data)) + np.sum(np.isinf(data))) > 0:
            log.writelog(
                "***WARNING: Found NaNs and/or Infs in masked data at position "
                + str(pos) + ".")

        del data
        pool = mp.Pool(event.ncpu)
        # Resolve the thresholding function (e.g. bayesshrink) by name instead of
        # assembling the call with exec(); event.threshold holds the function
        # name, assumed to live in this module's namespace.
        threshfunc = globals()[event.threshold]
        for i in range(event.nx):
            for j in range(event.ny):
                # Serial equivalent, useful for debugging:
                #   res = threshfunc((event.data[:,j,i,pos])[np.where(event.mask[:,j,i,pos])],
                #                    event.wavelet, event.numlvls, [j,i,pos])
                #   writedata(res)
                pool.apply_async(
                    threshfunc,
                    ((event.data[:, j, i, pos])[np.where(event.mask[:, j, i, pos])],
                     event.wavelet, event.numlvls, [j, i, pos]),
                    callback=writedata)

        pool.close()
        pool.join()
        #res.wait()

        #Plot histogram of denoised wavelet coefficients
        histwc(event,
               event.wavelet,
               event.numlvls + 1,
               pos,
               log=log,
               denoised=True,
               ylim=ylim)
        # Plot first 'length' frames of denoised lightcurve at pixel srcest
        plotlc(event, pos, length=200, denoised=True)

    # Save
    print("\nFinished Denoising. Saving.")
    me.saveevent(event,
                 event.eventname + "_den",
                 save=['data', 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files (" + event.denoisedir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_den.dat")
    log.writelog(" " + cwd + event.eventname + "_den.h5")
    log.writelog("Log:")
    log.writelog(" " + cwd + event.logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Denoising. Time (h:m:s):  %s" % dt + "  (" +
                   event.denoisedir + ")")
    print("-------------  ------------\n")
    return
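
For reference, a generic wavelet-threshold sketch of the per-pixel denoising
dispatched above, using pywt directly. The pipeline's thresholding functions
(named by event.threshold, e.g. bayesshrink) presumably follow this general
decompose/threshold/reconstruct pattern; the wavelet, level, and threshold
values here are placeholders.

import numpy as np
import pywt


def soft_threshold_denoise(timeseries, wavelet='db4', numlvls=3, thresh=0.1):
    # Decompose, soft-threshold the detail coefficients, reconstruct.
    coeffs = pywt.wavedec(timeseries, wavelet, level=numlvls)
    coeffs[1:] = [pywt.threshold(c, thresh, mode='soft') for c in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet)


if __name__ == "__main__":
    t = np.linspace(0, 1, 256)
    clean = np.sin(2 * np.pi * 3 * t)
    noisy = clean + 0.1 * np.random.randn(t.size)
    denoised = soft_threshold_denoise(noisy)
    print("rms before: %.3f  after: %.3f" %
          ((noisy - clean).std(), (denoised - clean).std()))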
Example #13
File: w3lc.py  Project: kevin218/WFC3
def lcWFC3(eventname,
           eventdir,
           nchan,
           wmin=1.125,
           wmax=1.65,
           expand=1,
           isplots=True):
    '''
    Compute photometric flux over specified range of wavelengths
    
    Parameters
    ----------
    eventname   : Unique identifier for these data
    eventdir    : Location of save file
    nchan       : Number of spectrophotometric channels
    wmin        : minimum wavelength (microns)
    wmax        : maximum wavelength (microns)
    expand      : expansion factor
    isplots     : Set True to produce plots
    
    Returns
    -------
    None
    
    History
    -------
    Written by Kevin Stevenson      June 2012
    
    '''

    # Load saved data
    # An event is an instance of an object
    #   loadevent: load the saved files from storage
    #       container for all of the data
    #           event: small data structures (i.e. light curves)
    #           aux:   large data structures (i.e. image cubes, etc)
    #
    #       i.e. event.BJDTDB : returns the time array
    #            aux.spectra  : 1D spectra per NDR
    #            aux.specerr  : 1D spectra error per NDR
    #            aux.data_mhdr: master header per frame
    #
    print("Loading saved data...")
    ev = me.loadevent(eventdir + '/d-' + eventname + '-w1')
    aux = me.loadevent(eventdir + '/d-' + eventname + '-data')
    ev.spectra = aux.spectra
    specerr = aux.specerr
    data_mhdr = aux.data_mhdr

    # Determine wavelength bins
    binsize = (wmax - wmin) / nchan  # width in bins
    wave_low = np.round([i for i in np.linspace(wmin, wmax - binsize, nchan)],
                        3)  # Left  edge of the wavelength bins
    wave_hi = np.round([i for i in np.linspace(wmin + binsize, wmax, nchan)],
                       3)  # Right edge of the wavelength bins
    # binwave     = (wave_low + wave_hi)/2. # Middle of wavelength bin
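    # Worked example: wmin=1.125, wmax=1.65, nchan=5 gives binsize=0.105,
    # wave_low = [1.125, 1.23, 1.335, 1.44, 1.545] and
    # wave_hi  = [1.23, 1.335, 1.44, 1.545, 1.65], i.e. contiguous channels.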

    # Increase resolution of spectra: uses scipy.ndimage.zoom (spni.zoom) to oversample the spectra with a flux-conserving interpolation
    if expand > 1:
        # note: ev.n_spec : number of spectra per frame :: hopefully just one
        print("Increasing spectra resolution...")
        # ev.spectra.shape[3] : wavelength (dispersion) dimension
        hdspectra = np.zeros(
            (ev.n_files, ev.n_spec, ev.n_reads - 1, expand *
             ev.spectra.shape[3]))  # hdspectra : high definition spectra
        hdspecerr = np.zeros(
            (ev.n_files, ev.n_spec, ev.n_reads - 1, expand *
             ev.spectra.shape[3]))  # hdspecerr : high definition spectra error
        hdwave = np.zeros((ev.n_img, ev.n_spec, expand * ev.spectra.shape[3]
                           ))  # hdwave    : high definition wavelength array

        # This is the 'zoom' step
        for n in range(
                ev.n_spec):  # per spectrum on the image (n_spec == 1 for WFC3)
            # This operates over all 1D stellar spectrum (over time) at once
            hdspectra[:, n] = spni.zoom(ev.spectra[:, n], zoom=[1, 1, expand])
            hdspecerr[:, n] = spni.zoom(specerr[:, n],
                                        zoom=[1, 1, expand]) * np.sqrt(expand)
        for m in range(ev.n_img):  # n_img : number of direct images
            # Some visits have a new wavelength solution per orbit
            for n in range(ev.n_spec):
                hdwave[m, n] = spni.zoom(ev.wave[m][n], zoom=expand)

        # Store high defition spectra
        ev.spectra = hdspectra
        specerr = hdspecerr
        ev.wave = hdwave

    # Correct for drift, if calculated
    if hasattr(ev, 'drift_model') and ev.drift_model is not None:
        # Correct for drift :: map the motion of the spectrum across the detector
        #   provides higher precision on the wavelength solution
        print(
            'Applying drift correction... (Old stare-mode version, may not work)'
        )
        # Staring Mode Operations
        # ev.drift_model is defined in `w1`
        nx = ev.spectra.shape[
            3]  # number of pixels in the wavelength direction # FINDME: CHANGED the SHAPE[2] to a SHAPE[3]
        for m in range(ev.n_files):  # iterate over time
            for n in range(ev.n_spec
                           ):  # iterate over number of spectra (ideally == 1)
                spline = spi.UnivariateSpline(
                    np.arange(nx), ev.spectra[m, n], k=3,
                    s=0)  # Compute the spline for the shift
                ev.spectra[m, n] = spline(
                    np.arange(nx) +
                    ev.drift_model[n, m])  # Shifts the spectrum
        # finished Stare-mode operations
    elif ev.detector == 'IR':
        # This is for Scanning Mode
        #Calculate drift over all frames and non-destructive reads
        print('Applying drift correction...')
        # hst.drift_fit calculates the drift in the 1D spectra :: Does a cross correlation
        #   ev      : class with the data
        #   preclip : left edge of spectrum
        #   preclip : right  edge of spectrum
        ev.drift, ev.drift_model, ev.goodmask = hst.drift_fit(ev,
                                                              preclip=0,
                                                              postclip=None,
                                                              width=5 * expand,
                                                              deg=2,
                                                              validRange=11 *
                                                              expand)

        # Correct for drift
        if ev.n_reads > 2:
            # Throw away the first NDR -- it's bad
            print('WARNING: Marking all first reads as bad.')
            istart = 1
        else:
            print('Using first reads.')
            istart = 0

        # Apply the Drift correction (fancy word for spline)
        nx = ev.spectra.shape[
            3]  # number of pixels in the wavelength direction
        for m in range(ev.n_files):  # iterate over time
            for n in range(ev.n_spec
                           ):  # iterate over number of spectra (ideally == 1)
                for p in range(istart, ev.n_reads - 1):
                    # Compute the spline for the shift
                    spline = spi.UnivariateSpline(np.arange(nx),
                                                  ev.spectra[m, n, p],
                                                  k=3,
                                                  s=0)

                    # Using measured drift, not model fit
                    # `model fit` comes from the ev.drift_model
                    # `measured drift` comes from spline of order 3
                    ev.spectra[m, n, p] = spline(
                        np.arange(nx) +
                        ev.drift[n, m, p])  # Shifts the spectrum

        #Apply scan height correction
        #print('Applying scan height correction...')
        #ev.spectra  += ev.spectra[0,0]*(1-ev.scanHeight[:,:,np.newaxis,np.newaxis]/ev.scanHeight[0,0])
        #ev.spectra /= ev.scanHeight[:,:,np.newaxis,np.newaxis]
        #specerr    /= ev.scanHeight[:,:,np.newaxis,np.newaxis]
    else:
        # UVIS Stuff
        istart = 0

    # Assign scan direction: 0:forward vs 1:reverse
    ev.scandir = np.zeros(
        ev.n_files)  # Sets up all images as forward scan: modify later
    ev.n_scan0 = 0  # Number of forward scans
    ev.n_scan1 = 0  # Number of reverse scans

    try:
        # POSTARG2 differs between forward and reverse scans
        scan0 = data_mhdr[0]['POSTARG2']  # value from the first file (direction 0)
        scan1 = data_mhdr[1]['POSTARG2']  # value from the second file (direction 1)
        for m in range(ev.n_files):
            # For every file, check whether the header's POSTARG2 matches scan0 or scan1
            if data_mhdr[m]['POSTARG2'] == scan0:
                # Sum up number of forward scan
                ev.n_scan0 += 1
            elif data_mhdr[m]['POSTARG2'] == scan1:
                # Store scandir == 1 for reverse scanning
                ev.scandir[m] = 1
                # Sum up number of reverse scan
                ev.n_scan1 += 1
            else:
                # POSTARG2 matches neither scan direction
                print('WARNING: Unknown scan direction for file ' + str(m) +
                      '.')

        print("# of files in scan direction 0: " + str(ev.n_scan0))
        print("# of files in scan direction 1: " + str(ev.n_scan1))
    except:
        ev.n_scan0 = ev.n_files
        print("Unable to determine scan direction, assuming unidirectional.")

    print("Generating light curves...")
    ev.eventname2 = ev.eventname  # Store old event name
    for i in range(nchan):
        ev.wave_low = wave_low[i]
        ev.wave_hi = wave_hi[i]
        print("Bandpass = %.3f - %.3f" % (ev.wave_low, ev.wave_hi))

        # Calculate photometric flux for each spectrum
        ev.photflux = np.zeros(
            (ev.n_spec, ev.n_files, np.max(
                (1, ev.n_reads -
                 2))))  # This becomes the light curve (to be populated)
        ev.photfluxerr = np.zeros(
            (ev.n_spec, ev.n_files, np.max((1, ev.n_reads - 2))
             ))  # This becomes the light curve error bars (to be populated)
        # ev.wave         = []
        for n in range(ev.n_spec):  # hopefully == 1
            if ev.detector == 'IR':
                # Compute common wavelength and indices to apply over all observations
                wave = np.zeros(len(
                    ev.wave[0][n]))  # To be the global wavelength array
                for j in range(ev.n_img):  # iterate over each image
                    wave += ev.wave[j][n]

                wave /= ev.n_img

                # wave  = np.mean(ev.wave, axis=0) # FINDME: TEST LATER

                # index == where wave meets BOTH requirement
                # Which indices in the mean wavelength (`wave`) fall between the individual channel boundaries
                index = np.where(
                    np.bitwise_and(wave >= wave_low[i], wave <= wave_hi[i]))[0]
                # index = np.where((wave >= wave_low[i])*(wave <= wave_hi[i]))[0] # FINDME: TEST LATER
            else:
                # UVIS: Use all pixels for aperture photometry
                index = range(len(ev.spectra[0, 0, 0]))

            for m in range(ev.n_files):
                '''
                # This is a different way to compute the indices to associate with columns to be summed into 1D spectra
                # Select appropriate orbit-dependent wavelength
                if ev.n_img == (np.max(ev.orbitnum)+1):
                    j = int(ev.orbitnum[m])
                else:
                    j = 0
                #Method 1
                ev.wave.append(np.mean(ev.wavegrid[j][n],axis=0))
                index = np.where(np.bitwise_and(ev.wave[n] >= wave_low, ev.wave[n] <= wave_hi))[0]
                #Method 2
                index = np.where(np.bitwise_and(ev.wave[j][n] >= wave_low, ev.wave[j][n] <= wave_hi))[0]
                '''

                # This creates a light curve per NDR
                ev.photflux[n, m] = np.sum(
                    ev.spectra[m, n, istart:, index], axis=0
                )  # Summing in the 1D spectral plane (n_spec == 1 for WFC3)
                ev.photfluxerr[n, m] = np.sqrt(
                    np.sum(specerr[m, n, istart:, index]**2, axis=0)
                )  # Summing in quadrature in the 1D spectral plane (n_spec == 1 for WFC3)

        # Save results for individual channels into individual files
        ev.eventname = ev.eventname2 + '_' + str(int(
            ev.wave_low * 1e3)) + '_' + str(int(ev.wave_hi * 1e3))
        # me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3', delete=['data_mhdr', 'spectra', 'specerr'])

        # saveevent stores everything (that we want) into a pickle
        me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3')

        # Produce plot
        if isplots == True:
            # 3XYZ: 3: w3 function
            #       X: Plot Number
            #       Y & Z: Spectral Channel Number
            #
            # Normalized Flux vs Time
            plt.figure(3000 + i, figsize=(10, 8))
            plt.clf()  # this clears the frame
            plt.suptitle('Wavelength range: ' + str(wave_low[i]) + '-' +
                         str(wave_hi[i]))
            ax = plt.subplot(111)
            #for n in range(ev.n_spec):
            #plt.subplot(ev.n_spec,1,1)
            #plt.title('Star ' + str(n))
            #igood   = np.where(ev.goodmask[0])[0]
            iscan0 = np.where(ev.scandir == 0)[0]
            iscan1 = np.where(ev.scandir == 1)[0]
            mjd = np.floor(ev.bjdtdb[0])
            flux0 = np.sum(ev.photflux[0][iscan0], axis=1) / np.sum(
                ev.photflux[0, [iscan0[-1]]])
            #err  = np.sqrt(1 / np.sum(1/ev.photfluxerr[0]**2,axis=1))/np.sum(ev.photflux[0,-1])
            try:
                err0 = np.sqrt(np.sum(ev.photfluxerr[0][iscan0]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
            except:
                err0 = 0
                # err1    = 0
            plt.errorbar(ev.bjdtdb[iscan0] - mjd, flux0, err0, fmt='bo')
            plt.text(
                0.05,
                0.1,
                "MAD = " +
                str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux0))))) +
                " ppm",
                transform=ax.transAxes,
                color='b')
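            # "MAD" here is the median absolute point-to-point difference of the
            # normalized flux (np.ediff1d), quoted in ppm, as a quick scatter metric.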
            if len(iscan1) > 0:
                flux1 = np.sum(ev.photflux[0][iscan1], axis=1) / np.sum(
                    ev.photflux[0, [iscan0[-1]]])
                err1 = np.sqrt(np.sum(ev.photfluxerr[0][iscan1]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan1[-1]]])
                plt.errorbar(ev.bjdtdb[iscan1] - mjd, flux1, err1, fmt='ro')
                plt.text(
                    0.05,
                    0.05,
                    "MAD = " +
                    str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux1))))) +
                    " ppm",
                    transform=ax.transAxes,
                    color='r')
            plt.ylabel('Normalized Flux')
            plt.xlabel('Time [MJD + ' + str(mjd) + ']')

            plt.subplots_adjust(left=0.10,
                                right=0.95,
                                bottom=0.10,
                                top=0.90,
                                hspace=0.20,
                                wspace=0.3)
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(3000 + i) + '.png')
            #plt.pause(0.1)

            if ev.detector == 'IR':
                # Drift: frame number vs drift in the wavelength direction
                plt.figure(3100 + i, figsize=(10, 8))
                plt.clf()
                for r in range(istart, ev.n_reads - 1):
                    plt.subplot(1, np.max((1, ev.n_reads - 2)), np.max((1, r)))
                    plt.plot(ev.drift[0, :, r], '.')
                    if r == istart:
                        plt.ylabel('Spectrum Drift')
                    if r == (ev.n_reads - 1) / 2:
                        plt.xlabel('Frame Number')
                plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                            str(3100) + '.png')

    if (isplots == True) and (ev.detector == 'IR'):
        # 2D light curve with drift correction
        # Plot frame number vs wavelength with color associated with value
        #   Very cool plot that produces "image" of the entire time series (hopefully corrected)
        plt.figure(3200, figsize=(8, ev.n_files / 20. + 0.8))
        plt.clf()
        vmin = 0.97
        vmax = 1.03
        # istart      = 0
        normspec = np.mean(ev.spectra[:, 0, istart:], axis=1) / np.mean(
            ev.spectra[-6:, 0, istart:], axis=(0, 1))
        ediff = np.zeros(ev.n_files)
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        for i in range(ev.n_files):
            ediff[i] = 1e6 * np.median(
                np.abs(np.ediff1d(normspec[i, iwmin:iwmax])))
            plt.scatter(ev.wave[0][0],
                        np.zeros(ev.specsize) + i,
                        c=normspec[i],
                        s=14,
                        linewidths=0,
                        vmin=vmin,
                        vmax=vmax,
                        marker='s',
                        cmap=plt.cm.RdYlBu_r)
        plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
        plt.xlim(wmin, wmax)
        if nchan > 1:
            xticks = np.round([i for i in np.linspace(wmin, wmax, nchan + 1)],
                              3)
            plt.xticks(xticks, xticks)
            plt.vlines(xticks, 0, ev.n_files, 'k', 'dashed')
        plt.ylim(0, ev.n_files)
        plt.ylabel('Frame Number')
        plt.xlabel(r'Wavelength ($\mu m$)')
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(eventdir + '/figs/fig3200-2D_LC.png')
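
A minimal sketch of the spline-based shift used in both drift-correction
branches above: fit an interpolating cubic spline to a 1D spectrum and resample
it at pixel positions offset by the measured drift. The toy spectrum and drift
value are placeholders, not pipeline data.

import numpy as np
import scipy.interpolate as spi

nx = 60
pix = np.arange(nx)
spectrum = np.exp(-0.5 * ((pix - 30.0) / 4.0)**2)  # toy 1D spectrum
drift = 0.37                                       # measured shift in pixels

# Same call pattern as the loops above: k=3, s=0 gives an interpolating spline,
# and evaluating it at pix + drift shifts the spectrum by the measured amount.
spline = spi.UnivariateSpline(pix, spectrum, k=3, s=0)
shifted = spline(pix + drift)
print(np.argmax(spectrum), np.argmax(shifted))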
Example #14
File: w4ld.py  Project: kevin218/WFC3
def ld_driver(eventname,
              eventdir,
              wave_low=None,
              wave_hi=None,
              n_param=4,
              isplots=False,
              stellarmodel='phoenix'):
    '''
    Compute limb-darkening coefficients over the specified wavelength range.
    '''
    # Load saved data
    print("Loading saved data...")
    ev = me.loadevent(eventdir + '/d-' + eventname + '-w3')
    aux = me.loadevent(eventdir + '/d-' + ev.eventname2 + '-data')
    ev.spectra = aux.spectra
    '''
    #FINDME: HACK
    ev.file_med = ev.loc_ld + 'kelt11_med.txt'
    ev.file_med = ev.loc_ld + 'lte6250-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    ev.file_low = ev.loc_ld + 'lte6100-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    ev.file_hi  = ev.loc_ld + 'lte6400-4.38+0.2a+0.0CMg-0.1.BT-dusty-giant-2011.cifist.He.irf.fits'
    #print(ev.file_med)
    '''
    n = 0
    m = 0
    ilo = np.where(ev.wave[n][m] > wave_low)[0][0]
    ihi = np.where(ev.wave[n][m] < wave_hi)[0][-1] + 1
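    # ilo/ihi bracket the requested band: ilo is the first index with
    # wave > wave_low, and ihi is one past the last index with wave < wave_hi,
    # so the slice [ilo:ihi] spans the band inclusively.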

    #
    print("Computing limb-darkening coefficients...")
    specwave = ev.wave[n][m][ilo:ihi] * 1e4  #Angstroms
    #iwave    = np.argsort(specwave)
    #specwave = specwave[iwave]*1e4  #Angstroms
    wavelow = specwave[0]
    wavehi = specwave[-1]
    spectrum = np.sum(ev.spectra[n, :, ilo:ihi], axis=0)
    if isplots:
        # Optimal
        ev.ldcoeffs = limbDarkening(ev.file_med,
                                    wavelow,
                                    wavehi,
                                    specwave=specwave,
                                    spectrum=spectrum,
                                    n_param=n_param,
                                    n_plot=4000,
                                    stellarmodel=stellarmodel)
        plt.title(str(n_param) + ' parameter model, optimal fit')
        plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' + str(4000) +
                    str(n_param) + '.png')
        try:
            # Low
            ev.ldcoeffs_low = limbDarkening(ev.file_low,
                                            wavelow,
                                            wavehi,
                                            specwave=specwave,
                                            spectrum=spectrum,
                                            n_param=n_param,
                                            n_plot=4001,
                                            stellarmodel=stellarmodel)
            plt.title(str(n_param) + ' parameter model, low fit')
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(4001) + str(n_param) + '.png')
            # Hi
            ev.ldcoeffs_hi = limbDarkening(ev.file_hi,
                                           wavelow,
                                           wavehi,
                                           specwave=specwave,
                                           spectrum=spectrum,
                                           n_param=n_param,
                                           n_plot=4002,
                                           stellarmodel=stellarmodel)
            plt.title(str(n_param) + ' parameter model, hi fit')
            plt.savefig(eventdir + '/figs/' + ev.eventname + '-Fig' +
                        str(4002) + str(n_param) + '.png')
        except:
            pass
    else:
        ev.ldcoeffs = limbDarkening(ev.file_med,
                                    wavelow,
                                    wavehi,
                                    specwave=specwave,
                                    spectrum=spectrum,
                                    n_param=n_param,
                                    n_plot=False,
                                    stellarmodel=stellarmodel)
        try:
            ev.ldcoeffs_low = limbDarkening(ev.file_low,
                                            wavelow,
                                            wavehi,
                                            specwave=specwave,
                                            spectrum=spectrum,
                                            n_param=n_param,
                                            n_plot=False,
                                            stellarmodel=stellarmodel)
            ev.ldcoeffs_hi = limbDarkening(ev.file_hi,
                                           wavelow,
                                           wavehi,
                                           specwave=specwave,
                                           spectrum=spectrum,
                                           n_param=n_param,
                                           n_plot=False,
                                           stellarmodel=stellarmodel)
        except:
            pass
    print(eventname, ev.ldcoeffs)

    # Save results
    print('Saving results...')
    me.saveevent(ev,
                 eventdir + '/d-' + ev.eventname + '-w4',
                 delete=['spectra'])

    return
Example #15
def lcWFC3(eventname,
           eventdir,
           nchan,
           madVariable,
           madVarSet,
           wmin=1.125,
           wmax=1.65,
           expand=1,
           smooth_len=None,
           correctDrift=True,
           isplots=True):
    '''
    Compute photometric flux over specified range of wavelengths

    Parameters
    ----------
    eventname   : Unique identifier for these data
    eventdir    : Location of save file
    nchan       : Number of spectrophotometric channels
    madVariable : MAD test variable name
    madVarSet   : Value assigned to the MAD test variable
    wmin        : minimum wavelength (microns)
    wmax        : maximum wavelength (microns)
    expand      : expansion factor
    smooth_len  : Smoothing window length for the spectra (None to skip)
    correctDrift: Set True to apply the drift correction
    isplots     : Set True to produce plots

    Returns
    -------
    None

    History
    -------
    Written by Kevin Stevenson      June 2012

    '''

    # Load saved data
    print("Loading saved data...")
    try:
        ev = me.loadevent(eventdir + '/d-' + eventname + '-w2')
        print('W2 data loaded\n')
    except:
        ev = me.loadevent(eventdir + '/d-' + eventname + '-w0')
        print('W0 data loaded\n')
    aux = me.loadevent(eventdir + '/d-' + eventname + '-data')
    ev.spectra = aux.spectra
    specerr = aux.specerr
    data_mhdr = aux.data_mhdr

    #Replace NaNs with zero
    ev.spectra[np.where(np.isnan(ev.spectra))] = 0

    # Determine wavelength bins
    binsize = (wmax - wmin) / nchan
    wave_low = np.round([i for i in np.linspace(wmin, wmax - binsize, nchan)],
                        3)
    wave_hi = np.round([i for i in np.linspace(wmin + binsize, wmax, nchan)],
                       3)
    #binwave     = (wave_low + wave_hi)/2.

    # Increase resolution of spectra
    nx = ev.spectra.shape[-1]
    if expand > 1:
        print("Increasing spectra resolution...")
        #hdspectra = np.zeros((ev.n_files,ev.n_reads-1,expand*nx))
        #hdspecerr = np.zeros((ev.n_files,ev.n_reads-1,expand*nx))
        hdspectra = spni.zoom(ev.spectra, zoom=[1, 1, expand])
        hdspecerr = spni.zoom(specerr, zoom=[1, 1, expand]) * np.sqrt(expand)
        hdwave = np.zeros((ev.n_img, ev.n_spec, expand * nx))
        for j in range(ev.n_img):
            hdwave[j] = spni.zoom(ev.wave[j][0], zoom=expand)
        ev.spectra = hdspectra
        specerr = hdspecerr
        ev.wave = hdwave
        nx *= expand

    # Smooth spectra
    if smooth_len is not None:
        for m in range(ev.n_files):
            for n in range(ev.n_reads - 1):
                ev.spectra[m, n] = smooth.smooth(ev.spectra[m, n], smooth_len,
                                                 'flat')
    """
    # First read is bad for IMA files
    if ev.n_reads > 2:
        print('WARNING: Marking all first reads as bad.')
        istart = 1
    else:
        print('Using first reads.')
        istart = 0
    """
    print('Using first reads.')
    istart = 0

    if correctDrift == True:
        #Shift 1D spectra
        #Calculate drift over all frames and non-destructive reads
        print('Applying drift correction...')
        ev.drift, ev.goodmask = hst.drift_fit2(ev,
                                               preclip=0,
                                               postclip=None,
                                               width=5 * expand,
                                               deg=2,
                                               validRange=11 * expand,
                                               istart=istart,
                                               iref=ev.iref[0])
        # Correct for drift
        for m in range(ev.n_files):
            for n in range(istart, ev.n_reads - 1):
                spline = spi.UnivariateSpline(np.arange(nx),
                                              ev.spectra[m, n],
                                              k=3,
                                              s=0)
                #ev.spectra[m,n,p] = spline(np.arange(nx)+ev.drift_model[n,m,p])
                #if m==13:
                #    ev.drift[n,m,p] -= 0.476
                #Using measured drift, not model fit
                ev.spectra[m, n] = spline(np.arange(nx) + ev.drift[m, n])
    '''
    # Look for bad columns
    igoodcol    = np.ones(nx)
    normspec    = ev.spectra/np.mean(ev.spectra,axis=2)[:,:,np.newaxis]
    sumspec     = np.sum(normspec,axis=1)/(ev.n_reads-istart-1)
    stdsumspec  = np.std(sumspec, axis=0)
    igoodcol[np.where(stdsumspec > 0.007)] = 0  #FINDME: hard coded
    '''

    print("Generating light curves...")
    ev.eventname2 = ev.eventname
    for i in range(nchan):
        ev.wave_low = wave_low[i]
        ev.wave_hi = wave_hi[i]
        print("Bandpass = %.3f - %.3f" % (ev.wave_low, ev.wave_hi))
        # Calculate photometric flux for each spectrum
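        # photflux / photfluxerr have shape (n_spec, n_files, n_reads-1-istart);
        # the max(1, ...) guard keeps a length-1 NDR axis for single-read data.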
        ev.photflux = np.zeros(
            (ev.n_spec, ev.n_files, np.max((1, ev.n_reads - 1 - istart))))
        ev.photfluxerr = np.zeros(
            (ev.n_spec, ev.n_files, np.max((1, ev.n_reads - 1 - istart))))
        #ev.wave         = []
        if ev.detector == 'IR':
            #Compute common wavelength and indeces to apply over all observations
            wave = np.zeros(nx)
            for j in range(ev.n_img):
                wave += ev.wave[j][0]
            wave /= ev.n_img
            #index = np.where(np.bitwise_and(wave >= wave_low[i], wave <= wave_hi[i]))[0]
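            # index: dispersion pixels whose averaged wavelength falls inside channel i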
            index = np.where((wave >= wave_low[i]) & (wave <= wave_hi[i]))[0]
            #define numgoodcol, totcol
        else:
            # UVIS: Use all pixels for aperture photometry
            index = np.arange(nx)
        for m in range(ev.n_files):
            '''
            #Select appropriate orbit-dependent wavelength
            if ev.n_img == (np.max(ev.orbitnum)+1):
                j = int(ev.orbitnum[m])
            else:
                j = 0
            #Method 1
            ev.wave.append(np.mean(ev.wavegrid[j][n],axis=0))
            index = np.where(np.bitwise_and(ev.wave[n] >= wave_low, ev.wave[n] <= wave_hi))[0]
            #Method 2
            index = np.where(np.bitwise_and(ev.wave[j][n] >= wave_low, ev.wave[j][n] <= wave_hi))[0]
            '''
            ev.photflux[0, m] = np.sum(ev.spectra[m, istart:, index], axis=0)
            ev.photfluxerr[0, m] = np.sqrt(
                np.sum(specerr[m, istart:, index]**2, axis=0))

        # Save results
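        # Tag the event name with the channel edges (x1e3, i.e. nm) so each channel
        # is written to its own '-w3' save file.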
        ev.eventname = (ev.eventname2 + '_' + str(int(ev.wave_low * 1e3)) + '_' +
                        str(int(ev.wave_hi * 1e3)))
        #me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3', delete=['data_mhdr', 'spectra', 'specerr'])
        me.saveevent(ev, eventdir + '/d-' + ev.eventname + '-w3')

        # Produce plot
        if isplots:
            plt.figure(3000 + i, figsize=(10, 8))
            plt.clf()
            plt.suptitle('Wavelength range: ' + str(wave_low[i]) + '-' +
                         str(wave_hi[i]))
            ax = plt.subplot(111)
            #for n in range(ev.n_spec):
            #plt.subplot(ev.n_spec,1,1)
            #plt.title('Star ' + str(n))
            #igood   = np.where(ev.goodmask[0])[0]
            iscan0 = np.where(ev.scandir == 0)[0]
            iscan1 = np.where(ev.scandir == 1)[0]
            mjd = np.floor(ev.bjdtdb[0])
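            # Normalize fluxes to the last forward-scan exposure; the integer part of
            # the first BJD_TDB time serves as the plotting offset on the time axis.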
            flux0 = np.sum(ev.photflux[0][iscan0], axis=1) / np.sum(
                ev.photflux[0, [iscan0[-1]]])  # forward scan
            #err  = np.sqrt(1 / np.sum(1/ev.photfluxerr[0]**2,axis=1))/np.sum(ev.photflux[0,-1])
            try:
                err0 = np.sqrt(np.sum(ev.photfluxerr[0][iscan0]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
            except Exception:
                # Plot without error bars if the uncertainties cannot be combined
                err0 = 0
                #err1    = 0
            plt.errorbar(ev.bjdtdb[iscan0] - mjd, flux0, err0, fmt='bo')
            plt.text(
                0.05,
                0.1,
                "MAD = " +
                str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux0))))) +
                " ppm",
                transform=ax.transAxes,
                color='b')
            #print(len(iscan1))
            flux1 = 0

            if len(iscan1) > 0:
                flux1 = np.sum(ev.photflux[0][iscan1], axis=1) / np.sum(
                    ev.photflux[0, [iscan0[-1]]])  # reverse scan
                err1 = np.sqrt(np.sum(ev.photfluxerr[0][iscan1]**2,
                                      axis=1)) / np.sum(
                                          ev.photflux[0, [iscan0[-1]]])
                plt.errorbar(ev.bjdtdb[iscan1] - mjd, flux1, err1, fmt='ro')
                plt.text(
                    0.05,
                    0.05,
                    "MAD = " +
                    str(np.round(1e6 * np.median(np.abs(np.ediff1d(flux1))))) +
                    " ppm",
                    transform=ax.transAxes,
                    color='r')
            plt.ylabel('Normalized Flux')
            plt.xlabel('Time [BJD_TDB - ' + str(mjd) + ']')

            plt.subplots_adjust(left=0.10,
                                right=0.95,
                                bottom=0.10,
                                top=0.90,
                                hspace=0.20,
                                wspace=0.3)
            plt.savefig(eventdir + '/figs/Fig' + str(3000 + i) + '-' +
                        ev.eventname + '.png')
            #plt.pause(0.1)

            # f = open('2017-07-15-w1_spec_width_20/W5_MAD_'+ev.madVarStr+'_1D.txt','a+')
            # fooTemp = getattr(ev,madVariable)
            # print('W5: ' + ev.madVarStr + ' = ' + str(fooTemp))
            # f.write(str(fooTemp) + ',' + str(np.round(1e6*np.median(np.abs(np.ediff1d(flux0))))) + ',' + str(np.round(1e6*np.median(np.abs(np.ediff1d(flux1))))) +'\n')
            # f.close()
            # print('W5_MAD_'+ ev.madVarStr +'_1D.txt saved\n')

    if (isplots >= 1) and (ev.detector == 'IR'):
        # Drift
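        # Top panel: cross-dispersion (y) drift from the 2D fit; bottom panel: total
        # dispersion-direction (x) drift, i.e. the 2D estimate plus the 1D spectral
        # drift measured above.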
        plt.figure(3100, figsize=(10, 8))
        plt.clf()
        plt.subplot(211)
        for j in range(istart, ev.n_reads - 1):
            plt.plot(ev.drift2D[:, j, 1], '.')
        plt.ylabel('Spectrum Drift Along y')
        plt.subplot(212)
        for j in range(istart, ev.n_reads - 1):
            plt.plot(ev.drift2D[:, j, 0] + ev.drift[:, j], '.')
        plt.ylabel('Spectrum Drift Along x')
        plt.xlabel('Frame Number')
        plt.savefig(eventdir + '/figs/fig3100-Drift.png')

        # 2D light curve with drift correction
        plt.figure(3200, figsize=(7.85, ev.n_files / 20. + 0.8))
        plt.clf()
        vmin = 0.98
        vmax = 1.01
        #FINDME
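        # Normalize each scan direction separately, using frames
        # inormspec[0]:inormspec[1] of that direction as the reference baseline.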
        normspec = np.zeros((ev.n_files, ev.spectra.shape[2]))
        for p in range(2):
            iscan = np.where(ev.scandir == p)[0]
            if len(iscan) > 0:
                normspec[iscan] = (np.mean(ev.spectra[iscan], axis=1) /
                                   np.mean(ev.spectra[iscan[ev.inormspec[0]:ev.inormspec[1]]],
                                           axis=(0, 1)))
                #normspec[iscan] = np.mean(ev.spectra[iscan],axis=1)/np.mean(ev.spectra[ev.iref[p]],axis=0)
        #normspec    = np.mean(ev.spectra[:,istart:],axis=1)/np.mean(ev.spectra[ev.inormspec[0]:ev.inormspec[1],istart:],axis=(0,1))
        ediff = np.zeros(ev.n_files)
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
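        # Per-frame scatter metric: median absolute first difference of the normalized
        # spectrum over [wmin, wmax], in ppm; the figure title reports the mean over frames.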
        for m in range(ev.n_files):
            ediff[m] = 1e6 * np.median(
                np.abs(np.ediff1d(normspec[m, iwmin:iwmax])))
            plt.scatter(ev.wave[0][0],
                        np.zeros(normspec.shape[-1]) + m,
                        c=normspec[m],
                        s=14,
                        linewidths=0,
                        vmin=vmin,
                        vmax=vmax,
                        marker='s',
                        cmap=plt.cm.RdYlBu_r)
        plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
        plt.xlim(wmin, wmax)
        if nchan > 1:
            xticks = np.round(np.linspace(wmin, wmax, nchan + 1), 3)
            plt.xticks(xticks, xticks)
            plt.vlines(xticks, 0, ev.n_files, 'k', 'dashed')
        plt.ylim(0, ev.n_files)
        plt.ylabel('Frame Number')
        plt.xlabel(r'Wavelength ($\mu m$)')
        plt.xticks(rotation=30)
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(eventdir + '/figs/fig3200-' + str(nchan) + '-2D_LC.png')
        #plt.savefig(eventdir+'/figs/fig3200-'+str(nchan)+'-2D_LC_'+madVariable+'_'+str(madVarSet)+'.png')

        #ev.mad5 = np.round(np.mean(ediff),0)
        # f = open('2017-07-15-w1_spec_width_20/W5_MAD_'+ev.madVarStr+'.txt','a+')
        # fooTemp = getattr(ev,madVariable)
        # print('W5: ' + ev.madVarStr + ' = ' + str(fooTemp))
        # f.write(str(fooTemp) + ',' + str(np.round(np.mean(ediff),0)) + '\n')
        # f.close()
        # print('W5_MAD_'+ ev.madVarStr +'.txt saved\n')

    if (isplots >= 3) and (ev.detector == 'IR'):
        # Plot individual non-destructive reads
        vmin = 0.97
        vmax = 1.03
        iwmin = np.where(ev.wave[0][0] > wmin)[0][0]
        iwmax = np.where(ev.wave[0][0] > wmax)[0][0]
        #FINDME
        normspec = ev.spectra[:, istart:] / np.mean(
            ev.spectra[ev.inormspec[0]:ev.inormspec[1], istart:], axis=0)
        for n in range(ev.n_reads - 1):
            plt.figure(3300 + n, figsize=(8, ev.n_files / 20. + 0.8))
            plt.clf()
            ediff = np.zeros(ev.n_files)
            for m in range(ev.n_files):
                ediff[m] = 1e6 * np.median(
                    np.abs(np.ediff1d(normspec[m, n, iwmin:iwmax])))
                plt.scatter(ev.wave[0][0],
                            np.zeros(normspec.shape[-1]) + m,
                            c=normspec[m, n],
                            s=14,
                            linewidths=0,
                            vmin=vmin,
                            vmax=vmax,
                            marker='s',
                            cmap=plt.cm.RdYlBu_r)
            plt.title("MAD = " + str(np.round(np.mean(ediff), 0)) + " ppm")
            plt.xlim(wmin, wmax)
            plt.ylim(0, ev.n_files)
            plt.ylabel('Frame Number')
            plt.xlabel(r'Wavelength ($\mu m$)')
            plt.colorbar()
            plt.tight_layout()
            plt.savefig(ev.eventdir + '/figs/fig' + str(3300 + n) +
                        '-2D_LC.png')
        """
        # Aligned 1D spectra
        plt.figure(3300, figsize=(8,6.5))
        plt.clf()
        #istart=0
        #normspec    = ev.spectra[:,istart:]/np.mean(ev.spectra[:,istart:],axis=2)[:,:,np.newaxis]
        normspec    = ev.spectra[:,:,1:]/np.mean(ev.spectra[:,:,1:],axis=2)[:,:,np.newaxis]
        wave        = ev.wave[0][0][1:]
        sumspec     = np.sum(normspec,axis=1)/(ev.n_reads-istart-1)
        for m in range(10,16):
            plt.plot(wave,sumspec[m],'r-')
        for m in range(7,10):
            plt.plot(wave,sumspec[m],'.k-')
        """

    return ev