Code Example #1
    def raw_to_flt(self, raw_name):
        """
        Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.
        If available, crds is ran to download and updat the RAW file header to point to the latest calibration files.
        The flat-field calibration file names are replaced with the ones included in this package and pointed to by
        G102_FF and G141_FF.

        Attributes
        ----------
        None

        Output
        ------
        string containing the name of the FLT file that was created

        """

        CRCORR = "OMIT"
        FLATCORR = "PERFORM"

        obs_id = raw_name.split("_raw")[0]

        files = ["{}_flt.fits".format(obs_id), "{}_ima.fits".format(obs_id)]
        for ff in files:
            if os.path.isfile(ff):
                os.unlink(ff)

        print("Processing ", raw_name)
        res = os.system(
            "crds bestrefs --files {}  --sync-references=1  --update-bestrefs "
            .format(raw_name))
        if res != 0:
            print("CRDS did not run.")

        fin = fits.open(raw_name, mode="update")
        fin[0].header["CRCORR"] = CRCORR
        fin[0].header["FLATCORR"] = FLATCORR
        filt = fin[0].header["FILTER"]  # grism name, e.g. G102 or G141

        # Save the original flat-field name, then point to the packaged one.
        self.org_FF_file = fin[0].header["PFLTFILE"]
        fin[0].header["PFLTFILE"] = self.FF_file

        fin.close()
        calwf3(raw_name)
        flt_name = raw_name.split("_raw.fits")[0] + "_flt.fits"

        if not os.path.isfile(flt_name):
            print("raw_to_flt() failed to generate ", flt_name)
            sys.exit(1)

        return flt_name
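A minimal usage sketch, assuming this method belongs to a class whose constructor sets `self.FF_file` to the packaged G102/G141 flat (the `GrismFF` class and its constructor below are hypothetical, for illustration only):

# Hypothetical wrapper class; only raw_to_flt() is shown above.
proc = GrismFF(ff_file="G141_FF.fits")
flt_name = proc.raw_to_flt("ibhj31grq_raw.fits")
print("Created", flt_name)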
Code Example #2
def run_calwf3(path_to_files=''):
    """Recalibrate with UNITCORR set to 'OMIT'.
    """
    raws = glob.glob(path_to_files + "/*raw.fits")
    print(raws)
    # Change the keywords.
    for raw in raws:
        change_pctecorr_keyword(raw)

    # Recalibrate.
    for raw in raws:
        calwf3(raw)

    # Remove the trailer (TRA) files produced by calwf3.
    tras = glob.glob(path_to_files + "/*.tra")
    for tra in tras:
        os.remove(tra)
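The helper `change_pctecorr_keyword` is not shown in this snippet; a minimal sketch of what it presumably does, using `astropy.io.fits.setval` (the real implementation in the source project may differ):

from astropy.io import fits

def change_pctecorr_keyword(raw):
    # Presumed behavior: disable the CTE correction so calwf3
    # writes only FLT products (no FLC).
    fits.setval(raw, 'PCTECORR', value='OMIT')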
Code Example #3
def calibrate_raws(files):
    # Iterates through the raw files and calibrates them
    for im in files:
        hdu = fits.open(im, mode='update')
        # Turn off the PCTECORR switch so calwf3 outputs only FLT files
        hdu[0].header['PCTECORR'] = 'OMIT'
        hdu.close()
        calwf3(im)

    # Make a directory for files that are no longer needed, to keep the working directory clean
    os.system('mkdir raw_crj_tra_csv_files')

    # Move those files into the newly made directory
    os.system('mv *raw.fits raw_crj_tra_csv_files')
    os.system('mv *.tra raw_crj_tra_csv_files')
    os.system('mv *crj.fits raw_crj_tra_csv_files')
    os.system('mv *.csv raw_crj_tra_csv_files')
    os.system('mv *asn.fits raw_crj_tra_csv_files')
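A more portable version of the `os.system` bookkeeping above, using `shutil` and `glob` (a sketch with the same file patterns; unlike `mv`, name collisions in the destination are handled by shutil's own rules):

import glob
import os
import shutil

def tidy_working_dir(dest='raw_crj_tra_csv_files'):
    # Create the destination once; no error if it already exists.
    os.makedirs(dest, exist_ok=True)
    for pattern in ('*raw.fits', '*.tra', '*crj.fits', '*.csv', '*asn.fits'):
        for f in glob.glob(pattern):
            shutil.move(f, os.path.join(dest, f))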
Code Example #4
def run_all(skip_first_read=True):
    """
    Run splitting script on all RAW files in the working directory.  
    
    First generates IMA files from RAWs after setting CRCORR=OMIT.
    
    """

    import os
    import glob
    import astropy.io.fits as pyfits

    import wfc3tools

    files = glob.glob("*raw.fits")
    files.sort()

    for file in files:
        if os.path.exists(file.replace('_raw', '_ima')):
            print('IMA exists, skip', file)
            continue

        print('Process', file)

        # Set CRCORR
        raw_im = pyfits.open(file, mode='update')
        raw_im[0].header['CRCORR'] = 'OMIT'
        raw_im.flush()

        # Remove FLT if it exists or calwf3 will die
        if os.path.exists(file.replace('_raw', '_flt')):
            os.remove(file.replace('_raw', '_flt'))

        # Run calwf3
        wfc3tools.calwf3(file)

        # Split into individual FLTs
        split_ima_flt(file=file.replace('_raw', '_ima'),
                      skip_first_read=skip_first_read)
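`split_ima_flt` is defined elsewhere in the source project. A typical invocation of this driver, assuming the `*_raw.fits` files and the `iref` reference files are already in place:

# Convert every RAW in the working directory to an IMA with CRCORR=OMIT,
# then split the reads; the zeroth flush read is skipped by default.
run_all(skip_first_read=True)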
Code Example #5
File: make_asn.py  Project: mmckay18/uvis_biasgen
def make_asn_main(path):
    os.chdir(path)
    # Use the visit ID (first six characters of the rootname) to group files into associations
    visits = []
    list_of_files = glob.glob('*raw.fits')
    for files in list_of_files:
        name = files
        visit = name[:6]
        visits.append(visit)
        hdu = fits.open(files, mode='update')
        hdu[0].header['PCTECORR'] = 'OMIT'
        hdu.close()

    unique_visits = set(visits)
    print(unique_visits)
    asn_outputs = []
    for items in unique_visits:
        asn_files = []
        for j in range(len(list_of_files)):
            files = list_of_files[j]
            comp = files[:6]
            write_file = files[:9]
            if items == comp:
                asn_files.append(write_file)
                output = files[:6] + '010_asn.fits'

        if len(asn_files) > 1:
            row = [i + 1 for i in range(len(asn_files))]
            memtype = ['EXP-CRJ' for f in asn_files]
            memprsnt = [True for f in asn_files]
            xoffset = [0. for f in asn_files]
            yoffset = [0. for f in asn_files]
            xdelta = [0. for f in asn_files]
            ydelta = [0. for f in asn_files]
            rotation = [0. for f in asn_files]
            scale = [1. for f in asn_files]
            # Change the last row to have a memtype of PROD-CRJ instead of EXP-CRJ
            #memtype[-1] = 'PROD-CRJ'

            # Build the table
            columns = [
                row, asn_files, memtype, memprsnt, xoffset, yoffset, xdelta,
                ydelta, rotation, scale
            ]
            names = [
                'row', 'MEMNAME', 'MEMTYPE', 'MEMPRSNT', 'XOFFSET', 'YOFFSET',
                'XDELTA', 'YDELTA', 'ROTATION', 'SCALE'
            ]
            asn_table = Table(columns, names=names)

            # Save the table to a FITS file
            asn_table.write(output)

            # Update the header to have the required keywords
            hdulist = fits.open(output, mode='update')
            hdulist[0].header['INSTRUME'] = 'WFC3'
            hdulist[0].header['DETECTOR'] = 'UVIS'
            hdulist[0].header['PCTECORR'] = 'OMIT'
            hdulist.close()
            asn_outputs.append(output)


    # Calibrate every association that was built, not just the last one
    for output in asn_outputs:
        calwf3(output)
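Before calibrating, an association can be sanity-checked by reading the table back with astropy (a sketch; the file name follows the `<visit>010_asn.fits` convention used above and is illustrative):

from astropy.table import Table

asn = Table.read('iacr51010_asn.fits')  # illustrative visit name
print(asn['MEMNAME', 'MEMTYPE', 'MEMPRSNT'])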
Code Example #6
def create_asn_file(path):
    os.chdir(path)
    base_path = path
    name_matches = []

    # Build a pool of unique random three-digit strings for temporary renaming.
    setOfNumbers = set()
    while len(setOfNumbers) < 600:
        setOfNumbers.add(str(randint(111, 999)))

    my_list = list(setOfNumbers)

    list_of_files = glob.glob('*raw.fits')

    # Temporarily rename each RAW file onto a common fake visit ('imam11' + random number).
    for i in range(len(list_of_files)):
        files = list_of_files[i]
        currentName = os.path.join(base_path, files)
        name = files[9:]
        num = my_list[i]
        new_name = 'imam11' + num + name
        name_matches.append((files, new_name))
        fileName = os.path.join(base_path, new_name)
        os.rename(currentName, fileName)

    with open('name_matches.csv', 'w') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['original', 'new'])
        for row in name_matches:
            csv_out.writerow(row)
    # ----- Build associations and calibrate -----
    os.chdir(path)
    # Use the visit ID (first six characters of the rootname) to group files into associations
    visits = []
    list_of_files = glob.glob('*raw.fits')
    for files in list_of_files:
        name = files
        visit = name[:6]
        visits.append(visit)
        hdu = fits.open(files, mode='update')
        hdu[0].header['PCTECORR'] = 'OMIT'
        hdu.close()

    unique_visits = set(visits)
    print(unique_visits)
    asn_outputs = []
    for items in unique_visits:
        asn_files = []
        for j in range(len(list_of_files)):
            files = list_of_files[j]
            comp = files[:6]
            write_file = files[:9]
            if items == comp:
                asn_files.append(write_file)
                output = files[:6] + '010_asn.fits'

        if len(asn_files) > 1:
            row = [i + 1 for i in range(len(asn_files))]
            memtype = ['EXP-CRJ' for f in asn_files]
            memprsnt = [True for f in asn_files]
            xoffset = [0. for f in asn_files]
            yoffset = [0. for f in asn_files]
            xdelta = [0. for f in asn_files]
            ydelta = [0. for f in asn_files]
            rotation = [0. for f in asn_files]
            scale = [1. for f in asn_files]
            # Change the last row to have a memtype of PROD-CRJ instead of EXP-CRJ
            #memtype[-1] = 'PROD-CRJ'

            # Build the table
            columns = [
                row, asn_files, memtype, memprsnt, xoffset, yoffset, xdelta,
                ydelta, rotation, scale
            ]
            names = [
                'row', 'MEMNAME', 'MEMTYPE', 'MEMPRSNT', 'XOFFSET', 'YOFFSET',
                'XDELTA', 'YDELTA', 'ROTATION', 'SCALE'
            ]
            asn_table = Table(columns, names=names)

            # Save the table to a FITS file
            asn_table.write(output)

            # Update the header to have the required keywords
            hdulist = fits.open(output, mode='update')
            hdulist[0].header['INSTRUME'] = 'WFC3'
            hdulist[0].header['DETECTOR'] = 'UVIS'
            hdulist[0].header['PCTECORR'] = 'OMIT'
            hdulist.close()
            asn_outputs.append(output)


    # Calibrate every association that was built, not just the last one
    for output in asn_outputs:
        calwf3(output)

    # ----- Rename everything back to the original rootnames -----

    os.chdir(path)

    name_new = []
    old_name = []
    name_matches = []

    # Read the original/new name pairs back in, skipping the header row.
    with open('name_matches.csv', 'r') as fileSB:
        reader = csv.reader(fileSB)
        next(reader)  # skip the 'original','new' header
        for q in reader:
            old_name.append(q[0])
            name_new.append(q[1])

    # Rename the RAW, FLT, FLC, and TRA products back to their original rootnames.
    for pattern in ('*raw.fits', '*flt.fits', '*flc.fits', '*.tra'):
        for File in glob.glob(pattern):
            files = File[:9]
            for j in range(len(name_new)):
                comp_name = name_new[j][:9]
                if files == comp_name:
                    currentName = os.path.join(base_path, File)
                    name = File[9:]
                    OLD = old_name[j][:9]
                    file_name = OLD + name
                    name_matches.append((File, file_name))
                    fileName = os.path.join(base_path, file_name)
                    os.rename(currentName, fileName)

    with open('matches.csv', 'w') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['original', 'new'])
        for row in name_matches:
            csv_out.writerow(row)

    # Stash intermediate products so only the restored FLT/FLC files remain visible
    os.system('mkdir raw_crj_tra_csv_files')
    os.system('mv *raw.fits raw_crj_tra_csv_files')
    os.system('mv *.tra raw_crj_tra_csv_files')
    os.system('mv *crj.fits raw_crj_tra_csv_files')
    os.system('mv *.csv raw_crj_tra_csv_files')
    os.system('mv *asn.fits raw_crj_tra_csv_files')
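A usage sketch (the directory path below is illustrative; the function expects a directory of WFC3/UVIS `*raw.fits` files and writes its bookkeeping CSVs there):

# Renames the files onto a common fake visit, builds per-visit ASN tables,
# runs calwf3, then restores the original rootnames.
create_asn_file('/data/uvis_biases')  # hypothetical path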
Code Example #7
def cal():
    # Report the calwf3 version (no data are processed).
    calwf3(version=True)
Code Example #8
def cal():
    # Call calwf3 with its default arguments (no input file given).
    calwf3()
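For reference, the usual entry point takes a RAW exposure or an association table; a minimal call (the rootname is taken from Code Example #10 and is illustrative here):

from wfc3tools import calwf3

# Products (IMA/FLT, and FLC when PCTECORR=PERFORM on UVIS) are
# written next to the input file.
calwf3('ibhj31grq_raw.fits')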
Code Example #9
File: reprocess_wfc3.py  Project: gbrammer/wfc3
def show_MultiAccum_reads(raw='ibp329isq_raw.fits', flatten_ramp=False, verbose=True, stats_region=[[0,1014], [0,1014]]):
    """
    Make a figure (.ramp.png) showing the individual reads of an 
    IMA or RAW file.
    """    
    import scipy.ndimage as nd
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.WARN)

    status = fetch_calibs(raw) #, ftpdir='ftp://ftp.stsci.edu/cdbs/iref/')
    if not status:
        return False
        
    img = pyfits.open(raw)
    
    if 'raw' in raw:
        gains = [2.3399999, 2.3699999, 2.3099999, 2.3800001]
        gain = np.zeros((1024,1024))
        gain[512: ,0:512] += gains[0]
        gain[0:512,0:512] += gains[1]
        gain[0:512, 512:] += gains[2]
        gain[512: , 512:] += gains[3]
    else:
        gain=1
    
    logger.info('Make MULTIACCUM cube')
        
    #### Split the multiaccum file into individual reads    
    cube, dq, time, NSAMP = split_multiaccum(img, scale_flat=False)
    
    if 'raw' in raw:
        dark_file = img[0].header['DARKFILE'].replace('iref$', os.getenv('iref')+'/')
        dark = pyfits.open(dark_file)
        dark_cube, dark_dq, dark_time, dark_NSAMP = split_multiaccum(dark, scale_flat=False)

        diff = np.diff(cube-dark_cube[:NSAMP,:,:], axis=0)*gain
        dt = np.diff(time)
    
        #### Need flat for Poisson
        flat_im, flat = get_flat(img)
        diff /= flat
    else:
        diff = np.diff(cube, axis=0)
        dt = np.diff(time)
    
    ####  Average ramp
    slx = slice(stats_region[0][0], stats_region[0][1])
    sly = slice(stats_region[1][0], stats_region[1][1])
    ramp_cps = np.median(diff[:, sly, slx], axis=1)
    avg_ramp = np.median(ramp_cps, axis=1)
    
    #### Initialize the figure
    logger.info('Make plot')
    
    plt.ioff()
    #fig = plt.figure(figsize=[10,10])
    fig = Figure(figsize=[10,10])

    ## Smoothing
    smooth = 1
    kernel = np.ones((smooth,smooth))/smooth**2
    
    ## Plot the individual reads
    for j in range(1,NSAMP-1):
        ax = fig.add_subplot(4,4,j)
        smooth_read = nd.convolve(diff[j,:,:],kernel)
        ax.imshow(smooth_read[5:-5:smooth, 5:-5:smooth]/dt[j], 
                  vmin=0, vmax=4, origin='lower', cmap=plt.get_cmap('cubehelix'))
        
        ax.set_xticklabels([]); ax.set_yticklabels([])
        ax.text(20,5,'%d' %(j), ha='left', va='bottom', backgroundcolor='white')
    
    ## Show the ramp
    fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
    
    ax = fig.add_axes((0.6, 0.05, 0.37, 0.18))
    #ax = fig.add_subplot(428)
    ax.plot(time[2:], (ramp_cps[1:,16:-16:4].T/np.diff(time)[1:]).T, alpha=0.1, color='black')
    ax.plot(time[2:], avg_ramp[1:]/np.diff(time)[1:], alpha=0.8, color='red', linewidth=2)
    ax.set_xlabel('time'); ax.set_ylabel('background [e/s]')

    #fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
    root=raw.split('_')[0]
    #plt.savefig(root+'_ramp.png')
    
    canvas = FigureCanvasAgg(fig)
    canvas.print_figure(root+'_ramp.png', dpi=200, transparent=False)
    
    #### Save the ramp data to a text file
    np.savetxt('%s_ramp.dat' %(root), np.array([time[1:], avg_ramp/np.diff(time)]).T, fmt='%.3f')
    
    if flatten_ramp:
        #### Flatten the ramp by setting background countrate to the average.  
        #### Output saved to "*x_flt.fits" rather than the usual *q_flt.fits.
        import wfc3tools
        
        flux = avg_ramp/np.diff(time)
        avg = avg_ramp.sum()/time[-1]
        min_flux = flux[1:].min()  # avoid shadowing the builtin `min`
        subval = np.cumsum((flux-avg)*np.diff(time))

        imraw = pyfits.open(raw.replace('ima','raw'))
        for i in range(1, NSAMP):
            logger.info('Remove excess %.2f e/s from read #%d (t=%.1f)' %(flux[-i]-min_flux, NSAMP-i+1, time[-i]))

            # np.cast was removed in recent numpy releases; use astype instead
            imraw['SCI',i].data = imraw['SCI',i].data - (subval[-i]/2.36*flat).astype(int)
                
        files=glob.glob(raw.split('q_')[0]+'x_*')
        for file in files:
            os.remove(file)
            
        imraw[0].header['CRCORR'] = 'PERFORM'
        imraw.writeto(raw.replace('q_raw', 'x_raw'), overwrite=True)
        
        ## Run calwf3
        wfc3tools.calwf3(raw.replace('q_raw', 'x_raw'))
                
    return fig
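A usage sketch, with the function's default rootname (requires the reference files that `fetch_calibs` retrieves):

# Writes ibp329isq_ramp.png and ibp329isq_ramp.dat in the working directory.
fig = show_MultiAccum_reads(raw='ibp329isq_raw.fits', flatten_ramp=False)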
Code Example #10
File: reprocess_wfc3.py  Project: gbrammer/wfc3
def make_IMA_FLT(raw='ibhj31grq_raw.fits', pop_reads=[], remove_ima=True, fix_saturated=True, flatten_ramp=True, stats_region=[[300,714], [300,714]]):
    """
    Run calwf3, if necessary, to generate ima & flt files.  Then put the last
    read of the ima in the FLT SCI extension and let Multidrizzle flag 
    the CRs.
    
    Optionally pop out reads affected by satellite trails or earthshine.  The 
    parameter `pop_reads` is a `list` containing the reads to remove, where
    a value of 1 corresponds to the first real read after the 2.9s flush.
    
    Requires IRAFX for wfc3tools
    """
    import wfc3tools
        
    #### Remove existing products or calwf3 will die
    for ext in ['flt','ima']:
        if os.path.exists(raw.replace('raw', ext)):
            os.remove(raw.replace('raw', ext))
    
    #### Turn off CR rejection    
    raw_im = pyfits.open(raw, mode='update')
    if raw_im[0].header['DETECTOR'] == 'UVIS':
        return True
        
    status = fetch_calibs(raw) #, ftpdir='ftp://ftp.stsci.edu/cdbs/iref/')
    if not status:
        return False
        
    if not pop_reads:
        raw_im[0].header['CRCORR'] = 'OMIT'
        raw_im.flush()
    
    #### Run calwf3
    wfc3tools.calwf3(raw)
    
    flt = pyfits.open(raw.replace('raw', 'flt'), mode='update')
    ima = pyfits.open(raw.replace('raw', 'ima'))
    
    #### Pull out the data cube, order in the more natural sense
    #### of first reads first
    cube, dq, time, NSAMP = split_multiaccum(ima, scale_flat=False)
    
    #### Readnoise in 4 amps
    readnoise_2D = np.zeros((1024,1024))
    readnoise_2D[512: ,0:512] += ima[0].header['READNSEA']
    readnoise_2D[0:512,0:512] += ima[0].header['READNSEB']
    readnoise_2D[0:512, 512:] += ima[0].header['READNSEC']
    readnoise_2D[512: , 512:] += ima[0].header['READNSED']
    readnoise_2D = readnoise_2D**2

    #### Gain in 4 amps
    gain_2D = np.zeros((1024,1024))
    gain_2D[512: ,0:512] += ima[0].header['ATODGNA']
    gain_2D[0:512,0:512] += ima[0].header['ATODGNB']
    gain_2D[0:512, 512:] += ima[0].header['ATODGNC']
    gain_2D[512: , 512:] += ima[0].header['ATODGND']
    
    ### Pop out reads affected by satellite trails or earthshine
    masks = glob.glob(raw.replace('.fits', '*mask.reg'))
    if (len(pop_reads) > 0) | (len(masks) > 0):
        print('\n****\nPop reads %s from %s\n****\n' %(pop_reads, ima.filename()))
        
        #### Need to put dark back in for Poisson
        dark_file = ima[0].header['DARKFILE'].replace('iref$', os.getenv('iref')+'/')
        dark = pyfits.open(dark_file)
        dark_cube, dark_dq, dark_time, dark_NSAMP = split_multiaccum(dark, scale_flat=False)
        
        #### Need flat for Poisson
        flat_im, flat = get_flat(ima)
        
        #### Subtract diffs of flagged reads
        diff = np.diff(cube, axis=0)
        dark_diff = np.diff(dark_cube, axis=0)

        dt = np.diff(time)
        final_exptime = np.ones((1024, 1024))*time[-1]
        final_sci = cube[-1,:,:]*1
        final_dark = dark_cube[NSAMP-1,:,:]*1        
        for read in pop_reads:
            final_sci -= diff[read,:,:]
            final_dark -= dark_diff[read,:,:]
            final_exptime -= dt[read]
        
        if False:
            ### Experimenting with automated flagging
            sh = (1024,1024)
            drate = (diff.reshape((14,-1)).T/dt).T
            med = np.median(drate, axis=0)
            fmed = np.median(med)
            nmad = 1.48*np.median(np.abs(drate-med), axis=0)
            
            drate_ma = np.ma.masked_array(drate, mask=~np.isfinite(drate))
            wht_ma = drate_ma*0
            
            excess = med*0.
            for read in range(1,NSAMP-1):
                med_i = np.percentile(drate[read,:]-med, 20)
                excess_electrons = (drate[read,:]-med-med_i)*dt[read]
                rms = np.sqrt((fmed + med_i)*dt[read])
                
                hot = (excess_electrons / rms) > 10
                #sm = nd.median_filter(excess_electrons.reshape(sh), 10).flatten()
                #hot |= (sm / rms) > 3
                
                med_i = np.percentile((drate[read,:]-med)[~hot], 50)
                print(med_i)
                
                drate_ma.mask[read, hot] |= True
                drate_ma.data[read,:] -= med_i
                wht_ma.mask[read, hot] |= True
                wht_ma.data[read,:] = dt[read]
            
            wht_ma.mask[0,:] = True
            
            avg = (drate_ma*wht_ma).sum(axis=0)/wht_ma.sum(axis=0)
            pyfits.writeto('%s_avg.fits' %(raw.split('_raw')[0]), data=avg.data.reshape(sh)[5:-5,5:-5], overwrite=True)
                
        #### Remove masked regions of individual reads
        if len(masks) > 0:
            import pyregion
            for mask in masks:
                mask_read = int(mask.split('.')[-3])
                if mask_read in pop_reads:
                    continue
                
                print('Mask pixels in read %d (%s)' %(mask_read, mask))
                
                refhdu = ima['SCI', 1]
                r = pyregion.open(mask).as_imagecoord(header=refhdu.header)
                mask_array = r.get_mask(hdu=refhdu)
                final_exptime -= mask_array*dt[mask_read]
                final_sci -= diff[mask_read,:,:]*mask_array
                final_dark -= dark_diff[mask_read,:,:]*mask_array
                
        #### Variance terms
        ## read noise
        final_var = readnoise_2D*1
        ## poisson term
        final_var += (final_sci*flat + final_dark*gain_2D)*(gain_2D/2.368)
        ## flat errors
        final_var += (final_sci*flat*flat_im['ERR'].data)**2
        final_err = np.sqrt(final_var)/flat/(gain_2D/2.368)/1.003448/final_exptime
        
        final_sci /= final_exptime
                
        flt[0].header['EXPTIME'] = np.max(final_exptime)
        
    else:
        if flatten_ramp:
            #### Subtract out the median of each read to make background flat
            fix_saturated = False
            
            print('\n*** Flatten ramp ***')
            ima = pyfits.open(raw.replace('raw', 'ima'), mode='update')
            
            #### Grism exposures aren't flat-corrected
            filt = ima[0].header['FILTER']  # avoid shadowing the builtin `filter`
            if 'G1' in filt:
                flats = {'G102': 'uc72113oi_pfl.fits', 
                         'G141': 'uc721143i_pfl.fits'}
                
                flat = pyfits.open('%s/%s' %(os.getenv('iref'), flats[filt]))[1].data
            else:
                flat = 1.
            
            #### Remove the variable ramp            
            slx = slice(stats_region[0][0], stats_region[0][1])
            sly = slice(stats_region[1][0], stats_region[1][1])
            total_countrate = np.median((ima['SCI',1].data/flat)[sly, slx])
            for i in range(ima[0].header['NSAMP']-2):
                ima['SCI',i+1].data /= flat
                med = np.median(ima['SCI',i+1].data[sly, slx])
                print('Read #%d, background:%.2f' %(i+1, med))
                ima['SCI',i+1].data += total_countrate - med
            
            if 'G1' in filt:
                for i in range(ima[0].header['NSAMP']-2):
                    ima['SCI',i+1].data *= flat
            
            ima[0].header['CRCORR'] = 'PERFORM'
            ima[0].header['DRIZCORR'] = 'OMIT'
            
            ### May need to generate a simple dummy ASN file for a single exposure
            ### Not clear why calwf3 needs an ASN if DRIZCORR=OMIT, but it does
            need_asn = False
            if ima[0].header['ASN_ID'] == 'NONE':
                need_asn=True
            else:
                if not os.path.exists(ima[0].header['ASN_TAB']):
                    need_asn=True
            
            if need_asn:
                # asnutil is a submodule and must be imported explicitly
                from stsci.tools import asnutil

                exp = ima.filename().split('_ima')[0]
                params = asnutil.ASNMember()
                asn = asnutil.ASNTable(output=exp)
                asn['members'] = {exp:params}
                asn['order'] = [exp]
                asn.write()

                ima[0].header['ASN_ID'] = exp.upper()
                ima[0].header['ASN_TAB'] = '%s_asn.fits' %(exp)
                
            ima.flush()
                                
            #### Initial cleanup
            files=glob.glob(raw.replace('raw', 'ima_*'))
            for file in files:
                print('#cleanup: rm %s' %(file))
                os.remove(file)
        
            #### Run calwf3 on cleaned IMA
            wfc3tools.calwf3(raw.replace('raw', 'ima'))
            
            #### Put results into an FLT-like file
            ima = pyfits.open(raw.replace('raw', 'ima_ima'))
            flt_new = pyfits.open(raw.replace('raw', 'ima_flt'))
            flt['DQ'].data = flt_new['DQ'].data*1
            flt['TIME'] = flt_new['TIME']
            flt['SAMP'] = flt_new['SAMP']
            
            final_sci = ima['SCI', 1].data*1
            final_sci[5:-5,5:-5] = flt_new['SCI'].data*1
            #final_err = ima['ERR', 1].data*1
            
            ### Need original ERR, something gets messed up
            final_err = ima['ERR', 1].data*1
            final_err[5:-5,5:-5] = flt['ERR'].data*1
            
            ### Clean up
            files=glob.glob(raw.replace('raw', 'ima_*'))
            for file in files:
                print('#cleanup: rm %s' %(file))
                os.remove(file)
                
        else:
            final_sci = ima['SCI', 1].data*1
            final_err = ima['ERR', 1].data*1
    
    final_dq = ima['DQ', 1].data*1
    
    #### For saturated pixels, look for last read that was unsaturated
    #### Background will be different under saturated pixels but maybe won't
    #### matter so much for such bright objects.
    if (fix_saturated):
        print('Fix Saturated pixels:')
        #### Saturated pixels
        zi, yi, xi = np.indices(dq.shape)
        saturated = (dq & 256) > 0
        # 1024x1024 index array of reads where pixels not saturated
        zi_flag = zi*1
        zi_flag[saturated] = 0
        ### 2D array of the last un-saturated read
        last_ok_read = np.max(zi_flag, axis=0)
        sat_zero = last_ok_read == 0        
        pyfits.writeto(raw.replace('_raw','_lastread'), data=last_ok_read[5:-5,5:-5], header=flt[1].header, overwrite=True)
        ### keep pixels from first read even if saturated
        last_ok_read[sat_zero] = 1
        
        zi_idx = zi < 0
        for i in range(1, NSAMP-1):
            zi_idx[i,:,:] = zi[i,:,:] == last_ok_read

        time_array = time[zi]
        time_array[0,:,:] = 1.e-3 # avoid divide-by-zero
        # pixels that saturated before the last read
        fix = (last_ok_read < (ima[0].header['NSAMP'] - 1)) & (last_ok_read > 0)
        #err = np.sqrt(ima[0].header['READNSEA']**2 + cube)/time_array
        err = np.sqrt(readnoise_2D + cube)/time_array

        final_sci[fix] = np.sum((cube/time_array)*zi_idx, axis=0)[fix]
        final_err[fix] = np.sum(err*zi_idx, axis=0)[fix]

        fixed_sat = (zi_idx.sum(axis=0) > 0) & ((final_dq & 256) > 0)
        final_dq[fixed_sat] -= 256
        final_dq[sat_zero] |= 256
        
        print('  Nsat = %d' %(fixed_sat.sum()))
        flt['DQ'].data |= final_dq[5:-5,5:-5] & 256
        
    else:
        #### Saturated pixels
        flt['DQ'].data |= ima['DQ',1].data[5:-5,5:-5] & 256
        
    flt['SCI'].data = final_sci[5:-5,5:-5]
    flt['ERR'].data = final_err[5:-5,5:-5]
    
    #### Some earthshine flares DQ masked as 32: "unstable pixels"
    mask = (flt['DQ'].data & 32) > 0
    if mask.sum() > 1.e4:
        print('\n****\nTake out excessive DQ=32 flags (N=%e)\n****\n' %(mask.sum()))
        #flt['DQ'].data[mask] -= 32
        mask = flt['DQ'].data & 32
        ### Leave flagged 32 pixels around the edges
        flt['DQ'].data[5:-5,5:-5] -= mask[5:-5,5:-5]
        
    ### Update the FLT header
    flt[0].header['IMA2FLT'] = (1, 'FLT extracted from IMA file')
    flt[0].header['IMASAT'] = (fix_saturated*1, 'Manually fixed saturation')
    flt[0].header['NPOP'] = (len(pop_reads), 'Number of reads popped from the sequence')
    for iread, read in enumerate(pop_reads):
        flt[0].header['POPRD%02d' %(iread+1)] = (read, 'Read kicked out of the MULTIACCUM sequence')
        
    flt.flush()
    
    ### Remove the IMA file
    if remove_ima:
        os.remove(raw.replace('raw', 'ima'))
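A usage sketch: rebuild the FLT while popping two reads hit by a satellite trail (read numbering follows the docstring, where 1 is the first read after the flush; the pop_reads values are illustrative):

# The rootname is the function's default example.
make_IMA_FLT(raw='ibhj31grq_raw.fits', pop_reads=[2, 3])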
Code Example #11
File: anomalies.py  Project: gbrammer/reprocess_wfc3
def test():
    import glob
    import os
    import astropy.io.fits as pyfits
    import wfc3tools
    # Needed below to render the trail figure to PNG
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    from . import utils, reprocess_wfc3
    from .reprocess_wfc3 import split_multiaccum
    from . import anomalies

    # Work through the RAW files; the `_raw` -> `_ima` replacements below assume RAW inputs.
    files = glob.glob('*raw.fits')
    files.sort()

    for file in files:

        if not os.path.exists(file.replace('_raw', '_ima')):
            try:
                os.remove(file.replace('_raw', '_flt'))
                os.remove(file.replace('_raw', '_ima'))
            except OSError:
                pass

            ima = pyfits.open(file, mode='update')
            ima[0].header['CRCORR'] = 'PERFORM'
            ima.flush()

            wfc3tools.calwf3(file, log_func=reprocess_wfc3.silent_log)

        ima = pyfits.open(file.replace('_raw', '_ima'))
        cube, dq, time, NS = split_multiaccum(ima, scale_flat=False)

        is_grism = ima[0].header['FILTER'] in ['G102', 'G141']
        if is_grism:
            params = [LINE_PARAM_GRISM_LONG, LINE_PARAM_GRISM_SHORT]
        else:
            params = [LINE_PARAM_IMAGING_LONG, LINE_PARAM_IMAGING_SHORT]

        out = trails_in_cube(cube,
                             dq,
                             time,
                             line_params=params[0],
                             subtract_column=is_grism)

        image, edges, lines = out

        if len(lines) == 0:
            out = trails_in_cube(cube,
                                 dq,
                                 time,
                                 line_params=params[1],
                                 subtract_column=is_grism)

            image, edges, lines = out

        root = ima.filename().split('_')[0]
        print(root, len(lines))

        if len(lines) > 0:
            fig = sat_trail_figure(image, edges, lines, label=root)
            #fig.savefig('{0}_trails.png'.format(root))
            canvas = FigureCanvasAgg(fig)
            canvas.print_figure(root + '_trails.png', dpi=200)

            reg = anomalies.segments_to_mask(lines,
                                             params[0]['NK'],
                                             image.shape[1],
                                             buf=params[0]['NK'] *
                                             (1 + is_grism))

            fpr = open('{0}_trails.reg'.format(root), 'w')
            fpr.writelines(reg)
            fpr.close()
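A minimal driver, assuming the package is importable as `reprocess_wfc3` (matching the project name above):

from reprocess_wfc3 import anomalies

# Scans every exposure in the working directory; writes <root>_trails.png
# and <root>_trails.reg for any detected satellite trails.
anomalies.test()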