Example #1
def extract_overscan_region(img):
    """
    As of July 2018, Veloce uses an e2v CCD231-84-1-E74 4kx4k chip.
    Image dimensions are 4096 x 4112 pixels, but the recorded image size including the overscan regions is 4202 x 4112 pixels.
    We therefore have an overscan region of size 53 x 4112 at either end. 
    
    raw_img = pyfits.getdata(filename)     -->    raw_img.shape = (4112, 4202)
    img = correct_orientation(raw_img)     -->        img.shape = (4202, 4112)
    """

    #correct orientation if needed
    if img.shape == (4112, 4202):
        img = correct_orientation(img)

    if img.shape != (4202, 4112):
        print('ERROR: wrong image size encountered!!!')
        return

    ny, nx = img.shape

    #define overscan regions
    os1 = img[:53, :nx // 2]
    os2 = img[:53, nx // 2:]
    os3 = img[ny - 53:, nx // 2:]
    os4 = img[ny - 53:, :nx // 2]

    return os1, os2, os3, os4
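
A minimal usage sketch (the FITS filename is hypothetical, and astropy.io.fits stands in for the 'pyfits' module used in the examples):

# usage sketch - filename is illustrative only
import numpy as np
import astropy.io.fits as pyfits   # stands in for the 'pyfits' module used above

raw_img = pyfits.getdata('veloce_bias_0001.fits')       # hypothetical raw frame, shape (4112, 4202)
os1, os2, os3, os4 = extract_overscan_region(raw_img)   # one 53 x 2056 strip per quadrant
overscan_levels = [np.median(os) for os in (os1, os2, os3, os4)]
print(overscan_levels)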
Example #2
def crop_overscan_region(img):
    """
    As of July 2018, Veloce uses an e2v CCD231-84-1-E74 4kx4k chip.
    Image dimensions are 4096 x 4112 pixels, but the recorded image size including the overscan regions is 4202 x 4112 pixels.
    We therefore have an overscan region of size 53 x 4112 at either end. 
    
    raw_img = pyfits.getdata(filename)     -->    raw_img.shape = (4112, 4202)
    img = correct_orientation(raw_img)     -->        img.shape = (4202, 4112)
    """

    #correct orientation if needed
    if img.shape == (4112, 4202):
        img = correct_orientation(img)

    if img.shape != (4202, 4112):
        print('ERROR: wrong image size encountered!!!')
        return

    #crop overscan region
    good_img = img[53:4149, :]

    return good_img
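
A short usage sketch for the cropping step (the filename is illustrative only):

import astropy.io.fits as pyfits   # stands in for the 'pyfits' module used above

raw_img = pyfits.getdata('veloce_flat_0001.fits')   # hypothetical raw frame, shape (4112, 4202)
img = crop_overscan_region(raw_img)                 # orientation is corrected internally if needed
print(img.shape)                                    # expected: (4096, 4112)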
Example #3
def make_median_image(imglist, MB=None, raw=False):
    """
    Make a median image from a given list of images.

    INPUT:
    'imglist'  : list of files (incl. directories)
    'MB'       : master bias frame - if provided, it will be subtracted from every image before median image is computed
    'raw'      : boolean - set to TRUE if you want to retain the original size and orientation;
                 otherwise the image will be brought to the 'correct' orientation and the overscan regions will be cropped

    OUTPUT:
    'medimg'   : median image
    """

    # from veloce_reduction.calibration import crop_overscan_region

    # prepare array
    allimg = []

    # loop over all files in 'imglist'
    for file in imglist:
        # read in image
        img = pyfits.getdata(file)
        if not raw:
            # bring to "correct" orientation
            img = correct_orientation(img)
            # remove the overscan region
            img = crop_overscan_region(img)
        if MB is not None:
            # subtract master bias (if provided)
            img = img - MB

        # add image to list
        allimg.append(img)

    # get median image
    medimg = np.median(np.array(allimg), axis=0)

    return medimg
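
Sketch of building a bias-subtracted master dark with this routine (the directory and file names are assumptions):

import glob
import astropy.io.fits as pyfits   # stands in for the 'pyfits' module used above

dark_list = sorted(glob.glob('/path/to/darks/*.fits'))   # hypothetical list of raw dark frames
MB = pyfits.getdata('/path/to/median_bias.fits')         # hypothetical master bias [ADU]
MD = make_median_image(dark_list, MB=MB, raw=False)      # oriented, overscan-cropped median dark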
Example #4
def get_bias_and_readnoise_from_bias_frames(bias_list,
                                            degpol=5,
                                            clip=5,
                                            gain=None,
                                            debug_level=0,
                                            savefile=False,
                                            timit=False):
    """
    Calculate the median bias frame, the offsets in the four different quadrants (assuming bias frames are flat within a quadrant),
    and the read-out noise per quadrant (ie the STDEV of the signal, but from difference images).
    
    INPUT:
    'bias_list'    : list of raw bias image files (incl. directories)
    'degpol'       : order of the polynomial (in each direction) to be used in the 2-dim polynomial surface fits to each quadrant's median bias frame
    'clip'         : number of 'sigmas' used to identify outliers when 'cleaning' each quadrant's median bias frame before the surface fitting
    'gain'         : array of gains for each quadrant (in units of e-/ADU)
    'debug_level'  : for debugging...
    'savefile'     : boolean - do you want to save the median bias frame to a FITS file?
    'timit'        : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'medimg'   : the median bias frame [ADU]
    'coeffs'   : the coefficients that describe the 2-dim polynomial surface fit to the 4 quadrants
    'offsets'  : the 4 constant offsets per quadrant (assuming bias frames are flat within a quadrant) [ADU]
    'rons'     : read-out noise for the 4 quadrants [e-]
    """

    if timit:
        start_time = time.time()

    print(
        'Determining offset levels and read-out noise properties from bias frames for 4 quadrants...'
    )

    img = pyfits.getdata(bias_list[0])

    #do some formatting things for real observations
    #bring to "correct" orientation
    img = correct_orientation(img)
    #remove the overscan region, which looks crap for actual bias images
    img = crop_overscan_region(img)

    ny, nx = img.shape

    #define four quadrants via masks
    q1, q2, q3, q4 = make_quadrant_masks(nx, ny)

    #co-add all bias frames
    #MB = create_master_img(bias_list, imgtype='bias', with_errors=False, savefiles=False, remove_outliers=True)

    #prepare arrays
    #means_q1 = []
    medians_q1 = []
    sigs_q1 = []
    #means_q2 = []
    medians_q2 = []
    sigs_q2 = []
    #means_q3 = []
    medians_q3 = []
    sigs_q3 = []
    #means_q4 = []
    medians_q4 = []
    sigs_q4 = []
    allimg = []

    # for name in bias_list:
    #
    #     img = pyfits.getdata(name)
    #     print(img.shape)

    #first get mean / median for all bias images (per quadrant)
    for name in bias_list:

        img = pyfits.getdata(name)
        #bring to "correct" orientation
        img = correct_orientation(img)

        # ny,nx = img.shape
        #
        # #define four quadrants via masks
        # q1,q2,q3,q4 = make_quadrant_masks(nx,ny)

        #remove the overscan region, which looks crap for actual bias images
        img = crop_overscan_region(img)
        #means_q1.append(np.nanmean(img[q1]))
        medians_q1.append(np.nanmedian(img[q1]))
        #means_q2.append(np.nanmean(img[q2]))
        medians_q2.append(np.nanmedian(img[q2]))
        #means_q3.append(np.nanmean(img[q3]))
        medians_q3.append(np.nanmedian(img[q3]))
        #means_q4.append(np.nanmean(img[q4]))
        medians_q4.append(np.nanmedian(img[q4]))
        allimg.append(img)

    # now get sigma of RON for ALL DIFFERENT COMBINATIONS of length 2 of the images in 'bias_list'
    # by using the difference images we are less susceptible to funny pixels (hot, warm, cosmics, etc.)
    list_of_combinations = list(combinations(bias_list, 2))
    for (name1, name2) in list_of_combinations:
        # read in observations and bring to right format
        img1 = pyfits.getdata(name1)
        img2 = pyfits.getdata(name2)

        img1 = correct_orientation(img1)
        img1 = crop_overscan_region(img1)
        img2 = correct_orientation(img2)
        img2 = crop_overscan_region(img2)

        #take difference and do sigma-clipping
        diff = img1.astype(int) - img2.astype(int)
        sigs_q1.append(np.nanstd(sigma_clip(diff[q1], 5)) / np.sqrt(2))
        sigs_q2.append(np.nanstd(sigma_clip(diff[q2], 5)) / np.sqrt(2))
        sigs_q3.append(np.nanstd(sigma_clip(diff[q3], 5)) / np.sqrt(2))
        sigs_q4.append(np.nanstd(sigma_clip(diff[q4], 5)) / np.sqrt(2))

    #now average over all images
    #allmeans = np.array([np.median(medians_q1), np.median(medians_q2), np.median(medians_q3), np.median(medians_q4)])
    offsets = np.array([
        np.median(medians_q1),
        np.median(medians_q2),
        np.median(medians_q3),
        np.median(medians_q4)
    ])
    rons = np.array([
        np.median(sigs_q1),
        np.median(sigs_q2),
        np.median(sigs_q3),
        np.median(sigs_q4)
    ])

    #get median image as well
    medimg = np.median(np.array(allimg), axis=0)

    ##### now fit a 2D polynomial surface to the median bias image (for each quadrant separately)

    #now, because all quadrants are the same size, they have the same "normalized coordinates", so only have to do that once
    xq1 = np.arange(0, (nx // 2))
    yq1 = np.arange(0, (ny // 2))
    XX_q1, YY_q1 = np.meshgrid(xq1, yq1)
    x_norm = (XX_q1.flatten() / ((len(xq1) - 1) / 2.)) - 1.
    y_norm = (YY_q1.flatten() / ((len(yq1) - 1) / 2.)) - 1.

    #Quadrant 1
    medimg_q1 = medimg[:(ny // 2), :(nx // 2)]
    #clean this, otherwise the surface fit will be rubbish
    medimg_q1[np.abs(medimg_q1 - np.median(medians_q1)) > clip *
              np.median(sigs_q1)] = np.median(medians_q1)
    coeffs_q1 = polyfit2d(x_norm, y_norm, medimg_q1.flatten(), order=degpol)

    #Quadrant 2
    medimg_q2 = medimg[:(ny // 2), (nx // 2):]
    #clean this, otherwise the surface fit will be rubbish
    medimg_q2[np.abs(medimg_q2 - np.median(medians_q2)) > clip *
              np.median(sigs_q2)] = np.median(medians_q2)
    #     xq2 = np.arange((nx/2),nx)
    #     yq2 = np.arange(0,(ny/2))
    #     XX_q2,YY_q2 = np.meshgrid(xq2,yq2)
    #     xq2_norm = (XX_q2.flatten() / ((len(xq2)-1)/2.)) - 3.   #not quite right
    #     yq2_norm = (YY_q2.flatten() / ((len(yq2)-1)/2.)) - 1.
    coeffs_q2 = polyfit2d(x_norm, y_norm, medimg_q2.flatten(), order=degpol)

    #Quadrant 3
    medimg_q3 = medimg[(ny // 2):, (nx // 2):]
    #clean this, otherwise the surface fit will be rubbish
    medimg_q3[np.abs(medimg_q3 - np.median(medians_q3)) > clip *
              np.median(sigs_q3)] = np.median(medians_q3)
    coeffs_q3 = polyfit2d(x_norm, y_norm, medimg_q3.flatten(), order=degpol)

    #Quadrant 4
    medimg_q4 = medimg[(ny // 2):, :(nx // 2)]
    #clean this, otherwise the surface fit will be rubbish
    medimg_q4[np.abs(medimg_q4 - np.median(medians_q4)) > clip *
              np.median(sigs_q4)] = np.median(medians_q4)
    coeffs_q4 = polyfit2d(x_norm, y_norm, medimg_q4.flatten(), order=degpol)

    #return all coefficients as 4-element array
    coeffs = np.array([coeffs_q1, coeffs_q2, coeffs_q3, coeffs_q4])

    #convert read-out noise (but NOT offsets!!!) to units of electrons rather than ADUs by multiplying with the gain (which has units of e-/ADU)
    if gain is None:
        print('ERROR: gain(s) not set!!!')
        return
    else:
        rons = rons * gain

    if debug_level >= 1:
        #plot stuff
        print('plot the distributions for the four quadrants maybe!?!?!?')

    print('Done!!!')
    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 1)) +
              ' seconds')

    if savefile:
        #save median bias image
        dum = bias_list[0].split('/')
        path = bias_list[0][0:-len(dum[-1])]
        #get header from the read-noise mask file
        h = pyfits.getheader(path + 'read_noise_mask.fits')
        #change a few things
        for i in range(1, 5):
            del h['offset_' + str(i)]
        h['UNITS'] = 'ADU'
        h['HISTORY'][0] = ('   median BIAS frame - created ' +
                           time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) +
                           ' (GMT)')
        #write master bias to file
        pyfits.writeto(path + 'median_bias.fits', medimg, h, clobber=True)

    return medimg, coeffs, offsets, rons
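
A sketch of a typical call (the directory and the per-quadrant gain values are placeholders, not measured Veloce values):

import glob

bias_list = sorted(glob.glob('/path/to/biases/*.fits'))   # hypothetical list of raw bias frames
gain = [1.0, 1.1, 1.1, 1.0]                               # placeholder gains [e-/ADU]
medimg, coeffs, offsets, rons = get_bias_and_readnoise_from_bias_frames(
    bias_list, degpol=5, clip=5, gain=gain, debug_level=0, savefile=False, timit=True)
# 'offsets' are in ADU; 'rons' have been converted to electrons inside the routine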
Example #5
def get_flux_and_variance_pairs(imglist,
                                MB,
                                MD=None,
                                scalable=True,
                                simu=False,
                                timit=False):
    """
    Measure flux and variance pairs from a list of flat-field images, which can then be used to estimate the gain as described here:
    https://www.mirametrics.com/tech_note_ccdgain.php

    units = ADUs

    INPUT:
    'imglist'  : list of image filenames (incl. directories)
    'MB'       : the master bias frame
    'MD'       : the master dark frame (optional); if provided, it is subtracted from each image before the flux and variance are measured
    'scalable' : boolean - is the master dark frame normalized to a 1s exposure time (ie 'scalable')?
    'simu'     : boolean - are you using simulated spectra?
    'timit'    : boolean - do you want to measure execution run time?

    OUTPUT:
    'f_q1' : median signal for quadrant 1
    'v_q1' : variance in signal for quadrant 1
    (same for other quadrants)
    """

    if timit:
        start_time = time.time()

    # do some formatting things for real observations
    #read dummy image
    img = pyfits.getdata(imglist[0])
    if not simu:
        # bring to "correct" orientation
        img = correct_orientation(img)
        # remove the overscan region
        img = crop_overscan_region(img)

    ny, nx = img.shape

    # define four quadrants via masks
    q1, q2, q3, q4 = make_quadrant_masks(nx, ny)

    #prepare arrays containing flux and variance
    f_q1 = np.array([])
    f_q2 = np.array([])
    f_q3 = np.array([])
    f_q4 = np.array([])
    v_q1 = np.array([])
    v_q2 = np.array([])
    v_q3 = np.array([])
    v_q4 = np.array([])

    #list of all possible pairs of files
    list_of_combinations = list(combinations(imglist, 2))

    for (name1, name2) in list_of_combinations:
        #read in observations and bring to right format
        img1 = pyfits.getdata(name1)
        img2 = pyfits.getdata(name2)
        if not simu:
            img1 = correct_orientation(img1)
            img1 = crop_overscan_region(img1)
            img2 = correct_orientation(img2)
            img2 = crop_overscan_region(img2)
        #subtract master bias
        img1 = img1 - MB
        if MD is not None:
            if scalable:
                texp1 = pyfits.getval(name1, 'exptime')
                texp2 = pyfits.getval(name2, 'exptime')
                if texp1 == texp2:
                    #subtract (scaled) master dark
                    img1 = img1 - MD * texp1
                    img2 = img2 - MD * texp2
                else:
                    print(
                        'ERROR: exposure times for the flat-field pairs do not agree!!!'
                    )
                    return
            else:
                #subtract master dark of right exposure time
                img1 = img1 - MD
                img2 = img2 - MD

        #take difference and do sigma-clipping
        # diff = img1.astype(long) - img2.astype(long)
        med1_q1 = np.nanmedian(img1[q1])
        med2_q1 = np.nanmedian(img2[q1])
        r_q1 = med1_q1 / med2_q1
        diff_q1 = img1[q1].astype(int) - r_q1 * img2[q1].astype(int)
        var_q1 = (np.nanstd(sigma_clip(diff_q1, 5)) / np.sqrt(2))**2
        med1_q2 = np.nanmedian(img1[q2])
        med2_q2 = np.nanmedian(img2[q2])
        r_q2 = med1_q2 / med2_q2
        diff_q2 = img1[q2].astype(int) - r_q2 * img2[q2].astype(int)
        var_q2 = (np.nanstd(sigma_clip(diff_q2, 5)) / np.sqrt(2))**2
        med1_q3 = np.nanmedian(img1[q3])
        med2_q3 = np.nanmedian(img2[q3])
        r_q3 = med1_q3 / med2_q3
        diff_q3 = img1[q3].astype(int) - r_q3 * img2[q3].astype(int)
        var_q3 = (np.nanstd(sigma_clip(diff_q3, 5)) / np.sqrt(2))**2
        med1_q4 = np.nanmedian(img1[q4])
        med2_q4 = np.nanmedian(img2[q4])
        r_q4 = med1_q4 / med2_q4
        diff_q4 = img1[q4].astype(int) - r_q4 * img2[q4].astype(int)
        var_q4 = (np.nanstd(sigma_clip(diff_q4, 5)) / np.sqrt(2))**2

        #fill output arrays
        f_q1 = np.append(f_q1, med1_q1)
        f_q2 = np.append(f_q2, med1_q2)
        f_q3 = np.append(f_q3, med1_q3)
        f_q4 = np.append(f_q4, med1_q4)
        v_q1 = np.append(v_q1, var_q1)
        v_q2 = np.append(v_q2, var_q2)
        v_q3 = np.append(v_q3, var_q3)
        v_q4 = np.append(v_q4, var_q4)

    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 1)) +
              ' seconds')

    return f_q1, f_q2, f_q3, f_q4, v_q1, v_q2, v_q3, v_q4
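
The flux/variance pairs can then be turned into a per-quadrant gain estimate with a straight-line fit of variance against flux (both in ADU), where the slope is 1/gain, following the photon-transfer method linked in the docstring. A minimal sketch, assuming 'flat_list' and the master bias 'MB' already exist:

import numpy as np

f_q1, f_q2, f_q3, f_q4, v_q1, v_q2, v_q3, v_q4 = get_flux_and_variance_pairs(flat_list, MB, timit=True)
gains = []
for f, v in zip((f_q1, f_q2, f_q3, f_q4), (v_q1, v_q2, v_q3, v_q4)):
    slope, intercept = np.polyfit(f, v, 1)   # variance [ADU^2] vs median flux [ADU]
    gains.append(1. / slope)                 # gain [e-/ADU]; the intercept relates to the read noise
print(gains)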
Example #6
def correct_for_bias_and_dark_from_filename(imgname,
                                            MB,
                                            MD,
                                            gain=None,
                                            scalable=False,
                                            savefile=False,
                                            path=None,
                                            simu=False,
                                            timit=False):
    """
    This routine subtracts both the MASTER BIAS frame [in ADU], and the MASTER DARK frame [in e-] from a given image.
    It also corrects the orientation of the image and crops the overscan regions.
    NOTE: the input image has units of ADU, but the output image has units of electrons!!!
    
    INPUT:
    'imgname'   : filename of raw science image (incl. directory)
    'MB'        : the master bias frame [ADU]
    'MD'        : the master dark frame [e-]
    'gain'      : the gains for each quadrant [e-/ADU]
    'scalable'  : boolean - do you want to normalize the dark current to an exposure time of 1s? (ie do you want to make it "scalable"?)
    'savefile'  : boolean - do you want to save the corrected image to a FITS file?
    'path'      : output directory for the corrected image (if not provided, the directory of the input file is used)
    'simu'      : boolean - are you using simulated spectra?
    'timit'     : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'dc_bc_img'  : the bias- & dark-corrected image [e-] (also has been brought to 'correct' orientation and overscan regions cropped) 
    
    MODHIST:
    #CMB - I removed the 'ronmask' and 'err_MD' INPUTs
    clone of "correct_for_bias_and_dark", but this one allows us to save output files
    """
    if timit:
        start_time = time.time()

    #(0) read in raw image [ADU]
    img = pyfits.getdata(imgname)
    if not simu:
        #bring to "correct" orientation
        img = correct_orientation(img)
        #remove the overscan region
        img = crop_overscan_region(img)

    #(1) BIAS SUBTRACTION [ADU]
    #bias-corrected_image
    bc_img = img - MB

    #(2) conversion to ELECTRONS and DARK SUBTRACTION [e-]
    #if the darks have a different exposure time then the images you are trying to correct, we need to re-scale the master dark
    if scalable:
        texp = pyfits.getval(imgname, 'exptime')
        if texp is not None:
            MD = MD * texp
#             #cannot have an error estimate lower than the read-out noise; this is dodgy but don't know what else to do
#             err_MD = np.maximum(err_MD * texp, ronmask)
        else:
            print(
                'ERROR: "texp" has to be provided when "scalable" is set to TRUE'
            )
            return
    #convert image to electrons now
    ny, nx = bc_img.shape
    q1, q2, q3, q4 = make_quadrant_masks(nx, ny)
    bc_img[q1] = gain[0] * bc_img[q1]
    bc_img[q2] = gain[1] * bc_img[q2]
    bc_img[q3] = gain[2] * bc_img[q3]
    bc_img[q4] = gain[3] * bc_img[q4]
    #now subtract master dark frame [e-] to create dark- & bias-corrected image [e-]
    dc_bc_img = bc_img - MD

    #if desired, write bias- & dark-corrected image and error array to fits files
    if savefile:
        dum = imgname.split('/')
        dum2 = dum[-1].split('.')
        shortname = dum2[0]
        if path is None:
            print('WARNING: output file directory not provided!!!')
            print('Using same directory as input file...')
            path = imgname[0:-len(dum[-1])]
        #outfn = path+shortname+'_bias_and_dark_corrected.fits'
        outfn = path + shortname + '_BD.fits'
        #get header from the original image FITS file
        h = pyfits.getheader(imgname)
        h['UNITS'] = 'ELECTRONS'
        h['HISTORY'] = '   BIAS- & DARK-corrected image - created ' + time.strftime(
            "%Y-%m-%d %H:%M:%S", time.gmtime()) + ' (GMT)'
        pyfits.writeto(outfn, dc_bc_img, h, clobber=True)
#         h_err = h.copy()
#         h_err['HISTORY'] = 'estimated uncertainty in BIAS- & DARK-corrected image - created '+time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())+' (GMT)'
#         pyfits.append(outfn, err_dc_bc_img, h_err, clobber=True)

    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 1)) +
              ' seconds')


#     return dc_bc_img, err_dc_bc_img
    return dc_bc_img
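
A usage sketch for a single science frame (file names, gains, and calibration products are placeholders):

import astropy.io.fits as pyfits   # stands in for the 'pyfits' module used above

MB = pyfits.getdata('/path/to/median_bias.fits')          # hypothetical master bias [ADU]
MD = pyfits.getdata('/path/to/master_dark_scaled.fits')   # hypothetical master dark [e-], per 1s if 'scalable'
gain = [1.0, 1.1, 1.1, 1.0]                               # placeholder per-quadrant gains [e-/ADU]
dc_bc_img = correct_for_bias_and_dark_from_filename('/path/to/science_0001.fits', MB, MD,
                                                    gain=gain, scalable=True, savefile=False)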
Example #7
pathdict['dispsol'] = '/Users/christoph/OneDrive - UNSW/dispsol/'
pathdict['root'] = '/Users/christoph/OneDrive - UNSW/'

### (0) GET INFO FROM FITS HEADERS ##################################################################################################################
# do a stock take of ALL FITS files in that folder
all_raw_files = glob.glob(pathdict['raw'] + date[-2:] + "*.fits")
all_raw_files.sort()
print('Identifying ' + str(len(all_raw_files)) + ' raw FITS files...')

# divide into lists according to exposure type
acq_list, bias_list, dark_list, flat_list, arc_list, thxe_list, laser_list, laser_and_thxe_list, stellar_list, unknown_list = get_obstype_lists(
    pathdict)
assert len(unknown_list) == 0, "WARNING: unknown files encountered!!!"

# obsnames = short_filenames(bias_list)
dumimg = crop_overscan_region(correct_orientation(pyfits.getdata(
    bias_list[0])))
ny, nx = dumimg.shape
del dumimg
#####################################################################################################################################################

# check white light exposures

for file in flat_list:
    #     fimg = crop_overscan_region(correct_orientation(pyfits.getdata(file)))
    fimg = correct_for_bias_and_dark_from_filename(file,
                                                   np.zeros((4096, 4112)),
                                                   np.zeros((4096, 4112)),
                                                   gain=[1., 1.095, 1.125, 1.],
                                                   scalable=False,
                                                   savefile=False,
                                                   path=pathdict['raw'])