Example #1
import numpy as np
from scipy import ndimage
from scipy.signal import medfilt
# note: sigma_clip is a helper defined elsewhere in this repo (it returns the clipped array)

def onedim_medfilt_cosmic_ray_removal(f,
                                      err,
                                      thresh=5.,
                                      low_thresh=3.,
                                      w=31,
                                      maxfilter_size=250,
                                      gauss_filter_sigma=15,
                                      debug_level=0):

    f_clean = f.copy()
    f_sm = medfilt(f, w)
    err_sm = medfilt(err, w)
    # rough continuum estimate: max-filter the median-smoothed flux, then smooth with a Gaussian
    cont_rough = ndimage.gaussian_filter(
        ndimage.maximum_filter(f_sm, size=maxfilter_size), gauss_filter_sigma)
    # adjust the threshold according to the normalized scatter of (f - f_sm)
    dum = (f - f_sm) / err_sm
    scatter = np.nanstd(sigma_clip(dum, 3))
    # flag pixels that are well above the smoothed flux AND above the rough continuum
    cosmics = (dum > thresh * scatter) & (f > cont_rough)
    f_clean[cosmics] = f_sm[cosmics]

    # "grow" the cosmics by 1 pixel in each direction (as in LACosmic)
    growkernel = np.ones(3)
    extended_cosmics = ndimage.convolve(cosmics.astype(np.float32),
                                        growkernel).astype(bool)
    cosmic_edges = np.logical_xor(cosmics, extended_cosmics)

    # now check the pixels surrounding the cosmics as well, but with a lower threshold
    bad_edges = np.logical_and(dum > low_thresh * scatter, cosmic_edges)
    f_clean[bad_edges] = f_sm[bad_edges]

    ncos = np.sum(cosmics) + np.sum(bad_edges)

    return f_clean, ncos
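Note that sigma_clip above is a repo-internal helper rather than the astropy function. As a minimal, self-contained illustration of the underlying idea (median-filter the spectrum, flag pixels that stick out far above it), here is a toy sketch on synthetic data; all names and numbers below are invented:

import numpy as np
from scipy.signal import medfilt

rng = np.random.default_rng(42)
n = 2000
flux = 100. + 20. * np.sin(np.arange(n) / 50.) + rng.normal(0., 3., n)
err = np.full(n, 3.)
hits = rng.choice(np.arange(100, n - 100), size=5, replace=False)
flux[hits] += 500.                        # inject fake cosmic-ray spikes

f_sm = medfilt(flux, 31)                  # a running median ignores narrow spikes
cosmics = (flux - f_sm) / medfilt(err, 31) > 5.
flux_clean = flux.copy()
flux_clean[cosmics] = f_sm[cosmics]       # replace flagged pixels by the local median
print('all injected spikes recovered:', bool(np.all(cosmics[hits])))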
Example #2
import time
import numpy as np
from scipy import sparse
# note: sigma_clip (here a repo-internal version that can return indices) and
# fit_poly_surface_2D are helpers defined elsewhere in this repo

def fit_background_astropy(bg,
                           poly_deg=5,
                           clip=10,
                           polytype='chebyshev',
                           return_full=True,
                           timit=False):
    """ 
    WARNING: While this works just fine, it is MUCH MUCH slower than 'fit_background' above. The astropy fitting/modelling must be to blame...
    
    INPUT:
    'bg'               : sparse matrix containing the inter-order regions of the 2D image
    'poly_deg'         : the order of the polynomials to use in the fit (for both dimensions)
    'clip'             : threshold for sigma clipping (needed to get rid of hot pixels etc)
    'polytype'         : either 'polynomial' (default), 'legendre', or 'chebyshev' are accepted
    'return_full'      : boolean - if TRUE, then the background model for each pixel for each order is returned; otherwise just the set of coefficients that describe it
    'timit'            : time it...
    'debug_level'      : for debugging only
    
    OUTPUT:
    EITHER
    'bkgd_coeffs'      : functional form of the coefficients that describe the background model
    OR 
    'bkgd_img'         : full background image constructed from best-fit model
    (selection between outputs is controlled by the 'return_full' keyword)
    
    TODO:
    figure out how to properly use weights here
    """

    if timit:
        start_time = time.time()

    # find the non-zero parts of the sparse matrix
    # format is:
    # contents[0] = row indices, ie y-values
    # contents[1] = column indices, ie x-values
    # contents[2] = values
    contents = sparse.find(bg)

    ny, nx = bg.shape  # sparse matrices expose .shape directly; no need to densify

    # perform sigma-clipping to get rid of hot pixels etc
    z_clean, goodix, badix = sigma_clip(contents[2], clip, return_indices=True)

    # re-normalize coordinates to [-1,+1] - otherwise small errors in the fitted parameters have huge effects
    # (NB: contents[0] holds the row indices, ie y; contents[1] the column indices, ie x)
    x_norm = (contents[1][goodix] / ((nx - 1) / 2.)) - 1.
    y_norm = (contents[0][goodix] / ((ny - 1) / 2.)) - 1.

    # call the surface fitting routine
    bkgd_coeffs = fit_poly_surface_2D(x_norm,
                                      y_norm,
                                      z_clean,
                                      weights=None,
                                      polytype=polytype,
                                      poly_deg=poly_deg)

    if return_full:
        # evaluate the model on the full pixel grid (same [-1,+1] normalization as above)
        xx = np.arange(nx)
        xxn = (xx / ((nx - 1) / 2.)) - 1.
        yy = np.arange(ny)
        yyn = (yy / ((ny - 1) / 2.)) - 1.
        X, Y = np.meshgrid(xxn, yyn)
        bkgd_img = bkgd_coeffs(X, Y)

    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 2)) +
              ' seconds...')

    if return_full:
        return bkgd_coeffs, bkgd_img
    else:
        return bkgd_coeffs
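fit_poly_surface_2D is defined elsewhere in the source repo. For reference, a minimal stand-in built on astropy.modeling (the framework the warning above blames for the slowness) might look like the sketch below; the function name matches the call above, but the body is an assumption:

import numpy as np
from astropy.modeling import models, fitting

def fit_poly_surface_2D(x, y, z, weights=None, polytype='chebyshev', poly_deg=5):
    # pick the 2D polynomial family requested via 'polytype' (first letter suffices)
    if polytype.lower().startswith('c'):
        model = models.Chebyshev2D(x_degree=poly_deg, y_degree=poly_deg)
    elif polytype.lower().startswith('l'):
        model = models.Legendre2D(x_degree=poly_deg, y_degree=poly_deg)
    else:
        model = models.Polynomial2D(degree=poly_deg)
    # all three families are linear in their coefficients, so a linear LSQ fit suffices
    fitter = fitting.LinearLSQFitter()
    return fitter(model, x, y, z, weights=weights)

The returned object is a callable model, which is why 'bkgd_coeffs(X, Y)' above can evaluate the background surface directly.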
Example #3
import time
import numpy as np
from scipy import sparse
# note: sigma_clip (repo-internal version that can return indices), polyfit2d and polyval2d
# are helpers defined elsewhere (see the sketch after this example)

def fit_background(bg, deg=5, clip=10, return_full=True, timit=False):
    """ 
    INPUT:
    'bg'                      : sparse matrix containing the inter-order regions of the 2D image
    'deg'                     : the order of the polynomials to use in the fit (for both dimensions)
    'clip'                    : threshold for sigma clipping (needed to get rid of hot pixels etc)
    'return_full'             : boolean - if TRUE, the full image of the background model is returned as well; otherwise just the set of coefficients that describe it
    'timit'                   : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'coeffs'    : polynomial coefficients that describe the background model
    'bkgd_img'  : full background image constructed from best-fit model (only if 'return_full' is set to TRUE)
    
    TODO:
    figure out how to properly use weights here
    """

    if timit:
        start_time = time.time()

    # find the non-zero parts of the sparse matrix
    # format is:
    # contents[0] = row indices, ie y-values
    # contents[1] = column indices, ie x-values
    # contents[2] = values
    contents = sparse.find(bg)

    ny, nx = bg.shape  # sparse matrices expose .shape directly; no need to densify

    # perform sigma-clipping to get rid of hot pixels etc
    z_clean, goodix, badix = sigma_clip(contents[2], clip, return_indices=True)

    # re-normalize coordinates to [-1,+1] - otherwise small errors in the fitted parameters have huge effects
    # (NB: contents[0] holds the row indices, ie y; contents[1] the column indices, ie x)
    x_norm = (contents[1][goodix] / ((nx - 1) / 2.)) - 1.
    y_norm = (contents[0][goodix] / ((ny - 1) / 2.)) - 1.

    coeffs = polyfit2d(x_norm, y_norm, z_clean, order=deg)
    # the result is an array of the polynomial coefficients in the model f = sum_i sum_j a_ij x^i y^j,
    # eg:    coeffs = [a00, a01, a02, a03, a10, a11, a12, a13, a20, ..., a33] for order=3

    if timit:
        print('Time taken for fitting background model: ' +
              str(np.round(time.time() - start_time, 2)) + ' seconds...')
        start_time_2 = time.time()

    if return_full:
        # evaluate the model on the full pixel grid (same [-1,+1] normalization as above)
        xx = np.arange(nx)
        xxn = (xx / ((nx - 1) / 2.)) - 1.
        yy = np.arange(ny)
        yyn = (yy / ((ny - 1) / 2.)) - 1.
        X, Y = np.meshgrid(xxn, yyn)
        bkgd_img = polyval2d(X, Y, coeffs)

    if timit:
        print('Time taken for constructing full background image: ' +
              str(np.round(time.time() - start_time_2, 2)) + ' seconds...')
        print('Total time elapsed: ' +
              str(np.round(time.time() - start_time, 2)) + ' seconds...')

    if return_full:
        return coeffs, bkgd_img
    else:
        return coeffs
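polyfit2d and polyval2d are helpers defined elsewhere in the repo. Judging by the coefficient-ordering comment above, they presumably follow the classic least-squares recipe; a sketch under that assumption:

import itertools
import numpy as np

def polyfit2d(x, y, z, order=3):
    # design matrix with one column per monomial x^i * y^j
    ij = list(itertools.product(range(order + 1), repeat=2))
    G = np.column_stack([x**i * y**j for (i, j) in ij])
    coeffs, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
    return coeffs        # ordering: [a00, a01, ..., a0n, a10, ..., ann]

def polyval2d(x, y, coeffs):
    # invert the flattened (order+1)**2 coefficient layout used above
    order = int(np.sqrt(len(coeffs))) - 1
    z = np.zeros_like(x, dtype=float)
    for a, (i, j) in zip(coeffs, itertools.product(range(order + 1), repeat=2)):
        z += a * x**i * y**j
    return z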
Example #4
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
# note: sigma_clip is a helper defined elsewhere in this repo

def find_gaps(flat,
              deg_polynomial=2,
              gauss_filter_sigma=3.,
              min_peak=0.05,
              maskthresh=100.,
              weighted_fits=True,
              slowmask=False,
              simu=False,
              timit=False,
              debug_level=0):
    """
    BASED ON JULIAN STUERMER'S MAROON_X PIPELINE:
    
    Locates and fits stripes (ie orders) in a flat field echelle spectrum.
    
    Starting in the central column, the algorithm identifies peaks and traces each stripe to the edge of the detector
    by following the brightest pixels along each order. It then fits a polynomial to each stripe.
    To improve algorithm stability, the image is first smoothed with a Gaussian filter. It not only eliminates noise, but
    also ensures that the cross-section profile of the flat becomes peaked in the middle, which helps to identify the
    center of each stripe. Choose gauss_filter accordingly.
    To avoid false positives, only peaks above a certain (relative) intensity threshold are used.
      
    :param flat: dark-corrected flat field spectrum
    :type flat: np.ndarray
    :param deg_polynomial: degree of the polynomial fit
    :type deg_polynomial: int
    :param gauss_filter_sigma: sigma of the gaussian filter used to smooth the image.
    :type gauss_filter_sigma: float
    :param min_peak: minimum relative peak height 
    :type min_peak: float
    :param debug_level: debug level flag
    :type debug_level: int
    :return: list of polynomial fits (np.poly1d)
    :rtype: list
    """

    if timit:
        start_time = time.time()

    print('Finding gaps between stripes...')
    ny, nx = flat.shape

    # smooth image slightly for noise reduction
    filtered_flat = ndimage.gaussian_filter(flat.astype(float),
                                            gauss_filter_sigma)

    # find troughs (local minima) in the center column
    data = filtered_flat[:, int(nx / 2)]
    troughs = np.r_[True, data[1:] < data[:-1]] & np.r_[data[:-1] < data[1:],
                                                        True]

    if debug_level > 1:
        plt.figure()
        plt.title('Local minima')
        plt.plot(data)
        plt.scatter(np.arange(ny)[troughs], data[troughs], s=25)
        plt.show()

    # fiddly fix, so we only find the troughs between stellar and sky fibres
    idx = np.logical_and(np.logical_and(troughs, data > 50), data < 2000.)
    idx[:40] = False
    idx[4016:] = False

    minima = np.arange(ny)[idx]

    # filter out minima too close to the boundary to avoid problems
    minima = minima[minima > 3]
    minima = minima[minima < ny - 3]

    if debug_level > 1:
        plt.figure()
        plt.title('Order gaps (stellar / sky)')
        plt.plot(data)
        plt.scatter(np.arange(ny)[minima], data[minima], s=25, c='red')
        plt.show()

    n_gaps = len(minima)
    print('Number of gaps found: %d' % n_gaps)

    gaps = np.zeros((n_gaps, nx))
    # because we only want to use good pixels in the fit later on
    mask = np.ones((n_gaps, nx), dtype=bool)

    # walk left and right along the minimum of each gap
    # loop over all gaps:
    for m, row in enumerate(minima):
        column = int(nx / 2)
        gaps[m, column] = row
        start_row = row
        # walk right
        while (column + 1 < nx):
            column += 1
            # candidate rows: one above, at, and one below the current position
            args = np.array(np.linspace(max(1, start_row - 1),
                                        min(start_row + 1, ny - 1), 3),
                            dtype=int)
            args = args[np.logical_and(
                args < ny, args > 0)]  # deal with potential edge effects
            p = filtered_flat[args, column]
            # move to the new minimum (but only when p actually contains distinct flux values,
            # ie not when eg p=[0,0,0]); otherwise leave start_row unchanged
            if not (p[0] == p[1] == p[2]):
                start_row = args[np.argmin(p)]
            gaps[m, column] = start_row
            # build mask - exclude pixels at the upper/lower end of the chip; if 'slowmask' is set,
            # also exclude pixels that do not lie at least 5 sigmas above the rms of the 3-sigma-clipped
            # background (+/- cliprange pixels from the current location)
            if slowmask:
                cliprange = 25
                bg = filtered_flat[start_row - cliprange:start_row +
                                   cliprange + 1, column]
                clipped = sigma_clip(bg, 3.)
                if (filtered_flat[start_row, column] - np.median(clipped) <
                        5. * np.std(clipped)) or (start_row in (0, ny - 1)):
                    mask[m, column] = False
            else:
                if ((p < maskthresh).all()) or (start_row in (0, ny - 1)):
                    mask[m, column] = False
        # walk left
        column = int(nx / 2)
        start_row = row
        while (column > 0):
            column -= 1
            args = np.array(np.linspace(max(1, start_row - 1),
                                        min(start_row + 1, ny - 1), 3),
                            dtype=int)
            args = args[np.logical_and(
                args < ny, args > 0)]  # deal with potential edge effects
            p = filtered_flat[args, column]
            # move to the new minimum (but only when p actually contains distinct flux values,
            # ie not when eg p=[0,0,0]); otherwise leave start_row unchanged
            if not (p[0] == p[1] == p[2]):
                start_row = args[np.argmin(p)]
            gaps[m, column] = start_row
            # build mask as above; in the left half, additionally mask the incomplete first gap
            # (m == 0) below a hard-coded column cut-off that differs between simulated and real spectra
            if slowmask:
                cliprange = 25
                bg = filtered_flat[start_row - cliprange:start_row +
                                   cliprange + 1, column]
                clipped = sigma_clip(bg, 3.)
                if (filtered_flat[start_row, column] - np.median(clipped) <
                        5. * np.std(clipped)) or (start_row in (0, ny - 1)):
                    mask[m, column] = False
            else:
                if ((p < maskthresh).all()) or (start_row in (0, ny - 1)) or (
                        simu and m == 0 and column < 1300) or (
                            not simu and m == 0 and column < 900):
                    mask[m, column] = False

    # fit a polynomial to each gap
    print('Fitting polynomial of order %d to each gap...' % deg_polynomial)
    P = []
    xx = np.arange(nx)
    for i in range(len(gaps)):
        if not weighted_fits:
            # unweighted fit
            p = np.poly1d(
                np.polyfit(xx[mask[i, :]], gaps[i, mask[i, :]],
                           deg_polynomial))
        else:
            # fit weighted by sqrt(flux) along the gap
            # (NB: w = 1/sqrt(flux) would weight the gap centres LESS)
            filtered_flux_along_order = np.array(
                [filtered_flat[int(gaps[i, j]), j] for j in range(nx)])
            filtered_flux_along_order[filtered_flux_along_order < 1] = 1
            w = np.sqrt(filtered_flux_along_order)
            p = np.poly1d(
                np.polyfit(xx[mask[i, :]],
                           gaps[i, mask[i, :]],
                           deg_polynomial,
                           w=w[mask[i, :]]))
        P.append(p)

    if debug_level > 0:
        plt.figure()
        plt.imshow(filtered_flat,
                   interpolation='none',
                   vmin=np.min(flat),
                   vmax=0.9 * np.max(flat),
                   cmap='gray')
        for p in P:
            plt.plot(xx, p(xx), 'g', alpha=1)
        plt.ylim((0, ny))
        plt.xlim((0, nx))
        plt.show()

    if timit:
        print('Elapsed time: ' + str(time.time() - start_time) + ' seconds')

    return P, mask
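The trough detection above hinges on a compact np.r_ comparison: a pixel counts as a trough if it is strictly lower than both of its neighbours, with True padded at the ends so the edge pixels only need to qualify on one side. A quick illustrative check (the data here is made up):

import numpy as np

data = np.array([3., 2., 4., 1., 5., 5., 2., 6.])
troughs = np.r_[True, data[1:] < data[:-1]] & np.r_[data[:-1] < data[1:], True]
print(np.flatnonzero(troughs))   # -> [1 3 6], the indices of the local minima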
Example #5
import time
from itertools import combinations
import numpy as np
import astropy.io.fits as pyfits
# note: correct_orientation, crop_overscan_region, make_quadrant_masks, sigma_clip and
# polyfit2d are helpers defined elsewhere in this repo
# ('savefile' is referenced in the body but was never defined; it is exposed as a keyword here)

def get_bias_and_readnoise_from_bias_frames(bias_list,
                                            degpol=5,
                                            clip=5,
                                            gain=None,
                                            savefile=False,
                                            debug_level=0,
                                            timit=False):
    """
    Calculate the median bias frame, the offsets in the four different quadrants (assuming bias frames are flat within a quadrant),
    and the read-out noise per quadrant (ie the STDEV of the signal, but from difference images).
    
    INPUT:
    'bias_list'    : list of raw bias image files (incl. directories)
    'degpol'       : order of the polynomial (in each direction) to be used in the 2-dim polynomial surface fits to each quadrant's median bias frame
    'clip'         : number of 'sigmas' used to identify outliers when 'cleaning' each quadrant's median bias frame before the surface fitting
    'gain'         : array of gains for each quadrant (in units of e-/ADU)
    'savefile'     : boolean - do you want to save the median bias frame to a FITS file?
    'debug_level'  : for debugging...
    'timit'        : boolean - do you want to measure execution run time?
    
    OUTPUT:
    'medimg'   : the median bias frame [ADU]
    'coeffs'   : the coefficients that describe the 2-dim polynomial surface fit to the 4 quadrants
    'offsets'  : the 4 constant offsets per quadrant (assuming bias frames are flat within a quadrant) [ADU]
    'rons'     : read-out noise for the 4 quadrants [e-]
    """

    if timit:
        start_time = time.time()

    print(
        'Determining offset levels and read-out noise properties from bias frames for 4 quadrants...'
    )

    img = pyfits.getdata(bias_list[0])

    #do some formatting things for real observations
    #bring to "correct" orientation
    img = correct_orientation(img)
    #remove the overscan region, which looks crap for actual bias images
    img = crop_overscan_region(img)

    ny, nx = img.shape

    #define four quadrants via masks
    q1, q2, q3, q4 = make_quadrant_masks(nx, ny)


    # prepare lists for the per-quadrant median levels and read-noise estimates
    medians_q1, sigs_q1 = [], []
    medians_q2, sigs_q2 = [], []
    medians_q3, sigs_q3 = [], []
    medians_q4, sigs_q4 = [], []
    allimg = []

    # first get the median level of each bias image (per quadrant)
    for name in bias_list:

        img = pyfits.getdata(name)
        # bring to "correct" orientation
        img = correct_orientation(img)
        # remove the overscan region, which looks crap for actual bias images
        img = crop_overscan_region(img)
        medians_q1.append(np.nanmedian(img[q1]))
        medians_q2.append(np.nanmedian(img[q2]))
        medians_q3.append(np.nanmedian(img[q3]))
        medians_q4.append(np.nanmedian(img[q4]))
        allimg.append(img)

    # now get sigma of RON for ALL DIFFERENT COMBINATIONS of length 2 of the images in 'bias_list'
    # by using the difference images we are less susceptible to funny pixels (hot, warm, cosmics, etc.)
    list_of_combinations = list(combinations(bias_list, 2))
    for (name1, name2) in list_of_combinations:
        # read in observations and bring to right format
        img1 = pyfits.getdata(name1)
        img2 = pyfits.getdata(name2)

        img1 = correct_orientation(img1)
        img1 = crop_overscan_region(img1)
        img2 = correct_orientation(img2)
        img2 = crop_overscan_region(img2)

        # take the difference and do sigma-clipping
        # ('long' was Python 2; use int64 to avoid overflow in the subtraction)
        diff = img1.astype(np.int64) - img2.astype(np.int64)
        sigs_q1.append(np.nanstd(sigma_clip(diff[q1], 5)) / np.sqrt(2))
        sigs_q2.append(np.nanstd(sigma_clip(diff[q2], 5)) / np.sqrt(2))
        sigs_q3.append(np.nanstd(sigma_clip(diff[q3], 5)) / np.sqrt(2))
        sigs_q4.append(np.nanstd(sigma_clip(diff[q4], 5)) / np.sqrt(2))

    # now take the median over all images / image pairs
    offsets = np.array([
        np.median(medians_q1),
        np.median(medians_q2),
        np.median(medians_q3),
        np.median(medians_q4)
    ])
    rons = np.array([
        np.median(sigs_q1),
        np.median(sigs_q2),
        np.median(sigs_q3),
        np.median(sigs_q4)
    ])

    #get median image as well
    medimg = np.median(np.array(allimg), axis=0)

    ##### now fit a 2D polynomial surface to the median bias image (for each quadrant separately) #####

    # all quadrants have the same size, hence the same normalized coordinates - so we only compute those once
    xq1 = np.arange(nx // 2)
    yq1 = np.arange(ny // 2)
    XX_q1, YY_q1 = np.meshgrid(xq1, yq1)
    x_norm = (XX_q1.flatten() / ((len(xq1) - 1) / 2.)) - 1.
    y_norm = (YY_q1.flatten() / ((len(yq1) - 1) / 2.)) - 1.

    # Quadrant 1
    medimg_q1 = medimg[:ny // 2, :nx // 2]
    # clean outliers, otherwise the surface fit will be rubbish
    medimg_q1[np.abs(medimg_q1 - np.median(medians_q1)) > clip *
              np.median(sigs_q1)] = np.median(medians_q1)
    coeffs_q1 = polyfit2d(x_norm, y_norm, medimg_q1.flatten(), order=degpol)

    # Quadrant 2
    medimg_q2 = medimg[:ny // 2, nx // 2:]
    # clean outliers, otherwise the surface fit will be rubbish
    medimg_q2[np.abs(medimg_q2 - np.median(medians_q2)) > clip *
              np.median(sigs_q2)] = np.median(medians_q2)
    coeffs_q2 = polyfit2d(x_norm, y_norm, medimg_q2.flatten(), order=degpol)

    # Quadrant 3
    medimg_q3 = medimg[ny // 2:, nx // 2:]
    # clean outliers, otherwise the surface fit will be rubbish
    medimg_q3[np.abs(medimg_q3 - np.median(medians_q3)) > clip *
              np.median(sigs_q3)] = np.median(medians_q3)
    coeffs_q3 = polyfit2d(x_norm, y_norm, medimg_q3.flatten(), order=degpol)

    # Quadrant 4
    medimg_q4 = medimg[ny // 2:, :nx // 2]
    # clean outliers, otherwise the surface fit will be rubbish
    medimg_q4[np.abs(medimg_q4 - np.median(medians_q4)) > clip *
              np.median(sigs_q4)] = np.median(medians_q4)
    coeffs_q4 = polyfit2d(x_norm, y_norm, medimg_q4.flatten(), order=degpol)

    #return all coefficients as 4-element array
    coeffs = np.array([coeffs_q1, coeffs_q2, coeffs_q3, coeffs_q4])

    # convert the read-out noise (but NOT the offsets!!!) from ADU to electrons by multiplying with the gain (e-/ADU)
    if gain is None:
        print('ERROR: gain(s) not set!!!')
        return
    else:
        rons = rons * gain

    if debug_level >= 1:
        # TODO: plot the offset and read-noise distributions for the four quadrants
        pass

    print('Done!!!')
    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 1)) +
              ' seconds')

    if savefile:
        # save the median bias image to the same directory as the input files
        dum = bias_list[0].split('/')
        path = bias_list[0][0:-len(dum[-1])]
        # get the header from the read-noise mask file and adapt a few keywords
        h = pyfits.getheader(path + 'read_noise_mask.fits')
        for i in range(1, 5):
            del h['offset_' + str(i)]
        h['UNITS'] = 'ADU'
        h['HISTORY'][0] = ('   median BIAS frame - created ' +
                           time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) +
                           ' (GMT)')
        # write the median bias frame to file ('clobber' was renamed 'overwrite' in astropy)
        pyfits.writeto(path + 'median_bias.fits', medimg, h, overwrite=True)

    return medimg, coeffs, offsets, rons
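The read-noise estimate above exploits the fact that the difference of two bias frames cancels any fixed structure while the noise adds in quadrature, so the clipped standard deviation of the difference divided by sqrt(2) recovers the per-frame RON. A toy check on synthetic frames (all numbers invented):

import numpy as np

rng = np.random.default_rng(1)
ron_true = 4.0                                      # RON in ADU, say
bias1 = 1000. + rng.normal(0., ron_true, (512, 512))
bias2 = 1000. + rng.normal(0., ron_true, (512, 512))

diff = bias1 - bias2                                # offsets cancel, noise adds in quadrature
ron_est = np.std(diff) / np.sqrt(2.)
print('true RON: %.2f   estimated RON: %.2f' % (ron_true, ron_est))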
Example #6
import time
from itertools import combinations
import numpy as np
import astropy.io.fits as pyfits
# note: correct_orientation, crop_overscan_region, make_quadrant_masks and sigma_clip
# are helpers defined elsewhere in this repo

def get_flux_and_variance_pairs(imglist,
                                MB,
                                MD=None,
                                scalable=True,
                                simu=False,
                                timit=False):
    """
    measure gain from a list of flat-field images as described here:
    https://www.mirametrics.com/tech_note_ccdgain.php

    units = ADUs

    INPUT:
    'imglist'  : list of image filenames (incl. directories)
    'MB'       : the master bias frame
    'simu'     : boolean - are you using simulated spectra?
    'timit'    : boolean - do you want to measure execution run time?

    OUTPUT:
    'f_q1' : median signal for quadrant 1
    'v_q1' : variance in signal for quadrant 1
    (same for other quadrants)
    """

    if timit:
        start_time = time.time()

    # do some formatting things for real observations
    # read a dummy image to get the dimensions
    img = pyfits.getdata(imglist[0])
    if not simu:
        # bring to "correct" orientation
        img = correct_orientation(img)
        # remove the overscan region
        img = crop_overscan_region(img)

    ny, nx = img.shape

    # define four quadrants via masks
    q1, q2, q3, q4 = make_quadrant_masks(nx, ny)

    #prepare arrays containing flux and variance
    f_q1 = np.array([])
    f_q2 = np.array([])
    f_q3 = np.array([])
    f_q4 = np.array([])
    v_q1 = np.array([])
    v_q2 = np.array([])
    v_q3 = np.array([])
    v_q4 = np.array([])

    #list of all possible pairs of files
    list_of_combinations = list(combinations(imglist, 2))

    for (name1, name2) in list_of_combinations:
        #read in observations and bring to right format
        img1 = pyfits.getdata(name1)
        img2 = pyfits.getdata(name2)
        if not simu:
            img1 = correct_orientation(img1)
            img1 = crop_overscan_region(img1)
            img2 = correct_orientation(img2)
            img2 = crop_overscan_region(img2)
        # subtract master bias from both images
        img1 = img1 - MB
        img2 = img2 - MB
        if MD is not None:
            if scalable:
                texp1 = pyfits.getval(name1, 'exptime')
                texp2 = pyfits.getval(name2, 'exptime')
                if texp1 == texp2:
                    #subtract (scaled) master dark
                    img1 = img1 - MD * texp1
                    img2 = img2 - MD * texp2
                else:
                    print(
                        'ERROR: exposure times for the flat-field pairs do not agree!!!'
                    )
                    return
            else:
                #subtract master dark of right exposure time
                img1 = img1 - MD
                img2 = img2 - MD

        # for each quadrant: scale img2 to the same median level as img1, take the difference,
        # and estimate the variance from the sigma-clipped difference image
        # (the factor sqrt(2) accounts for the noise adding in quadrature in the difference)
        med1_q1 = np.nanmedian(img1[q1])
        med2_q1 = np.nanmedian(img2[q1])
        r_q1 = med1_q1 / med2_q1
        diff_q1 = img1[q1].astype(float) - r_q1 * img2[q1].astype(float)
        var_q1 = (np.nanstd(sigma_clip(diff_q1, 5)) / np.sqrt(2))**2
        med1_q2 = np.nanmedian(img1[q2])
        med2_q2 = np.nanmedian(img2[q2])
        r_q2 = med1_q2 / med2_q2
        diff_q2 = img1[q2].astype(float) - r_q2 * img2[q2].astype(float)
        var_q2 = (np.nanstd(sigma_clip(diff_q2, 5)) / np.sqrt(2))**2
        med1_q3 = np.nanmedian(img1[q3])
        med2_q3 = np.nanmedian(img2[q3])
        r_q3 = med1_q3 / med2_q3
        diff_q3 = img1[q3].astype(float) - r_q3 * img2[q3].astype(float)
        var_q3 = (np.nanstd(sigma_clip(diff_q3, 5)) / np.sqrt(2))**2
        med1_q4 = np.nanmedian(img1[q4])
        med2_q4 = np.nanmedian(img2[q4])
        r_q4 = med1_q4 / med2_q4
        diff_q4 = img1[q4].astype(float) - r_q4 * img2[q4].astype(float)
        var_q4 = (np.nanstd(sigma_clip(diff_q4, 5)) / np.sqrt(2))**2

        #fill output arrays
        f_q1 = np.append(f_q1, med1_q1)
        f_q2 = np.append(f_q2, med1_q2)
        f_q3 = np.append(f_q3, med1_q3)
        f_q4 = np.append(f_q4, med1_q4)
        v_q1 = np.append(v_q1, var_q1)
        v_q2 = np.append(v_q2, var_q2)
        v_q3 = np.append(v_q3, var_q3)
        v_q4 = np.append(v_q4, var_q4)

    if timit:
        print('Time elapsed: ' + str(np.round(time.time() - start_time, 1)) +
              ' seconds')

    return f_q1, f_q2, f_q3, f_q4, v_q1, v_q2, v_q3, v_q4
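Following the Mirametrics note linked above, the variance of a flat field in ADU grows linearly with its flux, var = flux / gain + RON^2, so the gain follows from the slope of the flux/variance pairs returned here. A hedged sketch of that final step (only the call to get_flux_and_variance_pairs is taken from this example; imglist and MB are assumed to have been prepared beforehand):

import numpy as np

# imglist: list of flat-field FITS files; MB: master bias frame
f_q1, f_q2, f_q3, f_q4, v_q1, v_q2, v_q3, v_q4 = get_flux_and_variance_pairs(imglist, MB)

# linear photon-transfer relation: variance [ADU^2] = flux [ADU] / gain + RON^2 [ADU^2]
slope, intercept = np.polyfit(f_q1, v_q1, 1)
gain_q1 = 1. / slope                  # gain in e-/ADU
print('Q1: gain = %.3f e-/ADU, RON ~ %.2f ADU' % (gain_q1, np.sqrt(max(intercept, 0.))))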