Example #1
def find_star(infile, pos=pos0, find_size2d=size2d):
    ## Mod on 24/02/2017 to handle CRs (quick fix)
    ## Mod on 01/04/2017 to handle CRs and bad pixels using cosmicrays_lacosmic
    ## Mod on 04/04/2017 to pass pos and find_size2d keywords

    im0, hdr0 = fits.getdata(infile, header=True)

    # + on 01/04/2017
    im0_clean = cosmicray_lacosmic(im0, sigclip=10)

    # Mod on 04/04/2017
    # find_size2d = size2d #u.Quantity((25, 770), u.pixel)
    cutout = Cutout2D(im0_clean[0],
                      pos,
                      find_size2d,
                      mode='partial',
                      fill_value=np.nan)
    cutout = cutout.data

    peak = np.where(cutout == np.max(cutout))
    ycen, xcen = peak[0][0], peak[1][0]
    # print xcen, ycen

    xcen += pos[0] - find_size2d[1].value / 2.0
    ycen += pos[1] - find_size2d[0].value / 2.0
    # print xcen, ycen
    return xcen, ycen
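
A minimal usage sketch for the helper above (not from the original source): the FITS name and centre guess are placeholders, and the search box simply mirrors the commented-out default inside the function body.

import astropy.units as u

pos_guess = (350.0, 512.0)              # assumed (x, y) starting position, in pixels
box = u.Quantity((25, 770), u.pixel)    # (ny, nx) search window, as in the commented default
xcen, ycen = find_star('nc_acq_0001.fits', pos=pos_guess, find_size2d=box)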
Example #2
def cosmic_ray_corr(textlist_files, prefix_str='c'):
    """
    Gain correction and Cosmic ray correction using LA Cosmic method.
    Args:
        textlist_files : A python list object with paths/names to the individual files.
        prefix_str     : String appended to the name of newly created file
    Returns:
        None
    """
    for filename in textlist_files:

        file_corr = CCDData.read(filename, unit=u.adu)
        file_corr = ccdp.gain_correct(file_corr, gain=GAIN)
        new_ccd = ccdp.cosmicray_lacosmic(file_corr,
                                          readnoise=READ_NOISE,
                                          sigclip=7,
                                          satlevel=SATURATION,
                                          niter=4,
                                          gain_apply=False,
                                          verbose=True)
        new_ccd.meta['crcorr'] = True
        new_ccd.data = new_ccd.data.astype('float32')
        new_ccd.write(prefix_str + filename,
                      hdu_mask=None,
                      hdu_uncertainty=None)
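
A hedged usage sketch: GAIN, READ_NOISE and SATURATION are module-level globals in the original pipeline, so the values and file names below are only placeholders for a typical CCD.

import astropy.units as u

GAIN = 1.22 * u.electron / u.adu    # placeholder detector gain
READ_NOISE = 4.87                   # placeholder read noise, electrons
SATURATION = 65535                  # placeholder saturation level, ADU

cosmic_ray_corr(['object_0001.fits', 'object_0002.fits'], prefix_str='c')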
Example #3
    def __init__(
        self,
        ybounds=(425, 510),
        root_dir=None,
        inpaint_bad_pixels=False,
        inpaint_cosmic_rays=False,
    ):
        super().__init__()

        if root_dir is None:
            root_dir = "/home/gully/GitHub/ynot/test/data/2012-11-27/"
        self.root_dir = root_dir
        self.nirspec_collection = self.create_nirspec_collection()
        # self.unique_objects = self.get_unique_objects()
        # self.label_nirspec_nods()
        nodA_path = self.root_dir + "/NS.20121127.49332.fits"
        nodA_data = fits.open(nodA_path)[0].data.astype(np.float64)
        nodA = torch.tensor(nodA_data)

        nodB_path = self.root_dir + "/NS.20121127.50726.fits"
        nodB_data = fits.open(nodB_path)[0].data.astype(np.float64)
        nodB = torch.tensor(nodB_data)

        # Read in the Bad Pixel mask
        self.bpm = self.load_bad_pixel_mask()

        data_full = torch.stack([nodA, nodB])  # Creates NxHxW tensor
        # Inpaint bad pixels.  In the future we will simply neglect these pixels
        if inpaint_bad_pixels:
            data_full = self.inpaint_bad_pixels(data_full)

        self.n_images = len(data_full[:, 0, 0])
        self.gain = 5.8  # e/ADU, per NIRSPEC documentation

        if inpaint_cosmic_rays:
            for ii in range(self.n_images):
                nod_ccd = CCDData(data_full[ii].numpy(), unit="adu")
                out = ccdproc.cosmicray_lacosmic(
                    nod_ccd,
                    readnoise=23.0,
                    gain=self.gain,
                    verbose=False,
                    satlevel=1.0e7,
                    sigclip=7.0,
                    sepmed=False,
                    cleantype="medmask",
                    fsmode="median",
                )
                data_full[ii] = torch.tensor(out.data)
        else:
            data_full = data_full * self.gain

        data = data_full[:, ybounds[0]:ybounds[1], :]
        data = data.permute(0, 2, 1)

        self.pixels = data
        self.index = torch.tensor([0, 1])
Example #4
def cleanCosmic(ccd, mbox=15, rbox=15, gbox=11, sigclip=5, cleantype="medmask", cosmic_method='lacosmic'):
    ctype = cosmic_method.lower().strip()
    ctypes = ['lacosmic', 'median']
    if ctype not in ctypes:
        print ('>>> Cosmic ray type "%s" NOT available [%s]' % (ctype, ' | '.join(ctypes)))
        return
    if ctype == 'lacosmic':
        ccd = ccdproc.cosmicray_lacosmic(ccd, sigclip=sigclip, cleantype=cleantype)
    elif ctype == 'median':
        ccd = ccdproc.cosmicray_median(ccd, mbox=mbox, rbox=rbox, gbox=gbox)
    if isinstance(ccd, CCDData):
        ccd.header['COSMIC'] = ctype.upper()
    return ccd
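
A short usage sketch, assuming a frame on disk; 'science_frame.fits' is a placeholder. Passing cosmic_method='median' would instead route through ccdproc.cosmicray_median.

from astropy.nddata import CCDData
import astropy.units as u

ccd = CCDData.read('science_frame.fits', unit=u.adu)   # hypothetical input frame
ccd_clean = cleanCosmic(ccd, cosmic_method='lacosmic', sigclip=5)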
Example #5
def crrej_LA(ccd,
             dtype='float32',
             output=None,
             nomask=False,
             output_verify='fix',
             overwrite=False,
             **kwargs):
    ''' Does the cosmic-ray rejection using default L.A.Cosmic algorithm.

    Parameters
    ----------
    ccd: CCDData, ndarray
        The ccd to be cosmic-ray removed. If ndarray, changed to CCDData by
        ``ccd = CCDData(ccd)``.

    dtype: dtype-like
        The dtype of the output ccd (CCDData's data)

    output: path-like
        The path to save the rejected ccd.

    nomask: bool
        If ``False`` (default), the returned and saved ``CCDData`` will contain
        the mask extension (extension 1 with name MASK). If ``True``, the
        mask will be set as ``None`` after the cosmic-ray rejection. Can be
        turned on when the mask is unimportant and the disk storage is running
        out.

    kwargs:
        The kwargs for the cosmic-ray rejection. By default,
        ``sigclip=4.5``, ``sigfrac=0.3``, ``objlim=5.0``, ``gain=1.0``,
        ``readnoise=6.5``, ``satlevel=65535.0``, ``pssl=0.0``, ``niter=4``,
        ``sepmed=True``, ``cleantype='meanmask'``, ``fsmode='median'``,
        ``psfmodel='gauss'``, ``psffwhm=2.5``, ``psfsize=7``, ``psfk=None``,
        ``psfbeta=4.765``, ``verbose=False``
    '''

    if not isinstance(ccd, CCDData):
        warnings.warn("ccd is not CCDData. Convert using ccd = CCDData(ccd)")
        ccd = CCDData(ccd)

    nccd = cosmicray_lacosmic(ccd, **kwargs)
    nccd = fu.CCDData_astype(nccd, dtype=dtype)

    if nomask:
        nccd.mask = None

    if output is not None:
        nccd.write(output, output_verify=output_verify, overwrite=overwrite)

    return nccd
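
A hedged example call (file names and parameter values are placeholders); the extra keywords are forwarded unchanged to ccdproc.cosmicray_lacosmic, and fu.CCDData_astype is assumed to be the module's own casting helper.

from astropy.nddata import CCDData

ccd = CCDData.read('raw_frame.fits', unit='adu')       # hypothetical input
clean = crrej_LA(ccd, output='raw_frame_cr.fits', overwrite=True,
                 sigclip=4.5, gain=1.0, readnoise=6.5)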
Example #6
def align_combine_images(list_files, fitsfile, ref_image_fits=None, precision=100, dout=None, hexp='EXPTIME', force=False, 
	minmax_clip=False, minmax_clip_min=0.0, sigma_clip=True, date=None, clip_extrema=False, func=np.ma.median, sigclip=5, 
	cosmic=False, mbox=15, rbox=15, gbox=11, cleantype="medmask", cosmic_method='lacosmic', sky=False, dict_sky={}, 
	dict_combine={}, suffix=None, hfilter='FILTER', key_file='file', hobj='OBJECT', ext=0, method='median', 
	align=True, **kwargs):
    if ref_image_fits is None:
        tobj = getTimeTable(list_files, key_time=hexp, key_file=key_file, ext=ext, abspath=True, mask=-1, sort=True, clean=True)
        ref_image_fits, list_files = tobj[key_file][-1], tobj[key_file][:-1].data.tolist()
    ref_image = CCDData.read(ref_image_fits)
    ref_image = ccdproc.cosmicray_lacosmic(ref_image, sigclip=sigclip)
    lccd = [ref_image]
    lexp = [ref_image.header[hexp]]
    all_images = [os.path.basename(ref_image_fits)]
    for img in list_files:
        image = fits2CCDData(img, single=True)
        image_suffix = suffix if suffix is not None else img.split('.')[-1]
        if 'REFIMA' in image.header and 'IMAGES' in image.header and not force:
            continue
        if cosmic:
            image = cleanCosmic(image, mbox=mbox, rbox=rbox, gbox=gbox, sigclip=sigclip, cleantype=cleantype, cosmic_method=cosmic_method)
        if align:
            offset_image = shiftImage(image, ref_image, precision=precision, **kwargs)
            img_shifted = '%s_shift.%s' % (img.split('.fit')[0], image_suffix)
            all_images.append(os.path.basename(img_shifted))
            img_shifted = join_path(img_shifted, dout)
            offset_image.write(img_shifted, clobber=True)
            lccd.append(offset_image)
        else:
            all_images.append(os.path.basename(img))
            lccd.append(image)
        lexp.append(image.header[hexp])
        #iraf.unlearn("imshift")
        #iraf.imshift.interp_type = "spline3"
        #iraf.imshift(img, img, shift[1], shift[0])
    # Combine images
    lexp = np.array(lexp)
    scale_func = None # 1. / lexp
    combine = ccdproc.combine(lccd, method=method, scale=scale_func, minmax_clip=minmax_clip, func=func,
		minmax_clip_min=minmax_clip_min, sigma_clip=sigma_clip, clip_extrema=clip_extrema, **dict_combine)
    if sky:
        combine = subtract_sky_ccd(combine, **dict_sky)
    combine.header['IMAGES'] = str(' | '.join(all_images))
    combine.header['REFIMA'] = os.path.basename(ref_image_fits)
    combine.header['IMGSEXP'] = ' | '.join(map(str,lexp[1:].tolist() + [lexp[0]]))
    combine.header['CMETHOD'] = method
    dir_out = os.path.dirname(ref_image_fits) if dout is None else dout
    fitsfile = join_path(fitsfile, dir_out)
    combine.header['FILENAME'] = os.path.basename(fitsfile)
    combine.header['CCDVER'] = VERSION
    combine.write(fitsfile, clobber=True)
Example #7
def find_gnirs_window_mean(infile, mylogger=None):
    # + on 04/04/2017
    # Using a mean approach to get the GNIRS window
    '''
    Modified by Chun Ly, 9 January 2018
     - Implement glog logging, allow mylogger keyword
    Modified by Chun Ly, 22 March 2018
     - Bug fix : mylog -> clog
    '''

    # + on 09/01/2018
    if type(mylogger) == type(None):
        mylog, clog = 0, log
    else:
        mylog, clog = 1, mylogger

    clog.info('## Reading : ' + infile)  # Mod on 09/01/2018
    im0 = fits.getdata(infile)
    hdr0 = fits.getheader(infile, ext=0)

    im0_clean = cosmicray_lacosmic(im0, sigclip=10)
    im0_clean = im0_clean[0]

    im0_mask = mask_bad_pixels(im0_clean)

    mean_y = np.nanmean(im0_mask, axis=1)
    mean_x = np.nanmean(im0_mask, axis=0)

    i_y = np.where(mean_y > 0)[0]
    i_x = np.where(mean_x > 0)[0]

    # + on 05/04/2017
    i_y_grp = group_index(i_y, find_max=True)
    i_x_grp = group_index(i_x, find_max=True)

    # + on 01/04/2017, Mod on 05/04/2017
    y_min, y_max = np.min(i_y_grp), np.max(i_y_grp)
    x_min, x_max = np.min(i_x_grp), np.max(i_x_grp)
    x_cen, y_cen = (x_max + x_min) / 2.0, (
        y_max + y_min) / 2.0  # Later mod on 04/04/2017
    #x_cen, y_cen = np.average(i_x), np.average(i_y)

    # + on 01/04/2017
    info0  = 'x_min=%i, x_max=%i, y_min=%i, y_max=%i ' % \
             (x_min, x_max, y_min, y_max)
    info0 += 'x_cen=%.2f, y_cen=%.2f' % (x_cen, y_cen)
    clog.info(info0)  # Mod on 09/01/2018
    return x_min, x_max, y_min, y_max, x_cen, y_cen
Example #8
def find_gnirs_window(infile, mylogger=None):
    # + on 30/03/2017
    # Mod on 01/04/2017 to figure out way to find GNIRS window
    '''
    Modified by Chun Ly, 9 January 2018
    - Implement glog logging, allow mylogger keyword
    Modified by Chun Ly, 22 March 2018
     - Bug fix : mylog -> clog
    '''

    # + on 09/01/2018
    if type(mylogger) == type(None):
        mylog, clog = 0, log
    else:
        mylog, clog = 1, mylogger

    # Mod on 01/04/2017
    clog.info('## Reading : ' + infile)  # Mod on 09/01/2018
    im0 = fits.getdata(infile)
    hdr0 = fits.getheader(infile, ext=0)

    # Mod on 01/04/2017
    im0_clean = cosmicray_lacosmic(im0, sigclip=10)
    im0_clean = im0_clean[0]

    im0_mask = mask_bad_pixels(im0_clean)

    # + on 01/04/2017
    if hdr0['NDAVGS'] == 1: v_min = 10
    if hdr0['NDAVGS'] == 16: v_min = 100

    i_y, i_x = np.where((im0_mask > v_min) & np.isfinite(im0_mask))
    # print len(i_y)
    med0 = np.median(im0_mask[i_y, i_x])

    # + on 01/04/2017
    y_min, y_max = np.min(i_y), np.max(i_y)
    x_min, x_max = np.min(i_x), np.max(i_x)
    x_cen, y_cen = np.average(i_x), np.average(i_y)

    # + on 01/04/2017
    info0  = '## med0=%.2f, x_min=%i, x_max=%i, y_min=%i, y_max=%i ' % \
             (med0, x_min, x_max, y_min, y_max)
    info0 += 'x_cen=%.2f, y_cen=%.2f' % (x_cen, y_cen)
    clog.info(info0)  # Mod on 09/01/2018
    return med0, x_min, x_max, y_min, y_max, x_cen, y_cen
Example #9
def zapcosmic(file_list):
    i=0
    nfiles = len(file_list)
    for f in file_list:
        print ('ZAPPING COSMIC RAYS FOR FILE %i OF %i'%(i,nfiles))
        with fits.open(f) as hdu1:
            print ('working on ',f)

            # convert data to CCDData format and save header
            ccd = CCDData(hdu1[0].data, unit=u.adu)
            header = hdu1[0].header
            crimage = ccdproc.cosmicray_lacosmic(ccd, gain = float(gain.value), readnoise = float(rdnoise.value))
            header['HISTORY'] = '= Cosmic rays rejected using ccdproc.cosmicray_lacosmic '
            fits.writeto('z'+f,crimage,header)
            hdu1.close()
        i += 1
        print ('\n')
Example #10
def cosmic_ray(hdul):
    num_amps = len(hdul)
    if num_amps > 1 and hdul[0].data is None:
        num_amps -= 1
    gains = find_gain(hdul[0].header, num_amps)
    rdnoises = find_rdnoise(hdul[0].header, num_amps)
    kwargs = {'sigclip': 4.5, 'objlim': 6, 'gain': 2., 'readnoise': 6.,
              'sepmed': True, 'cleantype': "idw", "verbose": True}
    amp_count = 0
    for i in range(len(hdul)):
        if hdul[i].data is None:
            continue
        data = hdul[i].data
        kwargs['gain'] = gains[amp_count]
        kwargs['readnoise'] = rdnoises[amp_count]
        newdata, mask = cosmicray_lacosmic(data, **kwargs)
        hdul[i].data = newdata / gains[amp_count]
        amp_count += 1
    hdul[0].header.add_history('LaPlacian Cosmic Ray removal algorithm applied')
    return hdul
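
A possible invocation on a multi-extension FITS file; 'mef_science.fits' is a placeholder, and find_gain()/find_rdnoise() above must be able to read the per-amplifier values from its primary header.

from astropy.io import fits

with fits.open('mef_science.fits') as hdul:
    hdul = cosmic_ray(hdul)
    hdul.writeto('mef_science_crclean.fits', overwrite=True)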
Example #11
def reduce_object(name,
                  channel,
                  file_list,
                  flat_fn,
                  trim,
                  clean_cosmic_rays,
                  logfile,
                  outpath,
                  bias_fn=None):
    extra_files = []
    for num, fn in enumerate(file_list):
        fn = path.join(datapath, fn)
        if bias_fn is not None:
            cmd = 'modsBias.py -f {}'.format(fn)
            run_cmd(cmd, logfile)
            fn = fn[:-5] + '_ot.fits'
            newfn = fn[:-5] + 'B.fits'
            cmd = 'modsSub.py -f {} {} {}'.format(fn, bias_fn, newfn)
            run_cmd(cmd, logfile)
            fn = newfn
            extra_files.append(newfn)
            cmd = 'modsProcNoBias.py -bf {} {}'.format(fn, flat_fn)
            run_cmd(cmd, logfile)
            newfn = fn[:-5] + 'f.fits'
            ccd = trim_image(newfn, trim, logfile)
        else:
            cmd = 'modsProc.py -bf {} {}'.format(fn, flat_fn)
            run_cmd(cmd, logfile)
            newfn = fn[:-5] + '_otf.fits'
            ccd = trim_image(newfn, trim, logfile)
        if clean_cosmic_rays:
            print('cleaning cosmic rays')
            ccd = ccdproc.cosmicray_lacosmic(ccd)
            extra_files.append(newfn)
            newfn = path.join(datapath,
                              '{}-{}-{}.fits'.format(name, channel, num + 1))
            print('lacosmic algo: file name --> ' + newfn, file=logfile)
        ccd.write(newfn, overwrite=True)
        call('mv ' + newfn + ' ' + outpath, shell=True)
    return extra_files
Example #12
def get_slit_trace(infile):  #, xmin, xmax):
    # Mod on 04/04/2017, aesthetics, fix bug
    # Mod on 04/04/2017, handle CRs affecting trace
    # Mod on 06/04/2017. Slit trace can be lost in median. Using average

    im0, hdr0 = fits.getdata(infile, header=True)

    # + on 04/04/2017
    im0_clean = cosmicray_lacosmic(im0, sigclip=10)
    im0_clean = im0_clean[0]

    # Bug: Mod on 06/04/2017
    y_avg0 = np.average(im0_clean, axis=1)  # Mod on 04/04/2017
    cen0 = (np.where(y_avg0 == np.max(y_avg0))[0])[0]

    dy = 20  # + on 04/04/2017
    im0_crop = im0_clean[cen0 - dy:cen0 + dy, :]  # Mod on 04/04/2017

    y_idx, x_idx = np.where(im0_crop >= 0.25 * np.max(im0_crop))
    xmin, xmax = np.min(x_idx), np.max(x_idx)

    dx = 2
    x0 = np.arange(xmin, xmax, dx)

    y0_lo = np.zeros(len(x0))
    y0_hi = np.zeros(len(x0))

    for xx in range(len(x0)):
        im0_crop = im0_clean[cen0 - dy:cen0 + dy,
                             x0[xx]:x0[xx] + dx]  # Mod on 04/04/2017
        y_med = np.median(im0_crop, axis=1)
        edge_idx = np.where(y_med >= 0.1 * np.max(y_med))[0]
        if len(edge_idx) > 2:
            y0_lo[xx] = cen0 - dy + edge_idx[0]
            y0_hi[xx] = cen0 - dy + edge_idx[-1]
        else:
            y0_lo[xx] = np.nan
            y0_hi[xx] = np.nan

    return x0 + dx / 2.0, y0_lo, y0_hi
Example #13
 def crrej(self,im,crbox=None,nsig=5,display=None) :
     """ CR rejection
     """
     if crbox is None: return im
     if type(im) is not list : ims=[im]
     else : ims = im
     out=[]
     for i,im in enumerate(ims) :
         if display is not None : 
             display.tv(im)
         if crbox == 'lacosmic':
             if self.verbose : print('  zapping CRs with ccdproc.cosmicray_lacosmic')
             im= ccdproc.cosmicray_lacosmic(im)
         else :
             if self.verbose : print('  zapping CRs with filter [{:d},{:d}]...'.format(*crbox))
             image.zap(im,crbox,nsig=nsig)
         if display is not None : 
             display.tv(im)
             input("  See CR-zapped image and original with - key. Hit any key to continue")
         out.append(im)
     if len(out) == 1 : return out[0]
     else : return out
Example #14
    warr = np.arange(warr.min(), warr.max(), dw)
    for i in range(data.shape[0]):
        data[i, :] = np.interp(warr, wmap[i, :], data[i, :])
    hdu[0].data = data
    hdu[0].header['CTYPE1'] = 'LAMBDA'
    hdu[0].header['CTYPE2'] = 'PIXEL'
    hdu[0].header['CD1_1'] = dw
    hdu[0].header['CD2_1'] = 0.0
    hdu[0].header['CD1_2'] = 0.0
    hdu[0].header['CD2_2'] = 1.0
    hdu[0].header['CRPIX1'] = 0.0
    hdu[0].header['CRPIX2'] = 0.0
    hdu[0].header['CRVAL1'] = warr.min()
    hdu[0].header['CRVAL2'] = 0.0
    hdu[0].header['CDELT1'] = 1.0
    hdu[0].header['CDELT2'] = 1.0
    hdu[0].header['DC-FLAG'] = 0

    return hdu


if __name__ == '__main__':
    #filename=sys.argv[1]
    import glob
    import ccdproc
    for filename in glob.glob('w_r*fit'):
        hdu = fits.open(filename)
        hdu[0].data, crmask = ccdproc.cosmicray_lacosmic(hdu[0].data)
        hdu = rectify(hdu)
        hdu.writeto(filename.replace('w_', 't_'), clobber=True)
Example #15
def clean_the_images(path, filename):
    #ast=AstrometryNet()
    #ast.api_key= 'iqmqwvazpvolmjmn'
    dir = path
    gain = 2 * u.electron / u.adu
    readnoise = 7.5 * u.electron
    ra = input('Enter the RA of the source:   ')
    dec = input('Enter the DEC of the source: ')
    '''
    wcs_header=ast.solve_from_image(path+filename)
    wcs=WCS(wcs_header)
    ran,decn=wcs.all_pix2world(1024,1024,0)
    print(ran,decn)
    '''
    file_name = os.path.join(dir, filename)
    image = ccdproc.CCDData.read(file_name, unit='adu')
    header = fits.getheader(file_name, 0)

    time = header['DATE']
    t = Time(time, format='isot', scale='utc')
    print(t.jd, t.mjd)
    header.insert(15, ('RA', ra))
    header.insert(16, ('DEC', dec))

    a = sorted(glob(os.path.join(dir, 'bias*.fits')))
    biaslist = []
    for i in range(0, len(a)):
        data = ccdproc.CCDData.read(a[i], unit='adu')
        #data = ccdproc.create_deviation(data, gain=gain, readnoise=readnoise)
        #data= data-(data.uncertainty.array)
        biaslist.append(data)
    combiner = ccdproc.Combiner(biaslist)
    masterbias = combiner.median_combine()
    masterbias.write('masterbias.fit', overwrite=True)
    mbias = ccdproc.CCDData.read('masterbias.fit', unit='adu')
    #masterbias.meta=image.meta
    print('master bias generated')
    print(np.mean(masterbias), np.median(masterbias))

    c = sorted(glob(os.path.join(dir, 'flat*.fits')))
    flatlist = []
    for j in range(0, len(c)):
        flat = ccdproc.CCDData.read(c[j], unit='adu')
        #flat= ccdproc.create_deviation(flat, gain=gain, readnoise=readnoise)
        flat = ccdproc.subtract_bias(flat, masterbias)
        flatlist.append(flat)
    combiner = ccdproc.Combiner(flatlist)
    masterflat = combiner.median_combine()
    masterflat.write('masterflat.fits', overwrite=True)
    mflat = ccdproc.CCDData.read('masterflat.fits', unit='adu')
    print('master flat generated')
    print(np.mean(masterflat), np.median(masterflat))

    #masterflat.meta=image.meta

    bias_subtracted = ccdproc.subtract_bias(image, masterbias)
    flat_corrected = ccdproc.flat_correct(bias_subtracted, masterflat)
    cr_cleaned = ccdproc.cosmicray_lacosmic(flat_corrected,
                                            readnoise=7.5,
                                            sigclip=5)
    print('cosmic ray removed')

    fits.writeto(dir + 'j_0947_i_1_clean.fits',
                 cr_cleaned,
                 header,
                 overwrite=True)

    print('image cleaned')
Example #16
def run_astroscrappy(image_old, syntax):
    '''
    Function to call an instance of astroscrappy by Curtis McCully

    link: https://astroscrappy.readthedocs.io/en/latest/#functions

    '''
    try:

        import time
        import logging
        import astroscrappy
        from threading import Thread

        from numpy import sum as np_sum

        # from numpy import inf as np_inf

        logger = logging.getLogger(__name__)

        logger.info('Detecting/removing cosmic ray sources')

        if syntax['use_astroscrappy']:

            #  is the program taking a while
            taking_while = False

            # output list
            clean_image_lst = []

            print('Starting Astroscrappy ... ', end='')

            # wrapper to move output to list
            def wrapper(func, args, res):

                res.append(func(*args))

            # setup astroscrappy but don't call
            def prep_astroscrappy():
                return astroscrappy.detect_cosmics(
                    image_old.data,
                    sigclip=4.5,
                    sigfrac=0.3,
                    objlim=5.0,
                    gain=syntax['gain'],
                    satlevel=65535.0,
                    pssl=0.0,
                    niter=4,
                    sepmed=True,
                    cleantype='meanmask',
                    fsmode='median',
                    psfmodel='gauss',
                    psffwhm=2.5,
                    psfsize=7,
                    psfk=None,
                    psfbeta=4.765,
                    verbose=False,
                )

            cray_remove_thread = Thread(target=wrapper,
                                        args=(prep_astroscrappy, (),
                                              clean_image_lst))

            # start time of astrcoscappry
            cray_time = time.time()

            # start thread
            cray_remove_thread.start()

            print('working ... ', end='')

            # while astroscrappy is working, keep alive with a while loop
            while cray_remove_thread.is_alive():
                # if it takes a long time, print something to show it's not hung
                if time.time() - cray_time > 15 and not taking_while:
                    print('this may take some time ... ', end='')
                    taking_while = True
            end_time = time.time() - cray_time
            print('done')

            clean_image = clean_image_lst[0][1]
            CR_mask = clean_image_lst[0][0]

        elif syntax['use_lacosmic']:
            from ccdproc import cosmicray_lacosmic
            cray_time = time.time()

            clean_image, CR_mask = cosmicray_lacosmic(image_old.data,
                                                      sigclip=4.5,
                                                      sigfrac=0.3,
                                                      objlim=5.0,
                                                      gain=syntax['gain'],
                                                      satlevel=65535.0,
                                                      pssl=0.0,
                                                      niter=4,
                                                      sepmed=True,
                                                      cleantype='meanmask',
                                                      fsmode='median',
                                                      psfmodel='gauss',
                                                      psffwhm=2.5,
                                                      psfsize=7,
                                                      psfk=None,
                                                      psfbeta=4.765,
                                                      verbose=False)
            end_time = time.time() - cray_time

        print('Exposure time: %ds :: Cosmic Ray Detections: %d' %
              (syntax['exp_time'], np_sum(CR_mask)))

        syntax['CR_detections'] = np_sum(CR_mask)
        syntax['CR_time_taken'] = end_time

        return clean_image, syntax

    except Exception as e:
        logger.exception(e)
        return image_old
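
A sketch of the syntax dictionary the function expects; only the keys read above are shown, and the values are illustrative placeholders. image_old is assumed to be any object with a .data array (e.g. an astropy PrimaryHDU or a CCDData).

syntax = {
    'use_astroscrappy': True,   # run astroscrappy.detect_cosmics in a worker thread
    'use_lacosmic': False,      # alternative branch via ccdproc.cosmicray_lacosmic
    'gain': 1.2,                # e-/ADU, detector dependent
    'exp_time': 120.0,          # seconds, only used in the summary printout
}
clean_image, syntax = run_astroscrappy(image_old, syntax)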
Example #17
    def process_raw_frame(self, master_bias, master_flat, pixel_mask_spec=None):
        """
        Bias and flat-correct a raw CCD frame. Trim off the overscan
        region. Identify cosmic rays using "lacosmic" and inflate
        uncertainties where CRs are found. If specified, mask out
        nearby sources by setting pixel uncertainty to infinity (or
        inverse-variance to 0).

        Returns
        -------
        nccd : `ccdproc.CCDData`
            A copy of the original ``CCDData`` object but after the
            above procedures have been run.
        """

        oscan_fits_section = "[{}:{},:]".format(self.oscan_idx,
                                                self.oscan_idx+self.oscan_size)

        # make a copy of the object
        nccd = self.ccd.copy()

        # apply the overscan correction
        poly_model = Polynomial1D(2)
        nccd = ccdproc.subtract_overscan(nccd, fits_section=oscan_fits_section,
                                         model=poly_model)

        # trim the image (remove overscan region)
        nccd = ccdproc.trim_image(nccd, fits_section='[1:{},:]'.format(self.oscan_idx))

        # create the error frame
        nccd = ccdproc.create_deviation(nccd, gain=self.ccd_gain,
                                        readnoise=self.ccd_readnoise)

        # now correct for the ccd gain
        nccd = ccdproc.gain_correct(nccd, gain=self.ccd_gain)

        # correct for master bias frame
        # - this does some crazy shit at the blue end, but we can live with it
        nccd = ccdproc.subtract_bias(nccd, master_bias)

        # correct for master flat frame
        nccd = ccdproc.flat_correct(nccd, master_flat)

        # cosmic ray cleaning - this updates the uncertainty array as well
        nccd = ccdproc.cosmicray_lacosmic(nccd, sigclip=8.)

        # replace ccd with processed ccd
        self.ccd = nccd

        # check for a pixel mask
        if pixel_mask_spec is not None:
            mask = self.make_nearby_source_mask(pixel_mask_spec)
            logger.debug("\t\tSource mask loaded.")

            stddev = nccd.uncertainty.array
            stddev[mask] = np.inf
            nccd.uncertainty = StdDevUncertainty(stddev)

        if self.plot_path is not None:
            # TODO: this assumes vertical CCD
            aspect_ratio = nccd.shape[1]/nccd.shape[0]

            fig,axes = plt.subplots(2, 1, figsize=(10,2 * 12*aspect_ratio),
                                    sharex=True, sharey=True)

            vmin,vmax = self.zscaler.get_limits(nccd.data)
            axes[0].imshow(nccd.data.T, origin='bottom',
                           cmap=self.cmap, vmin=max(0,vmin), vmax=vmax)

            stddev = nccd.uncertainty.array
            vmin,vmax = self.zscaler.get_limits(stddev[np.isfinite(stddev)])
            axes[1].imshow(stddev.T, origin='bottom',
                           cmap=self.cmap, vmin=max(0,vmin), vmax=vmax)

            axes[0].set_title('Object: {0}, flux'.format(self._obj_name))
            axes[1].set_title('root-variance'.format(self._obj_name))

            fig.tight_layout()
            fig.savefig(path.join(self.plot_path, '{}_frame.png'.format(self._filename_base)))
            plt.close(fig)

        return nccd
Example #18
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from astropy.nddata import CCDData
import ccdproc
import numpy as np

# getting the input image file and data.
image_sci = get_pkg_data_filename(
    '/media/sf_VB_shared_files/archiveunziped/cosmic_ray_rejection/finalscience1.fits'
)
image_sci, header = fits.getdata(image_sci, header=True, ext=0)
# print(image_sci)

# Getting the deviation and gain value for the image; the input gain below is used if the image units differ from those of the read-out noise.
# Gain and read-out noise depend on the detector, so enter the appropriate values.

data = CCDData(image_sci, unit='adu')
# data_with_deviation = ccdproc.create_deviation(data, gain=2.6 * u.electron/u.adu, readnoise= 15* u.electron)

# gain_corrected = ccdproc.gain_correct(data_with_deviation, 2.6*u.electron/u.adu)

# cleaning the image and removing the cosmic ray using lacosmic

cr_cleaned = ccdproc.cosmicray_lacosmic(data, sigclip=5)

# saving the output fits image.
fits.writeto(
    '/media/sf_VB_shared_files/archiveunziped/ipopfiles/finalop/cosrayfinalsciopla_sigclip5.fits',
    np.array(cr_cleaned),
    header,
    checksum=True)

# hooray!! end of code!! :)
def cosmic(data, sigclip=5):
    newdata, mask = ccdproc.cosmicray_lacosmic(data, sigclip)
    return newdata, mask
Example #20
def clean_the_images(path,filename):
    '''
    This module is meant for cleaning the images. The tasks to be included are: bias correction,
    flat correction, trimming, overscan as well as the cosmic ray removal from the science cases.
    (For the time we are skipping the overscan and trimming part.

    INPUT:
    path: The directory where the images are kept (string)
    filename: The first few characters and the extension of the images (string). Example:
    j0946*fits, HD1104*fits etc.

    OUTPUT:

    cleaned images in the new directory: path/cleaned
    '''

    dir = path
    gain = 2 * u.electron / u.adu  # gain and readout noise are properties of the CCD and will change for different CCDs.
    readnoise = 7.5 * u.electron

    ra=input('Enter the RA of the source:   ')
    dec=input('Enter the DEC of the source: ')


    bias_files = sorted(glob(os.path.join(dir,'bias*.fits')))
    biaslist = []
    for i in range (0,len(bias_files)):
        data= ccdproc.CCDData.read(bias_files[i],unit='adu')
        #data = ccdproc.create_deviation(data, gain=gain, readnoise=readnoise)
        #data= data-(data.uncertainty.array)
        biaslist.append(data)
    masterbias = ccdproc.combine(biaslist,method='average',sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
                             sigma_clip_func=np.ma.median, sigma_clip_dev_func=mad_std)
    masterbias.write('masterbias.fits', overwrite=True)
    mbias=ccdproc.CCDData.read('masterbias.fits',unit='adu')
    print('Master bias generated')
    print(" Mean and median of the masterbias: ",np.mean(masterbias), np.median(masterbias))



    flat_files=sorted(glob(os.path.join(dir,'flat*.fits')))
    flatlist = []
    for j in range(0,len(flat_files)):
        flat=ccdproc.CCDData.read(flat_files[j],unit='adu')
        flat_bias_removed=ccdproc.subtract_bias(flat,masterbias)
        flatlist.append(flat_bias_removed)

        def inv_median(a):
            return 1 / np.median(a)

    masterflat = ccdproc.combine(flatlist,method='median', scale=inv_median,
                                 sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
                                 sigma_clip_func=np.ma.median, sigma_clip_dev_func=mad_std)
    masterflat.write('masterflat.fits', overwrite=True)
    mflat=ccdproc.CCDData.read('masterflat.fits',unit='adu')
    print('Master flat generated')
    print(" Mean and median of the masterflat: ",np.mean(masterflat), np.median(masterflat))



    file_names = sorted(glob(os.path.join(dir,filename)))
    for i in range(0,len(file_names)):
        image=ccdproc.CCDData.read(file_names[i],unit='adu')
        header=fits.getheader(file_names[i],0)
        bias_subtracted = ccdproc.subtract_bias(image, masterbias)
        flat_corrected = ccdproc.flat_correct(bias_subtracted, masterflat)
        cr_cleaned = ccdproc.cosmicray_lacosmic(flat_corrected,readnoise=7.5, sigclip=5,satlevel=65535,niter=20,cleantype='meanmask',gain_apply=True)
        #print('Cosmic rays removed')
        clean_file=file_names[i].replace('.fits','')


        fits.writeto(clean_file+'_cleaned.fits',cr_cleaned,header,overwrite=True)
        print('Image no-%i has been cleaned'%i)
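
A hypothetical invocation; the directory must already hold bias*.fits and flat*.fits calibration frames next to the science images matching the pattern, and the function prompts interactively for the source RA and DEC.

clean_the_images('/data/2020-01-15/', 'j0946*fits')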
Example #21
def fitbg3(data, order_mask, readnoise=11, sigclip=[4, 2, 3], isplots=0):
    """
    Fit sky background with out-of-spectra data. Optimized to remove
    the 1/f noise in the NIRISS spectra (works in the y-direction).

    Parameters
    ----------
    isplots : bool, optional                                      
       Plots intermediate steps for the background fitting routine.
       Default is False.                                          

    Returns
    -------
    data : object
       data object now contains new attribute `bkg_removed`.
    """

    # Removes cosmic rays
    # Loops through niters cycles to make sure all pesky
    #    cosmic rays are trashed
    rm_crs = np.zeros(data.data.shape)
    bkg_subbed = np.zeros(data.data.shape)

    for i in tqdm(range(len(data.data))):

        ccd = CCDData((data.data[i]) * units.electron)
        mask = np.zeros(data.data[i].shape)

        for n in range(len(sigclip)):
            m1 = ccdp.cosmicray_lacosmic(ccd,
                                         readnoise=readnoise,
                                         sigclip=sigclip[n])
            ccd = CCDData(m1.data * units.electron)
            mask[m1.mask == True] += 1

        rm_crs[i] = m1.data
        rm_crs[i][mask >= 1] = np.nan

        rm_crs[i] = clipping.gauss_removal(
            rm_crs[i], ~order_mask, linspace=[-200,
                                              200])  # removal from background
        rm_crs[i] = clipping.gauss_removal(rm_crs[i],
                                           order_mask,
                                           linspace=[-10, 10],
                                           where='order')  # removal from order

        b1 = bkg_sub(rm_crs[i],
                     order_mask,
                     bkg_estimator='median',
                     sigma=4,
                     box=(10, 5),
                     filter_size=(2, 2))
        b2 = bkg_sub(rm_crs[i] - b1,
                     order_mask,
                     sigma=3,
                     bkg_estimator='median')

        bkg_subbed[i] = (rm_crs[i] - b1) - b2

    data.bkg_removed = bkg_subbed

    return data
Example #22
def flatfield_correction(target_dir, ic, config, logger):
    import os
    import numpy as np
    import scipy.ndimage as nd
    from ccdproc import CCDData, combine, cosmicray_lacosmic, flat_correct

    logger.debug('Flat fields.')

    i = ((ic.summary['imagetyp'] == 'object') +
         (ic.summary['imagetyp'] == 'flat'))
    filters = np.unique(ic.summary['filter'][i].data.data)

    flats = dict()
    for filt in filters:
        flats[filt] = 1
        for flat_key, flat_name in config.flats.items():
            fn = '{}-{}.fits'.format(flat_key, filt)
            fn = os.sep.join((target_dir, fn))
            if os.path.exists(fn) and not config.reprocess_all:
                logger.info('Reading {}.'.format(fn))
                flats[filt] = CCDData.read(fn)
            elif len(ic.files_filtered(object=flat_name, filter=filt)) == 0:
                logger.warning(
                    'No {} files provided for {} and {} not found.'.format(
                        flat_name, filt, fn))
            else:
                logger.info('Generating {}.'.format(fn))
                files = ic.files_filtered(include_path=True,
                                          object=flat_name,
                                          filter=filt)
                flat = combine(files, method='median', scale=mode_scaler)
                flat.mask = (flat.data > 1.2) + (flat.data < 0.8)

                n = str([int(f.split('.')[-2]) for f in files])
                flat.meta.add_history(
                    'Created from file numbers: {}'.format(n))
                flat.meta['FILENAME'] = os.path.basename(fn)

                flat.write(fn, overwrite=True)

                if flats[filt] == 1:
                    logger.info('Using {} for {}'.format(
                        flat_name.lower(), filt))
                    flats[filt] = flat

        if flats[filt] == 1:
            # find last flat
            fn = tuple(('{}-{}.fits'.format(k, filt) for k in config.flats))
            try:
                last_flat = find_last(target_dir, config, fn)
                flats[filt] = CCDData.read(last_flat)
                logger.info('Using {}.'.format(last_flat))
            except AssertionError:
                logger.warning(
                    'No previous flat found.  Not flat correcting {} data.'.
                    format(filt))

    i = ((ic.summary['imagetyp'] != 'bias')
         & ic.summary['flatcor'].mask
         & ~ic.summary['subbias'].mask)
    logger.info('{} files to flat correct.'.format(sum(i)))
    for fn in ic.summary['file'][i]:
        ccd = CCDData.read(os.sep.join([ic.location, fn]))

        filt = ccd.meta['FILTER']
        if flats[filt] == 1:
            logger.debug('{} skipped (no {} flat field provided).'.format(
                fn, filt))
            continue

        logger.debug(fn)
        ccd = flat_correct(ccd, flats[filt])

        if config.lacosmic:
            cleaned = cosmicray_lacosmic(ccd,
                                         pssl=ccd.meta['meanbias'],
                                         sigclip=3.5,
                                         satlevel=config.saturation,
                                         readnoise=ccd.meta['rdnoise'])
            ccd.mask += nd.binary_dilation(cleaned.mask)
            ccd.header['LACOSMIC'] = 1, 'L.A.Cosmic processing flag.'
        else:
            ccd.header['LACOSMIC'] = 0, 'L.A.Cosmic processing flag.'

        ccd.meta['FLATFILE'] = (flats[filt].meta['FILENAME'],
                                'Name of the flat field correction used.')
        ccd.write(os.sep.join([ic.location, fn]), overwrite=True)

    ic.refresh()
    return ic
Example #23
flat_data = fits.open(prefix_processed+'flat_'+color_select+'.fits')[0].data

index_science = (obj_name == extract_id) & (color_name == color_select)

trace_cube = np.loadtxt(prefix_processed+stand_id+'_'+color_select+'.txt')
lt_arr= trace_cube[:,0]
rt_arr= trace_cube[:,1]

print(file_name[index_science])


for idx, file_id in enumerate(file_name[index_science]):

    science_data = fits.open(file_id)[0].data[edge_low:edge_up, edge_l:edge_r]
    science = (science_data - bias_data[edge_low:edge_up, edge_l:edge_r])/(flat_data+10**-18)
    science, mask = ccdproc.cosmicray_lacosmic(science, sigclip=5)



    plt.figure(figsize=(12,6))
    plt.imshow(science, origin = 'lower', vmin = 0, vmax = 1000, aspect='auto', cmap="jet")
    plt.plot(np.arange(science.shape[1]), lt_arr,color='red')
    plt.plot(np.arange(science.shape[1]), rt_arr,color='red')
    plt.colorbar()
    plt.show()


    final_spec = np.zeros(science.shape[1])

    for kk in np.arange(science.shape[1]):
Example #24
import glob
import numpy as np
from astropy.io import fits
import ccdproc


reduced_light_path = './products/reduced*'
reduced_light_files = glob.glob(reduced_light_path)


for name in reduced_light_files:
	with fits.open(name) as img:
		header=img[0].header
		scidata=img[0].data
		newdata, mask = ccdproc.cosmicray_lacosmic(scidata, sigclip=10, sigfrac=0.3, objlim=5.0, gain=1.0, readnoise=6.5, satlevel=65535.0, pssl=0.0, niter=4, sepmed=True, cleantype=u'meanmask', fsmode=u'median', psfmodel=u'gauss', psffwhm=3.5, psfsize=7, psfk=None, psfbeta=4.765, verbose=False)  


		mask=1*mask

		header['FRAME']='Light Reduced Cosmic Rays Removed'
		header['comment'] = 'Cosmic Rays Removed'
		hdu=fits.PrimaryHDU(data=newdata, header=header)
		filename=name.replace('./products/','')
		hdu.writeto('./products/cosmic_ray_removed_'+filename, overwrite=True)


		hdu=fits.PrimaryHDU(data=mask)
		hdu.writeto('./products/cosmic_ray_mask_of_'+filename, overwrite=True)

Example #25
def hotpixfix_wrapper(sci_data, sigclip=5):
    return ccdproc.cosmicray_lacosmic(sci_data, sigclip=sigclip)[0]
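
A one-line usage sketch for the wrapper above; 'science.fits' is a placeholder frame.

from astropy.io import fits

sci_clean = hotpixfix_wrapper(fits.getdata('science.fits'), sigclip=5)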
infiles = args.infile

print(infiles)

if args.bias:
   mbias = CCDData.read(args.bias, unit = u.adu)
else:
   mbias = None

if args.flat: 
   raise Exception('Flat fielding is not currently implemented')
if args.dark: 
   raise Exception('Dark correction is not currently implemented')

for infile in infiles:
    ccd = CCDData.read(infile, unit = u.adu)
    ccd = ccdproc.ccd_process(ccd, oscan='[1117:1181, 1:330]', oscan_median=True, 
                              trim='[17:1116,1:330]', master_bias=mbias,
                              error=True, gain=1.0 * u.electron/u.adu, 
                              readnoise=5.0 * u.electron)
    if args.cray:
       ccd = ccdproc.cosmicray_lacosmic(ccd, sigclip=4.5, sigfrac=0.3,
                   objlim=5.0, gain=1.0, readnoise=6.5,
                   satlevel=65536.0, pssl=0.0, niter=4,
                   sepmed=True, cleantype='meanmask', fsmode='median',
                   psfmodel='gauss', psffwhm=2.5, psfsize=7,
                   psfk=None, psfbeta=4.765, verbose=False)
    ccd.write('p'+os.path.basename(infile), clobber=True)

parser.add_argument(
    '--rdnoise',
    dest='rdnoise',
    default=7.3,
    help=
    'readnoise in e-.  default is 7.3, which applies to HDI camera.  Siena STL11000M = 11 e-'
)

#parser.add_argument('--', dest='pixelscalex', default='0.00011808', help='pixel scale in x (default = 0.00011808)')
args = parser.parse_args()
files = sorted(glob.glob(args.filestring + '*.fits'))
nfiles = len(files)
i = 1
for f in files:
    print(('ZAPPING COSMIC RAYS FOR FILE %i OF %i' % (i, nfiles)))
    with fits.open(f) as hdu1:
        print(('working on ', f))

        # convert data to CCDData format and save header
        ccd = CCDData(hdu1[0].data, unit=u.adu)
        header = hdu1[0].header
        crimage = ccdproc.cosmicray_lacosmic(ccd,
                                             gain=float(args.gain),
                                             readnoise=float(args.rdnoise))
        header[
            'HISTORY'] = 'Cosmic rays rejected using ccdproc.cosmicray_lacosmic '
        fits.writeto('z' + f, crimage, header)
        hdu1.close()
    i += 1
    print('\n')
Example #28
    f, filt, exptime = t.split()
    
    print('ZAPPING COSMIC RAYS FOR FILE %i OF %i' % (i, nfiles))


    
    with fits.open(f) as hdu1:
        print('working on ', f)
        outfile=f
        # convert data to CCDData format and save header
        ccd = CCDData(hdu1[0].data, unit=u.adu)
        header = hdu1[0].header

        if args.zap:
            
            zccd = ccdproc.cosmicray_lacosmic(ccd, gain = float(args.gain), readnoise = float(args.rdnoise))
            outfile = 'z'+outfile

        # subtract dark
        if args.dark:
            if args.zap:
                infile=zccd
            else:
                infile = ccd
            dccd = ccdproc.subtract_dark(infile, dark)
            outfile = 'd'+outfile

        # flatten
        if args.flat:
            # flatten
            if args.dark:
Example #29
def main(path0, out_pdf='', silent=False, verbose=True, overwrite=False):
    '''
    Main function to generate PDF illustrating alignment on target

    Parameters
    ----------
    path0 : str
     Path to FITS file. Must include '/' at the end

    silent : boolean
      Turns off stdout messages. Default: False

    verbose : boolean
      Turns on additional stdout messages. Default: True

    overwrite : boolean
      Overwrite files if they exists. Default: False

    Returns
    -------

    Notes
    -----
    Created by Chun Ly, 24 March 2017
    Modified by Chun Ly, 01 April 2017
     - Handle CRs and bad pixels using cosmicrays_lacosmic
    Modified by Chun Ly, 04 April 2017
     - Use find_gnirs_window_mean to find center
    Modified by Chun Ly, 04-05 April 2017
     - Adjust greyscale limits to handle slit image (make it black),
       and faint sources
     Use find_gnirs_window_mean to find center
    Modified by Chun Ly, 05 April 2017
     - Handle alignment sequences with more than just 4 frames
     - Handle excess subplots for individual PDF pages (remove axes)
     - Compute seeing FWHM for acquisition images
    Modified by Chun Ly, 06 April 2017
     - Get coordinates for slit in cutout
    Modified by Chun Ly, 11 May 2017
     - Use slit image to find center when telluric data is only available
    Modified by Chun Ly, 3 July 2017
     - Add overwrite option to prevent overwriting file
    Modified by Chun Ly, 9 January 2018
     - Import glog and call for stdout and ASCII logging
     - Pass mylogger to find_gnirs_window_mean(), find_gnirs_window()
    Modified by Chun Ly, 20 April 2018
     - Pass mylogger to gauss2d_fit()
     - Switch print statements to mylogger calls
    Modified by Chun Ly, 22 April 2018
     - Bug fix: mylogger calls mistakes
     - Bug fix: mylogger calls mistakes (cont'd)
    '''

    # + on 09/01/2018
    logfile = path0 + 'align_check.log'
    mylogger = glog.log0(logfile)._get_logger()

    if silent == False: mylogger.info('### Begin main : ' + systime())

    dir_list, list_path = dir_check.main(path0,
                                         mylogger=mylogger,
                                         silent=silent,
                                         verbose=verbose)

    out_pdf_default = out_pdf
    for path in list_path:
        infile = path + 'hdr_info.QA.tbl'
        if not exists(infile):
            mylogger.warning('File does not exist : ' + infile)
            mylogger.warning('Exiting!!! ' + systime())
            return

        out_pdf = path + 'align_check.pdf' if out_pdf == '' else path + out_pdf

        # Mod on 03/07/2017
        if overwrite == False and exists(out_pdf):
            mylogger.warn('File exists!! Will not overwrite ' + out_pdf)
        else:
            pp = PdfPages(out_pdf)

            if silent == False: mylogger.info('Reading: ' + infile)
            tab0 = asc.read(infile, format='fixed_width_two_line')

            align = [ii for ii in xrange(len(tab0)) if tab0['QA'][ii] == 'N/A']
            if silent == False:
                mylogger.info('Number of alignment images found : ' +
                              str(len(align)))

            ID = tab0['object'][align]
            ID0 = list(set(ID))  #Unique ID's
            if silent == False:
                mylogger.info('Sources found : ' + ', '.join(ID0))

            # + on 04/04/2017
            win_ref_idx = [
                tt for tt in xrange(len(tab0))
                if (tab0['QA'][tt] == 'N/A') and ('Acq' in tab0['slit'][tt])
                and ('HIP' not in tab0['object'][tt]) and (
                    'HD' not in tab0['object'][tt])
            ]

            # Mod on 11/05/2017
            if len(win_ref_idx) > 0:
                win_ref_file = path + tab0['filename'][win_ref_idx[0]]
                mylogger.info('Reference image for finding GNIRS window : ' +
                              win_ref_file)

                x_min, x_max, y_min, y_max, x_cen, \
                    y_cen = find_gnirs_window_mean(win_ref_file, mylogger=mylogger)
            else:
                mylogger.info('Using telluric image as reference')
                win_ref_file = path + tab0['filename'][0]
                slit_x0, slit_y0_lo, slit_y0_hi = get_slit_trace(win_ref_file)
                x_min, x_max = min(slit_x0), max(slit_x0)
                x_cen = (x_min + x_max) / 2.0
                y_cen = (np.median(slit_y0_lo) + np.median(slit_y0_hi)) / 2.0
                y_min, y_max = y_cen - size2d[0].value / 2.0, y_cen + size2d[
                    0].value / 2.0

            pos_cen = (x_cen, y_cen)
            new_size = u.Quantity((y_max - y_min, x_max - x_min), u.pixel)

            # + on 20/04/2018, Mod on 22/04/2018
            mylogger.info('pos_cen : (%f, %f) ' % (pos_cen[0], pos_cen[1]))
            mylogger.info('new_size : [%f, %f] pix ' %
                          (new_size[0].value, new_size[1].value))

            for ii in xrange(len(ID0)):
                t_idx = [
                    tt for tt in xrange(len(tab0)) if
                    (tab0['object'][tt] == ID0[ii] and tab0['QA'][tt] == 'N/A')
                ]

                t_files = [path + a for a in tab0['filename'][t_idx]]
                ncols = 2.0
                nrows = 2  # np.ceil(len(t_idx)/ncols)
                ncols, nrows = np.int(ncols), np.int(nrows)

                # Mod on 05/04/2017
                if len(t_idx) <= nrows * ncols:
                    fig, ax_arr = plt.subplots(nrows=nrows, ncols=ncols)

                #med0, x_min, x_max, y_min, \
                #    y_max, x_cen, y_cen = find_gnirs_window(t_files[1], mylogger=mylogger)

                # Later + on 24/03/2017 | Mod on 04/04/2017
                xcen, ycen = find_star(t_files[-1],
                                       pos=pos_cen,
                                       find_size2d=new_size)
                # Fix to get relative coordinate for Cutout2D image
                #xcen -= pos_cen[0]-new_size[1].value/2.0
                #ycen -= pos_cen[1]-new_size[0].value/2.0

                slit_x0, slit_y0_lo, slit_y0_hi = get_slit_trace(
                    t_files[0])  #, x_min, x_max)
                # Adjust values for offset that is applied
                # Bug: Mod on 04/04/2017 to get proper coordinate
                slit_x0 -= np.int64(pos_cen[0] - size2d[1].value / 2.0)
                slit_y0_lo -= pos_cen[1] - size2d[0].value / 2.0
                slit_y0_hi -= pos_cen[1] - size2d[0].value / 2.0

                for jj in xrange(len(t_idx)):
                    jj_idx = t_idx[jj]

                    # + on 05/04/2017
                    if len(t_idx) > (nrows * ncols):
                        if jj % (nrows * ncols) == 0:
                            fig, ax_arr = plt.subplots(nrows=nrows,
                                                       ncols=ncols)

                    im0 = fits.getdata(t_files[jj])
                    hdr0 = fits.getheader(t_files[jj], ext=0)  # Get WCS header

                    # + 01/04/2017
                    im0_clean = cosmicray_lacosmic(im0, sigclip=10)[0]

                    cutout = Cutout2D(im0_clean,
                                      pos_cen,
                                      size2d,
                                      mode='partial',
                                      fill_value=np.nan)

                    t_col, t_row = jj % ncols, (jj / ncols) % nrows

                    # Mod on 04/04/2017 to handle bright and faint stars
                    max0 = np.max(cutout.data)

                    # Compute median within GNIRS window
                    # + on 04-05/04/2017
                    temp = im0_clean[-50:-1, :]
                    bgd0, sig0 = np.median(temp), np.std(temp)
                    idx_y, idx_x = np.where(im0_clean > (bgd0 + 5 * sig0))
                    med0 = np.median(im0_clean[idx_y, idx_x])
                    mylogger.info('## max0 : %f   med0 : %f ' % (max0, med0))
                    if max0 > 50000:
                        z1, z2 = zscale.get_limits(cutout.data)
                        z2 = max0  # Change for better stretch for telluric star
                    else:
                        if ('Acq_' not in tab0['slit'][jj_idx]) and \
                           (tab0['exptime'][jj_idx] == 3):
                            # First frame that will show the longslit
                            z1, z2 = 0.0, 0.5 * max0
                        else:
                            # This should handle faint and bright stars
                            z1, z2 = 0.5 * med0, max0

                    norm = ImageNormalize(vmin=z1, vmax=z2)
                    t_ax = ax_arr[t_row, t_col]
                    t_ax.imshow(cutout.data,
                                cmap='Greys',
                                origin='lower',
                                norm=norm)
                    #aplpy.FITSFigure(cutout)

                    # Draw trace of slit
                    t_ax.plot(slit_x0, slit_y0_lo, 'r-')
                    t_ax.plot(slit_x0, slit_y0_hi, 'r-')

                    t_ax.xaxis.set_ticklabels([])
                    t_ax.yaxis.set_ticklabels([])

                    fig.suptitle(path, fontsize=14)

                    txt0 = tab0['filename'][jj_idx] + '\n'
                    txt0 += tab0['datelabel'][jj_idx] + '\n'
                    txt0 += tab0['UT_date'][jj_idx] + '\n'
                    txt0 += tab0['object'][jj_idx]
                    t_ax.annotate(txt0, [0.025, 0.95],
                                  xycoords='axes fraction',
                                  ha='left',
                                  va='top')

                    # Plot inset | Later + on 24/03/2017
                    axins = zoomed_inset_axes(t_ax, 6, loc=4)
                    norm2 = ImageNormalize(vmin=z1, vmax=z2)
                    axins.imshow(cutout.data,
                                 cmap='Greys',
                                 origin='lower',
                                 norm=norm2)

                    # Draw trace of slit
                    axins.plot(slit_x0, slit_y0_lo, 'r-')
                    axins.plot(slit_x0, slit_y0_hi, 'r-')

                    # Mod on 04/04/2017 to get Cutout2d coordinates
                    c_xcen = xcen - (pos_cen[0] - size2d[1].value / 2.0)
                    c_ycen = ycen - (pos_cen[1] - size2d[0].value / 2.0)
                    x1, x2, y1, y2 = c_xcen - 20, c_xcen + 20, c_ycen - 20, c_ycen + 20
                    axins.set_xlim([x1, x2])
                    axins.set_ylim([y1, y2])
                    axins.xaxis.set_ticklabels([])
                    axins.yaxis.set_ticklabels([])
                    mark_inset(t_ax,
                               axins,
                               loc1=1,
                               loc2=3,
                               fc="none",
                               ec="b",
                               ls='dotted',
                               lw=0.5)

                    # Compute FWHM of alignment star | + on 05/04/2017
                    if ('Acq_' not in tab0['slit'][jj_idx]) and \
                       (tab0['exptime'][jj_idx] == 3):
                        mylogger.info('No source in slit : ' +
                                      tab0['filename'][jj_idx])
                    else:
                        # + on 06/04/2017
                        c_size2d = u.Quantity((40, 40), u.pixel)
                        c_slit_x0 = slit_x0 - (c_xcen -
                                               c_size2d[1].value / 2.0)
                        c_slit_y0_lo = slit_y0_lo - (c_ycen -
                                                     c_size2d[0].value / 2.0)
                        c_slit_y0_hi = slit_y0_hi - (c_ycen -
                                                     c_size2d[0].value / 2.0)
                        im0_crop = Cutout2D(cutout.data, (c_xcen, c_ycen),
                                            c_size2d,
                                            mode='partial',
                                            fill_value=np.nan)
                        gauss2d_fit(im0_crop.data,
                                    hdr0,
                                    t_ax,
                                    c_slit_x0,
                                    c_slit_y0_lo,
                                    c_slit_y0_hi,
                                    mylogger=mylogger)  # Mod on 06/04/2017

                    # Write each page separately | + on 05/04/2017
                    if len(t_idx) > (nrows * ncols):
                        # Mod later on 05/04/2017 to handle excess subplots
                        if jj == len(t_idx) - 1:
                            rem0 = len(t_idx) % (nrows * ncols)  # remainder
                            if rem0 != 0:
                                for rr in range(rem0, nrows * ncols, 1):
                                    t_col, t_row = rr % ncols, (rr // ncols) % nrows
                                    ax_arr[t_row, t_col].axis('off')

                        if (jj % (nrows * ncols) == nrows*ncols-1) or \
                           (jj == len(t_idx)-1):
                            subplots_adjust(left=0.02,
                                            bottom=0.02,
                                            top=0.95,
                                            right=0.98,
                                            wspace=0.02,
                                            hspace=0.02)
                            fig.set_size_inches(11, 8)
                            fig.savefig(pp, format='pdf')
                #endfor

                # Mod on 05/04/2017
                if len(t_idx) <= nrows * ncols:
                    # Mod later on 05/04/2017 to handle excess subplots
                    for rr in range(len(t_idx), nrows * ncols):
                        t_col, t_row = rr % ncols, (rr // ncols) % nrows
                        ax_arr[t_row, t_col].axis('off')

                    subplots_adjust(left=0.02,
                                    bottom=0.02,
                                    top=0.95,
                                    right=0.98,
                                    wspace=0.02,
                                    hspace=0.02)
                    fig.set_size_inches(11, 8)
                    fig.savefig(pp, format='pdf')
            #endfor

            pp.close()
        #endelse

        out_pdf = out_pdf_default

    if silent == False: mylogger.info('### End main : ' + systime())
Example #30
0
        data[i,:] = np.interp(warr, wmap[i,:], data[i,:])
    hdu[0].data = data
    hdu[0].header['CTYPE1'] = 'LAMBDA'
    hdu[0].header['CTYPE2'] = 'PIXEL'
    hdu[0].header['CD1_1'] = dw
    hdu[0].header['CD2_1'] = 0.0
    hdu[0].header['CD1_2'] = 0.0
    hdu[0].header['CD2_2'] = 1.0 
    hdu[0].header['CRPIX1'] = 0.0
    hdu[0].header['CRPIX2'] = 0.0
    hdu[0].header['CRVAL1'] = warr.min()
    hdu[0].header['CRVAL2'] = 0.0
    hdu[0].header['CDELT1'] = 1.0
    hdu[0].header['CDELT2'] = 1.0
    hdu[0].header['DC-FLAG'] = 0

    return hdu


if __name__=='__main__':
   #filename=sys.argv[1]
   import glob
   import ccdproc
   for filename in glob.glob('w_r*fit'):
      hdu = fits.open(filename)
      hdu[0].data, crmask = ccdproc.cosmicray_lacosmic(hdu[0].data)
      hdu = rectify(hdu)
      hdu.writeto(filename.replace('w_', 't_'), overwrite=True)  # 'clobber' is deprecated
      
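# The rectified header above encodes a simple linear wavelength solution
# (CRVAL1 at reference pixel CRPIX1, step CD1_1 per pixel). A minimal sketch of
# how a downstream script might rebuild the wavelength array from such a file;
# the filename 't_r0001.fit' is only a placeholder.
import numpy as np
from astropy.io import fits

def wavelength_axis(header):
    """Return the wavelengths implied by the linear CRVAL1/CRPIX1/CD1_1 solution."""
    pix = np.arange(1, header['NAXIS1'] + 1)  # FITS pixels are 1-based
    return header['CRVAL1'] + header['CD1_1'] * (pix - header['CRPIX1'])

# hdr = fits.getheader('t_r0001.fit')   # placeholder filename
# warr = wavelength_axis(hdr)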

Example #31
0
def process_fits(fitspath,
                 *,
                 obstype=None,
                 object=None,
                 exposure_times=None,
                 percentile=None,
                 percentile_min=None,
                 percentile_max=None,
                 window=None,
                 darks=None,
                 cosmic_ray=False,
                 cosmic_ray_kwargs={},
                 gain=None,
                 readnoise=None,
                 normalise=False,
                 normalise_func=np.ma.average,
                 combine_type=None,
                 sigma_clip=False,
                 low_thresh=3,
                 high_thresh=3):
    """Combine all FITS images of a given type and exposure time from a given directory.

    Parameters
    ----------
    fitspath: str
        Path to the FITS images to process. Can be a path to a single file, or a path to a
        directory. If the latter, the directory will be searched for FITS files and checked
        against the obstype, object and exposure_times criteria.
    obstype: str, optional
        Observation type, an 'OBSTYPE' FITS header value e.g. 'DARK', 'OBJ'. If given, only files
        with matching OBSTYPE will be processed.
    object: str, optional
        Object name, i.e. 'OBJECT' FITS header value. If given, only files with matching OBJECT
        will be processed.
    exposure_times: float or sequence, optional
        Exposure time(s), i.e. 'TOTALEXP' FITS header value(s). If given, only files with matching
        TOTALEXP will be processed.
    percentile: float, optional
        If given, only images whose percentile value falls between percentile_min and
        percentile_max will be processed, e.g. set to 50.0 to select images by their median value,
        or to 99.5 to select images by their 99.5th percentile value.
    percentile_min: float, optional
        Minimum percentile value.
    percentile_max: float, optional
        Maximum percentile value.
    window: (int, int, int, int), optional
        If given will trim images to the window defined as (x0, y0, x1, y1), where (x0, y0)
        and (x1, y1) are the coordinates of the bottom left and top right corners.
    darks: str or sequence, optional
        Filename(s) of dark frame(s) to subtract from the image(s). If given a dark frame with
        matching TOTALEXP will be subtracted from each image during processing.
    cosmic_ray: bool, optional
        Whether to perform single image cosmic ray removal, using the lacosmic algorithm,
        default False. Requires both gain and readnoise to be set.
    cosmic_ray_kwargs: dict, optional
        Additional keyword arguments to pass to the ccdproc.cosmicray_lacosmic function.
    gain: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to the (inverse) gain, or
        a Quantity containing the gain value to use. If both gain and read noise are given
        an uncertainty frame will be created.
    readnoise: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to read noise, or a Quantity
        containing the read noise value to use. If both read noise and gain are given then an
        uncertainty frame will be created.
    normalise: bool, optional
        If True each image will be normalised. Default False.
    normalise_func: callable, optional
        Function to use for normalisation. Each image will be divided by normalise_func(image).
        Default np.ma.average.
    combine_type: str, optional
        Type of image combination to use, 'MEAN' or 'MEDIAN'. If None the individual
        images will be processed but not combined and the return value will be a list of
        CCDData objects. Default None.
    sigma_clip: bool, optional
        If True will perform sigma clipping on the image stack before combining, default=False.
    low_thresh: float, optional
        Lower threshold to use for sigma clipping, in standard deviations. Default is 3.0.
    high_thresh: float, optional
        Upper threshold to use for sigma clipping, in standard deviations. Default is 3.0.


    Returns
    -------
    master: ccdproc.CCDData or list of ccdproc.CCDData
        Combined image, or a list of the processed images if combine_type is None.

    """
    if exposure_times:
        try:
            # Should work for any sequence or iterable type
            exposure_times = set(exposure_times)
        except TypeError:
            # Not a sequence or iterable, try using as a single value.
            exposure_times = {
                float(exposure_times),
            }

    if darks:
        if isinstance(darks, str):
            # A single filename would otherwise be split into characters by set().
            dark_filenames = {darks}
        else:
            try:
                dark_filenames = set(darks)
            except TypeError:
                dark_filenames = {darks}
        dark_dict = {}
        for filename in dark_filenames:
            try:
                dark_data = CCDData.read(filename)
            except ValueError:
                # Might be no units in FITS header. Assume ADU.
                dark_data = CCDData.read(filename, unit='adu')
            dark_dict[dark_data.header['totalexp']] = dark_data

    if combine_type and combine_type not in ('MEAN', 'MEDIAN'):
        raise ValueError(
            "combine_type must be 'MEAN' or 'MEDIAN', got '{}''".format(
                combine_type))

    fitspath = Path(fitspath)
    if fitspath.is_file():
        # FITS path points to a single file, turn into a list.
        filenames = [
            fitspath,
        ]
    elif fitspath.is_dir():
        # FITS path is a directory. Find FITS files and collect the values of selected FITS headers.
        ifc = ImageFileCollection(fitspath, keywords='*')
        if len(ifc.files) == 0:
            raise RuntimeError("No FITS files found in {}".format(fitspath))
        # Filter by observation type.
        if obstype:
            try:
                ifc = ifc.filter(obstype=obstype)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBSTYPE={}.".format(obstype))
        # Filter by object name.
        if object:
            try:
                ifc = ifc.filter(object=object)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBJECT={}.".format(object))
        filenames = [
            Path(ifc.location).joinpath(filename) for filename in ifc.files
        ]
    else:
        raise ValueError(
            "fitspath '{}' is not an accessible file or directory.".format(
                fitspath))

    # Load image(s) and process them.
    images = []
    for filename in filenames:
        try:
            ccddata = CCDData.read(filename)
        except ValueError:
            # Might be no units in FITS header. Assume ADU.
            ccddata = CCDData.read(filename, unit='adu')
        # Filtering by exposure times here because it's hard to filter an ImageFileCollection
        # with an indeterminate number of possible values.
        if not exposure_times or ccddata.header['totalexp'] in exposure_times:
            if window:
                ccddata = ccdproc.trim_image(ccddata[window[1]:window[3] + 1,
                                                     window[0]:window[2] + 1])

            if percentile:
                # Check percentile value is within specified range, otherwise skip to next image.
                percentile_value = np.percentile(ccddata.data, percentile)
                if percentile_value < percentile_min or percentile_value > percentile_max:
                    continue

            if darks:
                try:
                    ccddata = ccdproc.subtract_dark(
                        ccddata,
                        dark_dict[ccddata.header['totalexp']],
                        exposure_time='totalexp',
                        exposure_unit=u.second)
                except KeyError:
                    raise RuntimeError(
                        "No dark with matching totalexp for {}.".format(
                            filename))

            if gain:
                if isinstance(gain, str):
                    egain = ccddata.header[gain]
                    egain = egain * u.electron / u.adu
                elif isinstance(gain, u.Quantity):
                    try:
                        egain = gain.to(u.electron / u.adu)
                    except u.UnitsError:
                        egain = (1 / gain).to(u.electron / u.adu)
                else:
                    raise ValueError(
                        f"gain must be a string or Quantity, got {gain}.")

            if readnoise:
                if isinstance(readnoise, str):
                    rn = ccddata.header[readnoise]
                    rn = rn * u.electron
                elif isinstance(readnoise, u.Quantity):
                    try:
                        rn = readnoise.to(u.electron / u.pixel)
                    except u.UnitsError:
                        rn = (readnoise * u.pixel).to(u.electron)
                else:
                    raise ValueError(
                        f"readnoise must be a string or Quantity, got {readnoise}."
                    )

            if gain and readnoise:
                ccddata = ccdproc.create_deviation(ccddata,
                                                   gain=egain,
                                                   readnoise=rn,
                                                   disregard_nan=True)

            if gain:
                ccddata = ccdproc.gain_correct(ccddata, gain=egain)

            if cosmic_ray:
                if not (gain and readnoise):
                    raise ValueError(
                        "Cosmic ray removal requires both gain & readnoise.")

                ccddata = ccdproc.cosmicray_lacosmic(
                    ccddata,
                    gain=1.0,  # ccddata already gain corrected
                    readnoise=rn,
                    **cosmic_ray_kwargs)

            if normalise:
                ccddata = ccddata.divide(normalise_func(ccddata.data))

            images.append(ccddata)

    n_images = len(images)
    if n_images == 0:
        msg = "No FITS files match exposure time criteria"
        raise RuntimeError(msg)

    if n_images == 1 and combine_type:
        warn(
            "Combine type '{}' selected but only 1 matching image, "
            "skipping image combination.".format(combine_type))
        combine_type = None

    if combine_type:
        combiner = Combiner(images)

        # Sigma clip data
        if sigma_clip:
            if combine_type == 'MEAN':
                central_func = np.ma.average
            else:
                # If not MEAN has to be MEDIAN, checked earlier that it was one or the other.
                central_func = np.ma.median
            combiner.sigma_clipping(low_thresh=low_thresh,
                                    high_thresh=high_thresh,
                                    func=central_func)

        # Stack images.
        if combine_type == 'MEAN':
            master = combiner.average_combine()
        else:
            master = combiner.median_combine()

        # Populate header of combined image with metadata about the processing.
        master.header['fitspath'] = str(fitspath)
        if obstype:
            master.header['obstype'] = obstype
        if exposure_times:
            if len(exposure_times) == 1:
                master.header['totalexp'] = float(exposure_times.pop())
            else:
                master.header['totalexp'] = tuple(exposure_times)
        master.header['nimages'] = n_images
        master.header['combtype'] = combine_type
        master.header['sigclip'] = sigma_clip
        if sigma_clip:
            master.header['lowclip'] = low_thresh
            master.header['highclip'] = high_thresh

    else:
        # No image combination, just processing individual image(s)
        if n_images == 1:
            master = images[0]
        else:
            master = images

    return master
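
# A minimal usage sketch for process_fits(); the directory names, dark filename
# and the 'EGAIN' header keyword below are placeholders, not part of the
# original example.
if __name__ == '__main__':
    import astropy.units as u

    master_dark = process_fits('raw/darks/',             # placeholder path
                               obstype='DARK',
                               combine_type='MEDIAN',
                               sigma_clip=True)

    reduced = process_fits('raw/science/',               # placeholder path
                           obstype='OBJ',
                           darks=['master_dark_30.fits'],  # placeholder filename
                           gain='EGAIN',                    # assumed FITS keyword
                           readnoise=10.0 * u.electron,
                           cosmic_ray=True,
                           combine_type=None)              # returns a list of CCDData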
Example #32
0
def mask_method_two(data, meta, isplots=0, save=False):
    """
    A second method to extract the masks for the first and
    second orders in NIRISS data. This method uses the vertical
    profile of a summed image to identify the borders of each
    order.
    
    ""
    Parameters
    -----------
    data : object
    meta : object
    isplots : int, optional
       Level of plots that should be created in the S3 stage.
       This is set in the .ecf control files. Default is 0.
       This stage will plot if isplots >= 5.
    save : bool, optional
       Has the option to save the initial guesses for the location
       of the NIRISS orders. This is set in the .ecf control files.
       Default is False.

    Returns
    -------
    meta : object
    """
    def identify_peaks(column, height, distance):
        p, _ = find_peaks(column, height=height, distance=distance)
        return p

    summed = np.nansum(data.data, axis=0)
    ccd = CCDData(summed * units.electron)

    new_ccd_no_premask = ccdp.cosmicray_lacosmic(ccd,
                                                 readnoise=150,
                                                 sigclip=5,
                                                 verbose=False)

    summed_f277 = np.nansum(data.f277, axis=(0, 1))

    f277_peaks = np.zeros((summed_f277.shape[1], 2))
    peaks = np.zeros((new_ccd_no_premask.shape[1], 6))
    double_peaked = [500, 700,
                     1850]  # hard coded numbers to help set height bounds

    for i in range(summed.shape[1]):

        # Identifies peaks in the F277W filtered image
        fp = identify_peaks(summed_f277[:, i], height=100000, distance=10)
        if len(fp) == 2:
            f277_peaks[i] = fp

        if i < double_peaked[0]:
            height = 2000
        elif i >= double_peaked[0] and i < double_peaked[1]:
            height = 100
        elif i >= double_peaked[1]:
            height = 5000

        p = identify_peaks(new_ccd_no_premask[:, i].data,
                           height=height,
                           distance=10)
        if i < 900:
            p = p[p > 40]  # sometimes catches an upper edge that doesn't exist

        peaks[i][:len(p)] = p

    # Removes 0s from the F277W boundaries
    xf = np.arange(0, summed_f277.shape[1], 1)
    good = f277_peaks[:, 0] != 0
    xf = xf[good]
    f277_peaks = f277_peaks[good]

    # Fitting a polynomial to the boundary of each order
    x = np.arange(0, new_ccd_no_premask.shape[1], 1)
    avg = np.zeros((new_ccd_no_premask.shape[1], 6))

    for ind in range(4):  # CHANGE THIS TO 6 TO ADD THE THIRD ORDER
        q = peaks[:, ind] > 0

        # removes outliers
        diff = np.diff(peaks[:, ind][q])
        good = np.where(
            np.abs(diff) <= np.nanmedian(diff) + 2 * np.nanstd(diff))
        good = good[0][5:-5]  # take the index array and trim the first/last 5 points
        y = peaks[:, ind][q][good] + 0
        y = y[x[q][good] > xf[-1]]

        # removes some of the F277W points to better fit the 2nd order
        if ind < 2:
            cutoff = -1
        else:
            cutoff = 250

        xtot = np.append(xf[:cutoff], x[q][good][x[q][good] > xf[-1]])
        if ind == 0 or ind == 2:
            ytot = np.append(f277_peaks[:, 0][:cutoff], y)
        else:
            ytot = np.append(f277_peaks[:, 1][:cutoff], y)

        # Fits a 4th degree polynomial
        poly = np.polyfit(xtot, ytot, deg=4)
        fit = np.poly1d(poly)

        avg[:, ind] = fit(x)

    if isplots >= 5:
        plt.figure(figsize=(14, 4))
        plt.title('Order Approximation')
        plt.imshow(summed, vmin=0, vmax=2e3)
        plt.plot(x,
                 np.nanmedian(avg[:, :2], axis=1),
                 'k',
                 lw=2,
                 label='First Order')
        plt.plot(x,
                 np.nanmedian(avg[:, 2:4], axis=1),
                 'r',
                 lw=2,
                 label='Second Order')
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        plt.show()

    tab = Table()
    tab['x'] = x
    tab['order_1'] = np.nanmedian(avg[:, :2], axis=1)
    tab['order_2'] = np.nanmedian(avg[:, 2:4], axis=1)

    if save:
        tab.write('niriss_order_fits_method2.csv', format='csv')

    meta.tab2 = tab

    return meta
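
# The order tracing above boils down to scipy.signal.find_peaks applied to each
# detector column. A self-contained illustration on synthetic data (two Gaussian
# "orders" in a single column); the height and distance values are illustrative,
# not the NIRISS-tuned values used above.
import numpy as np
from scipy.signal import find_peaks

rows = np.arange(256)
column = np.zeros(256)
for centre in (80, 180):          # two fake order traces crossing this column
    column += 5000.0 * np.exp(-0.5 * ((rows - centre) / 3.0) ** 2)

peaks, _ = find_peaks(column, height=1000, distance=10)
print(peaks)                      # -> [ 80 180], the row positions of the orders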
Example #33
0
    image_sci, header = fits.getdata(crr_image,
                                     header=True,
                                     ext=0)

    # Set the gain and read-out noise for the image; the gain given below is used when the
    # units of the image differ from those of the read-out noise.
    # Gain and read-out noise depend on the detector, so enter the appropriate values.

    data = CCDData(image_sci, unit='adu')
    # cleaning the image and removing the cosmic ray using lacosmic
    # Only process images taken with the specified filter name.
    if header['FILTER'] == medianFlatFilter:
        cr_cleaned = ccdproc.cosmicray_lacosmic(data,
                                                sigclip=5,
                                                gain=2.6,
                                                readnoise=15,
                                                niter=4,
                                                cleantype="medmask",
                                                psfsize=5)

        # saving the output fits image.
        if not os.path.exists(basedir + opPath):
            os.makedirs(basedir + opPath)
        fits.writeto(os.path.join(basedir + opPath,
                                  "crr_" + os.path.basename(crr_image)),
                     cr_cleaned.data,
                     header,
                     checksum=True,
                     overwrite=True)

# hooray!! end of code!! :)
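
# The comment above mentions "getting the deviation", but the snippet never
# builds an uncertainty frame. A minimal sketch using ccdproc.create_deviation
# with the same gain/read-noise values as the lacosmic call, expressed as
# Quantities; treat it as an illustration, not part of the original script.
import numpy as np
import astropy.units as u
import ccdproc
from astropy.nddata import CCDData

stand_in = CCDData(np.ones((64, 64)), unit='adu')      # stand-in image
with_error = ccdproc.create_deviation(stand_in,
                                      gain=2.6 * u.electron / u.adu,
                                      readnoise=15 * u.electron)
# with_error.uncertainty now holds the per-pixel standard deviation.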
Example #34
0
def reduce_night(science_collection, dark_collection, flat_collection, config, config_arguments):
    """
    This function reduce science data of one night and save the results to a folder named "reduced". The reduction
    is performed as follows:

        - Create a list of masterdarks (each masterdark has a different value of the exposure time) ^1
        - Create a list of masterflats (each masterflat has a different value of the filter) ^1

        - Reduce the science data as follows:

            *For each filter:
                *For each exposure time with that filter:

                    -- Look for the masterdark with the nearest exposure time
                    -- Look for the masterflat with the current filter.
                    -- Subtract masterdark
                    -- Flat field correct the data
                    -- Clean cosmic rays (if requested)
                    -- Save image to ./Calibrated folder

    (1) The master(flat/dark)s are created using a median combine.

    :param science_collection: Numpy array - A numpy array with the science collection data produced by FitsLookup.
    :param dark_collection: Numpy array - A numpy array with the dark collection data produced by FitsLookup.
    :param flat_collection: Numpy array - A numpy array with the flat collection data produced by FitsLookup.
    :param config: Dictionary - A dictionary provided by the function get_config_dict that
                            contains the config of the fits files (read from conf.INI).
    :param config_arguments: Dictionary - A dictionary provided by argparse initialization that contain the current flags.
    :return: Integer - 0 if no errors raised 1 if errors raised.
    """

    # Supress astropy warnings

    warnings.filterwarnings('ignore')

    # Renaming some config_arguments for easy access

    work_dir = config_arguments.dir[0]

    # Get the filter and exposure collection of science and flat images

    science_filter_collection = set(science_collection['filter'])
    science_exposures_collection = set(science_collection['exptime'])
    dark_exposures_collection = set(dark_collection['exptime'])
    flat_filter_collection = set(flat_collection['filter'])

    # Inform the user of the filter / exptime found.
    science_exp_times_as_string = ", ".join(
        [str(x) for x in science_exposures_collection])
    dark_exp_times_as_string = ", ".join(
        [str(x) for x in dark_exposures_collection])
    module_logger.info("We have found {0} filters in the science images: {1}".format(
        len(science_filter_collection), ", ".join(science_filter_collection)))
    module_logger.info("We have found {0} exposure times science images: {1}".format(
        len(science_exposures_collection), science_exp_times_as_string))
    module_logger.info("We have found {0} exposure times dark calibrators: {1}".format(
        len(dark_exposures_collection), dark_exp_times_as_string))
    module_logger.info("We have found {0} filters in the flat calibrators {1}".format(
        len(flat_filter_collection), ", ".join(flat_filter_collection)))

    # Check if we have the same filters in flats and science, if not, get the
    # intersection

    if not science_filter_collection.issubset(flat_filter_collection):

        module_logger.warning(
            "There are more filters in the science images than in the flat calibrators")

        science_filter_collection = science_filter_collection.intersection(
            flat_filter_collection)

        module_logger.warning("Triying to work with common filters.")
        module_logger.info("We have found {0} common filters in the science images: {1}".format(
            len(science_filter_collection), ", ".join(science_filter_collection)))

        if not science_filter_collection:
            module_logger.warning(
                "There are no common filters between science images and flat calibrators")
            module_logger.warning("This night will be skiped.")
            return 1

    # Warn the user if we found science images of 0 seconds

    if 0 in science_exposures_collection:
        number_of_null_images = len(filter_collection(
            science_collection, [('exptime', 0)]))
        module_logger.warning(
            "We have found {0} science images with 0 seconds of exposure time.".format(number_of_null_images))
        science_exposures_collection.discard(0)
        module_logger.warning(
            "Discarding images with 0 seconds of exposure time for this night: {0} exposure(s) remain.".format(
                len(science_exposures_collection)))

    # ------- MASTER DARK CREATION --------

    module_logger.info("Starting the creation of the master dark")
    module_logger.info("{0} different exposures for masterdarks were found.".format(
        len(dark_exposures_collection)))

    master_dark_collection = dict()

    # Loop over each exposure time.
    for dark_exposure_item in dark_exposures_collection:
        module_logger.info("Creating masterdark with exposure of {0}s".format(dark_exposure_item))
        # Initialize the dark list for the current exposure time.
        exposure_dark_list = list()

        for dark_image_data in filter_collection(dark_collection, [('exptime', dark_exposure_item)]):
            # Open the images and append to the dark list
            dark_image = dark_image_data['filename']
            ccd = CCDData.read(dark_image, unit=config.image_units)
            # If we have overscan, subtract and trim.
            if config.subtract_overscan:
                if config_arguments.verbose_flag_2:
                    module_logger.info("Subtracting overscan of {0}".format(dark_image))
                ccd = subtract_and_trim_overscan(ccd, config)
            exposure_dark_list.append(ccd)

        # median combine the data
        cb = ccdproc.Combiner(exposure_dark_list)
        master_dark = cb.median_combine(median_func=np.median)

        # Add the masterdark to the master_flat collection
        master_dark_collection.update({dark_exposure_item: master_dark})

        # Save the masterdark if needed.
        if config.save_masterdark:
            # Filename to save
            aux = '{0}/masterdark_{1}.fits'.format(config_arguments.save_path, dark_exposure_item)
            module_logger.info('Saving master dark to {0}'.format(aux))
            master_dark.to_hdu().writeto(aux)

    # ------- MASTER FLAT CREATION --------

    module_logger.info("Starting the creation of the master flats")
    module_logger.info("{0} different filters for masterflats were found".format(
        len(flat_filter_collection)))

    master_flat_collection = dict()

    # Go through the different filters in the collection

    for flat_filter in flat_filter_collection:

        module_logger.info(
            "Creating masterflat with filter {0}".format(flat_filter))

        # Initialize the list that will carry the flat images of the current
        # filter

        filter_flat_list = list()

        for flat_image_data in filter_collection(flat_collection, [('filter', flat_filter)]):

            # Open the images and append to the filter's flat list
            flat_image = flat_image_data['filename']
            ccd = CCDData.read(flat_image, unit=config.image_units)
            # Subtract and trim overscan
            if config.subtract_overscan:
                if config_arguments.verbose_flag_2:
                    module_logger.info("Subtracting overscan of {0}".format(flat_image))
                ccd = subtract_and_trim_overscan(ccd, config)
            filter_flat_list.append(ccd)


        # median combine the flats after scaling each by its mean
        cb = ccdproc.Combiner(filter_flat_list)
        cb.scaling = lambda x: 1.0 / np.mean(x)
        master_flat = cb.median_combine(median_func=np.median)

        # Add the masterflat to the master_flat collection

        master_flat_collection.update({flat_filter: master_flat})

        # Save the masterflat if needed.
        if config.save_masterflat:
            aux = '{0}/masterflat_{1}.fits'.format(config_arguments.save_path, flat_filter)
            module_logger.info('Saving master flat to {0}'.format(aux))
            master_flat.to_hdu().writeto(aux)

    # ------- REDUCE SCIENCE DATA --------

    module_logger.info("Starting the calibration of the science images")

    # Go through the different files in the collection

    for image_filter in science_filter_collection:

        module_logger.info("Now calibrating filter: {0}".format(image_filter))

        # Iterate through each different exposure. This is because the dark files
        # can have different exposures and the calibration must be performed with
        # the masterdark with the nearest exposure time.
        for science_exposure in science_exposures_collection:

            # Important!! If you have more classifiers in the numpy dtype and you want
            # to use them, you must modify the code here. For example, if you want to
            # use a 'temp' value as classifier, after modify the dtype following the
            # instructions in FitsLookup, you must add a loop here and modify the sub_collection.
            # Once you have the 'temp' in the dtype, you must add a loop here as:
            #
            # >>>for temp_value in  set(science_collection['temp']):
            #        module_logger.info("Now calibrating temp: {0}".format(temp_value))
            #
            # After this, you MUST indent all the following code (of this function) four spaces to
            # the right, of course. Then, you only have to modify the science_subcollection as follows:
            #
            # >>> science_subcollection = filter_collection(
            #    science_collection, [('filter', image_filter),
            #                            ('exptime', science_exposure),
            #                             ('temp', temp_value) ])
            #
            # Follow this steps for every classifier you want to add. Yay!
            # --------------------------------------------------------------

            # Science subcollection is a really bad name, but it is descriptive. Remember that
            # this subcollection holds the images taken with the current filter and the current
            # exposure time, e.g. ('r', 20s) or ('r', 30s).
            science_subcollection = filter_collection(
                science_collection, [('filter', image_filter),
                                     ('exptime', science_exposure)])

            # Continue only if we have files to process. This catches the case where, for some
            # filter, there are no images with the current exposure time.
            if science_subcollection.size:

                module_logger.info(
                    "Now calibrating exposure: {0}".format(science_exposure))

                # Determine whether we have a masterdark matching the science exposure time.
                #
                #   - If we have an exposure-matching masterdark, use it.
                #   - If we do not, use the masterdark with the nearest exposure time.
                try:
                    selected_masterdark = master_dark_collection[science_exposure]
                    nearest_exposure = 0, science_exposure
                except KeyError:
                    # Get the nearest exposure in the dark collection.
                    nearest_exposure = min(enumerate(master_dark_collection.keys()),
                                           key=lambda x: abs(x[1] - science_exposure))
                    # Notice that nearest_exposure is a tuple of the form
                    # (index,exposure).
                    selected_masterdark = master_dark_collection[
                        nearest_exposure[1]]

                # Initialize the progress bar variables

                total_len = len(science_subcollection)
                meantime = []

                # Loop for each image with current (filter,exptime).
                for contador, science_image_data_with_current_exposure in enumerate(science_subcollection):

                    # To supress astropy warnings.
                    devnull = open(os.devnull, "w")
                    sys.stdout = devnull

                    # Notice that until sys.stdout is reassigned, no printing
                    # will be allowed in the following lines.

                    # Start timing
                    start = time.time()
                    # Extract the filename from the image data
                    science_image = science_image_data_with_current_exposure['filename']
                    # Read the image
                    ccd = CCDData.read(science_image, unit=config.image_units, wcs=None)
                    # Subtract overscan
                    if config.subtract_overscan:
                        if config_arguments.verbose_flag_2:
                            module_logger.info("Subtracting overscan of {0}".format(science_image))
                        ccd = subtract_and_trim_overscan(ccd, config)
                    # Master dark subtraction
                    if config_arguments.verbose_flag_2:
                        sys.stdout = sys.__stdout__  # Restart stdout printing
                        module_logger.info("Subtracting dark of image {0} of {1}".format(contador + 1, total_len))
                        sys.stdout = devnull
                    else:
                        module_logger.debug("Subtracting dark of image {0} of {1}".format(contador + 1, total_len))

                    selected_masterdark._wcs = ccd._wcs  # FIXME: currently needed due to bug
                    ccd = ccdproc.subtract_dark(ccd, selected_masterdark, dark_exposure=nearest_exposure[1] * u.second,
                                                data_exposure=science_exposure * u.second)

                    # flat-field correct the data
                    if config_arguments.verbose_flag_2:
                        sys.stdout = sys.__stdout__  # Restart stdout printing
                        module_logger.info("Flat-field correcting image {0} of {1}".format(contador + 1, total_len))
                        sys.stdout = devnull
                    else:
                        module_logger.debug("Flat-field correcting image {0} of {1}".format(contador + 1, total_len))

                    current_master_flat = master_flat_collection[image_filter]
                    current_master_flat._wcs = ccd._wcs  # FIXME: currently needed due to bug
                    ccd = ccdproc.flat_correct(ccd, current_master_flat)

                    # If we need to clean cosmic rays, do it.

                    if config_arguments.cosmic_flag:

                        if config_arguments.verbose_flag_2:
                            sys.stdout = sys.__stdout__  # Restart stdout printing
                            module_logger.info(
                                "Cosmic ray cleaning of image {0} of {1}".format(contador + 1, total_len))
                            sys.stdout = devnull
                        else:
                            module_logger.debug(
                                "Cosmic ray cleaning of image {0} of {1}".format(contador + 1, total_len))

                        # Note: the original call passed cosmicray_median-style keywords
                        # (error_image/thresh/mbox/rbox/gbox) to cosmicray_lacosmic; use
                        # the lacosmic sigma-clipping keyword instead.
                        ccd = ccdproc.cosmicray_lacosmic(ccd, sigclip=5)

                    # Save the calibrated image to a file
                    output_filename = os.path.join(config_arguments.save_path, os.path.basename(science_image))

                    if config_arguments.verbose_flag_2:
                        sys.stdout = sys.__stdout__  # Restart stdout printing
                        module_logger.info(
                            "Saving image {0} of {1} to {2}".format(contador + 1, total_len, output_filename))
                        sys.stdout = devnull
                    else:
                        module_logger.debug(
                            "Saving image {0} of {1} to {2}".format(contador + 1, total_len, output_filename))

                    ccd.write(output_filename, overwrite=True)

                    end = time.time()
                    meantime.append(end - start)

                    sys.stdout = sys.__stdout__  # Restart stdout printing


                    # Progressbar in case that we have not activated the no_interaction flag nor the advance
                    # verbose flag.
                    if not config_arguments.no_interaction and not config_arguments.verbose_flag_2:

                        if config_arguments.verbose_flag:
                            update_progress(float(contador + 1) / total_len,
                                            np.mean(meantime) * (total_len - (contador + 1)))

    return 0
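
# subtract_and_trim_overscan() is referenced above but not defined in this
# snippet. A minimal sketch of what such a helper might do, assuming the config
# object exposes FITS-style section strings (config.overscan_section and
# config.trim_section are hypothetical attribute names).
import ccdproc

def subtract_and_trim_overscan(ccd, config):
    """Subtract the overscan level and trim the frame to the science region."""
    ccd = ccdproc.subtract_overscan(ccd,
                                    fits_section=config.overscan_section,
                                    median=True)
    return ccdproc.trim_image(ccd, fits_section=config.trim_section)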