Example no. 1
    def action(self, ccd):
        """
        Trim an image to bounds given in the widget.

        Returns
        -------

        trimmed : `ccdproc.CCDData`
            Trimmed image.
        """
        # Return the image unchanged if trimming is not activated
        if not self.toggle.value:
            return ccd
        whole_axis = slice(None, None)
        partial_axis = slice(self._start.value, self._stop.value)
        # Apply the partial slice to whichever axis the widget selects.
        if self._pre.value == 0:
            trimmed = ccdproc.trim_image(ccd[whole_axis, partial_axis])
        else:
            trimmed = ccdproc.trim_image(ccd[partial_axis, whole_axis])

        return trimmed
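The widget method above builds the trim by slicing the CCDData directly. As a minimal sketch (the image size and bounds are hypothetical, not taken from the widget), slicing first and passing an equivalent FITS-style section to ccdproc.trim_image select the same pixels:

import numpy as np
import astropy.units as u
import ccdproc
from astropy.nddata import CCDData

ccd = CCDData(np.ones((100, 200)), unit=u.adu)

# Slice first (zero-based, stop-exclusive), then trim the sliced view...
trimmed_a = ccdproc.trim_image(ccd[:, 10:110])
# ...or let trim_image slice from a FITS section (one-based, inclusive, fast axis first).
trimmed_b = ccdproc.trim_image(ccd, fits_section='[11:110,1:100]')

assert trimmed_a.data.shape == trimmed_b.data.shape == (100, 100)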
Example no. 2
    def action(self, ccd):
        """
        Trim an image to bounds given in the widget.

        Returns
        -------

        trimmed : `ccdproc.CCDData`
            Trimmed image.
        """
        # Return the image unchanged if trimming is not activated
        if not self.toggle.value:
            return ccd
        whole_axis = slice(None, None)
        partial_axis = slice(self._axis_selection.start,
                             self._axis_selection.stop)
        # Apply the partial slice to whichever axis the widget selects.
        if self._axis_selection.full_axis == 0:
            trimmed = ccdproc.trim_image(ccd[whole_axis, partial_axis])
        else:
            trimmed = ccdproc.trim_image(ccd[partial_axis, whole_axis])

        return trimmed
Example no. 3
def process(ccd, gain, oscan, tsec):
    """Basic CCD processing required for the data: overscan subtraction,
    gain correction, and trimming.
    """

    #oscan subtract
    ccd = ccdproc.subtract_overscan(ccd, overscan=oscan, median=True)

    #gain correct
    ccd = ccdproc.gain_correct(ccd, gain, gain_unit=u.electron / u.adu)

    #trim the image
    ccd = ccdproc.trim_image(ccd, fits_section=tsec)

    return ccd
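A hypothetical call of process() above; the file name, overscan columns, gain value and trim section are placeholders, and ccdproc plus astropy.units are assumed to be imported in the surrounding module:

from astropy.nddata import CCDData

raw = CCDData.read('raw_frame.fits', unit='adu')   # placeholder file name
oscan_region = raw[:, 2048:2100]                   # assumed overscan columns
reduced = process(raw, gain=1.5, oscan=oscan_region, tsec='[1:2048,1:2048]')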
Example no. 5
def oscan_trim_file(fname, datahdus=0):
    """Overscan-subtract and trim every data HDU of fname, updating the file in place."""
    hdulist = fits.open(fname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    # loop from first-data to last HDU, unless datahdus is set
    hduindexes = list(range(nhdus))[istart:]
    if datahdus != 0:
        hduindexes = datahdus
    for i in hduindexes:
        hdulist = fits.open(fname)
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        # What happens if file is already overscan-subtracted?
        # We should probably default to using a model
        if modeling:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=models.Polynomial1D(1))
        else:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=None)

        trim1 = ccdproc.trim_image(oscan1,
                                   fits_section=oscan1.header['TRIMSEC'],
                                   add_keyword={
                                       'trimmed': True,
                                       'calstat': 'OT'
                                   })
        fits.update(fname, trim1.data, header=trim1.header, ext=i)
    hdulist.close()
    mylog("Overscan and trim {0}".format(fname))
    return
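The function above is driven by the BIASSEC/TRIMSEC header keywords. A minimal sketch of the same two-step pattern on a single synthetic frame, with hypothetical section values:

import numpy as np
import astropy.units as u
import ccdproc
from astropy.nddata import CCDData

ccd = CCDData(np.random.normal(1000., 5., size=(100, 120)), unit=u.adu)
ccd.header['BIASSEC'] = '[101:120,1:100]'   # overscan: last 20 columns
ccd.header['TRIMSEC'] = '[1:100,1:100]'     # science region

osub = ccdproc.subtract_overscan(ccd, fits_section=ccd.header['BIASSEC'],
                                 add_keyword={'overscan': True, 'calstat': 'O'})
trimmed = ccdproc.trim_image(osub, fits_section=osub.header['TRIMSEC'],
                             add_keyword={'trimmed': True, 'calstat': 'OT'})
assert trimmed.data.shape == (100, 100)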
Example no. 6
def oscan_and_trim(image_list):
    """
    Remove overscan and trim a list of images. The entries of the original
    list are replaced in place by the processed images.

    Implementation is done by ccdproc.

    Parameters
    ----------
    image_list : list of CCDData
        Images to be overscan-corrected and trimmed.
    """
    for idx, img in enumerate(image_list):
        oscan = ccdproc.subtract_overscan(img, overscan=img[:, 521:544],
                                          add_keyword={'oscan_sub': True, 'calstat': 'O'},
                                          model=models.Polynomial1D(1))
        image_list[idx] = ccdproc.trim_image(oscan[:, 10:521],
                                             add_keyword={'trimmed': True, 'calstat': 'OT'})
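Hypothetical use of oscan_and_trim() above; the file names are placeholders and the frames must be wide enough for the hard-coded overscan ([:, 521:544]) and trim ([:, 10:521]) columns:

from astropy.nddata import CCDData

images = [CCDData.read(f, unit='adu') for f in ('sci_001.fits', 'sci_002.fits')]
oscan_and_trim(images)                  # entries are replaced in place
print(images[0].header['calstat'])      # -> 'OT'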
Example no. 7
    def loader_generator(actually_load_data):
        # Ideally we would just return a shape instead of an empty data array in
        # the case where actually_load_data is false, but we can't use
        # `ccdproc.trim_image()` without having a CCDData in hand, so to get the
        # right shape we're going to need to create a full CCDData anyway.

        for fits_path in glob(join(dirname, '*.fits')):
            # `astropy.nddata.ccddata.fits_ccddata_reader` only opens FITS from
            # filenames, not from an open HDUList, which means that creating
            # multiple CCDDatas from the same FITS file rapidly becomes
            # inefficient. So, we emulate its logic.

            with fits.open(fits_path) as hdu_list:
                for idx, hdu in enumerate(hdu_list):
                    if idx == 0:
                        header0 = hdu.header
                    else:
                        hdr = hdu.header
                        hdr.extend(header0, unique=True)

                        # This ccddata function often generates annoying warnings
                        with warnings.catch_warnings():
                            warnings.simplefilter('ignore')
                            hdr, wcs = ccddata._generate_wcs_and_update_header(
                                hdr)

                        # Note: we skip all the unit-handling logic here since the LSST
                        # sim data I'm using don't have anything useful.

                        if actually_load_data:
                            data = hdu.data
                        else:
                            data = np.empty(hdu.shape, dtype=np.void)

                        ccd = ccddata.CCDData(data,
                                              meta=hdr,
                                              unit=unit,
                                              wcs=wcs)

                        ccd = ccdproc.trim_image(
                            ccd, fits_section=ccd.header['DATASEC'])
                        yield (f'{fits_path}:{idx}', ccd)
Example no. 8
def trim_bias(refresh=False):
    biascollection = ImageFileCollection('HD115709/bias', ext=4)
    flag = 0
    tbiaspathlist = []
    if refresh:
        for ccdb, biasn in biascollection.ccds(return_fname=True, ccd_kwargs={'unit': 'adu'}):

            print('trimming', biasn)

            ccdb.header['imtype'] = ('bias', 'type of image')
            if flag == 0:
                print('all biases will be trimmed to :', ccdb.meta['trimsec'])
                flag = 1
            tbias = ccdp.trim_image(ccdb, fits_section=str(ccdb.meta['trimsec']))
            tbias.meta['imtype'] = ('trimmed bias', 'type of image')
            tbias.meta['taxis1'] = (2048, 'dimension1')
            tbias.meta['taxis2'] = (4096, 'dimension2')
            tbias.write('Trimmed_Bias/' + biasn[0:8] + '_trim.fits', overwrite=True)
            tbiaspathlist.append('Trimmed_Bias/' + biasn[0:8] + '_trim.fits')
    return biascollection, tbiaspathlist
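A hypothetical call of trim_bias() above, assuming the HD115709/bias and Trimmed_Bias/ directories hard-coded in the function exist:

biascollection, tbiaspathlist = trim_bias(refresh=True)
print(len(tbiaspathlist), 'trimmed bias frames written to Trimmed_Bias/')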
Example no. 9
def oscan_trim_file(fname):
    """Overscan-subtract and trim every data HDU of fname, updating the file in place."""
    hdulist = fits.open(fname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    # loop from first-data to last HDU.
    for i in range(nhdus)[istart:]:
        hdulist = fits.open(fname)
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        # What happens if file is already overscan-subtracted?
        if modeling:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=models.Polynomial1D(1))
        else:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=None)

        trim1 = ccdproc.trim_image(oscan1,
                                   fits_section=oscan1.header['TRIMSEC'],
                                   add_keyword={
                                       'trimmed': True,
                                       'calstat': 'OT'
                                   })
        fits.update(fname, trim1.data, header=trim1.header, ext=i)
    hdulist.close()
    return
Example no. 10
def _correct_overscan_hdu(hdu_ccd):
    """
    Given an HDU loaded as a CCDData object, perform overscan correction
    and trim to the TRIMSEC region.

    Parameters
    ----------
    hdu_ccd : astropy.nddata.CCDData
        HDU on which to perform the correction.

    Returns
    -------
    img_trim : astropy.nddata.CCDData
        Overscan-corrected and trimmed HDU.
    """

    img_osub = (ccdproc.subtract_overscan(
        hdu_ccd,
        fits_section=hdu_ccd.header['BIASSEC'],
        model=None,
        median=True,
        add_keyword={
            'overscan': True,
            'calstat': 'O'
        }))

    img_trim = (ccdproc.trim_image(img_osub,
                                   fits_section=img_osub.header['TRIMSEC'],
                                   add_keyword={
                                       'trimmed': True,
                                       'calstat': 'OT'
                                   }))

    # Remove the section keywords that no longer apply after trimming
    del img_trim.header['BIASSEC']
    del img_trim.header['TRIMSEC']

    return img_trim
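A minimal sketch (hypothetical file name) applying _correct_overscan_hdu() above to every image extension of a multi-extension FITS file:

from astropy.io import fits
from astropy.nddata import CCDData

corrected = []
with fits.open('multi_ext_frame.fits') as hdulist:   # placeholder name
    for hdu in hdulist[1:]:                          # skip the primary HDU
        ccd = CCDData(hdu.data, meta=hdu.header, unit='adu')
        corrected.append(_correct_overscan_hdu(ccd))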
Example no. 11
def create_master_flat(list_files, flat_filter=None, fitsfile=None, bias=None,
                       fits_section=None, gain=None, method='median',
                       key_filter='filter', dfilter={'imagetyp': 'FLAT'},
                       mask=None, key_find='find', invert_find=False, sjoin=','):
    if gain is not None and not isinstance(gain, u.Quantity):
        gain = gain * u.electron / u.adu
    lflat = []
    if dfilter is not None and key_filter is not None and flat_filter is not None:
        dfilter = addKeysListDict(dfilter, {key_filter: flat_filter})
    list_files = getListFiles(list_files, dfilter, mask, key_find=key_find, invert_find=invert_find)
    if len(list_files) == 0:
        print('WARNING: No FLAT files available for filter "%s"' % flat_filter)
        return
    for filename in list_files:
        ccd = CCDData.read(filename, unit=u.adu)
        trimmed = fits_section is not None
        ccd = ccdproc.trim_image(ccd, fits_section=fits_section,
                                 add_keyword={'trimmed': trimmed})
        if gain is not None:
            ccd = ccdproc.gain_correct(ccd, gain)
        if bias is not None:
            if isinstance(bias, str):
                bias = fits2CCDData(bias, single=True)
            ccd = ccdproc.subtract_bias(ccd, bias)
        lflat.append(ccd)
    combine = ccdproc.combine(lflat, method=method)
    if gain is not None and 'GAIN' not in combine.header:
        combine.header.set('GAIN', gain.value, gain.unit)
    combine.header['CGAIN'] = gain is not None
    combine.header['IMAGETYP'] = 'FLAT'
    combine.header['CMETHOD'] = method
    combine.header['CCDVER'] = VERSION
    addKeyHdr(combine.header, 'MBIAS', getFilename(bias))
    if sjoin is not None:
        combine.header['LFLAT'] = sjoin.join([os.path.basename(fits) for fits in list_files])
    combine.header['NFLAT'] = len(list_files)
    if fitsfile is not None:
        combine.header['FILENAME'] = os.path.basename(fitsfile)
        combine.write(fitsfile, overwrite=True)
    return combine
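The gain handling at the top of create_master_flat() shown in isolation: plain numbers are promoted to a Quantity in electron/adu while Quantities pass through unchanged (the helper name below is illustrative only):

import astropy.units as u

def as_gain(gain):
    # mirrors the first two lines of create_master_flat()
    if gain is not None and not isinstance(gain, u.Quantity):
        gain = gain * u.electron / u.adu
    return gain

assert as_gain(1.5) == 1.5 * u.electron / u.adu
assert as_gain(2.0 * u.electron / u.adu) == 2.0 * u.electron / u.adu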
Example no. 12
def view_slice(image,
               window,
               axis=0,
               fig_size=(20, 10),
               title=None,
               traces=None):
    trimmed_data = trim_image(image[window[1]:window[3] + 1,
                              window[0]:window[2] + 1]).data

    if axis == 1:
        trimmed_data = np.rot90(trimmed_data, k=-1)
    elif axis != 0:
        raise ValueError("Axis must be 0 or 1, got {}.".format(axis))

    projection = np.ma.mean(trimmed_data, axis=0)

    figure_ar = fig_size[0] / fig_size[1]
    slice_ar = trimmed_data.shape[1] / trimmed_data.shape[0]
    f, (ax1, ax2) = plt.subplots(2,
                                 1,
                                 sharex=True,
                                 gridspec_kw={'height_ratios': (1.5, (slice_ar / figure_ar - 1.5))})
    ax1.imshow(trimmed_data, origin='lower')
    ax2.plot(projection)
    ax1.set_xlim(0, trimmed_data.shape[1] - 1)
    ax2.set_ylim(0, projection.max() * 1.05)

    if traces:
        if axis == 1:
            mean_pos = (window[2] + window[0]) / 2
        else:
            mean_pos = (window[3] + window[1]) / 2
        ax2.scatter(traces(mean_pos), np.zeros_like(traces(mean_pos)), marker=2, color='g')

    f.set_size_inches(fig_size)
    if title:
        ax1.set_title(title)
    plt.tight_layout()
Example no. 13
def createmasterbias():
    #create biaslist
    biaslist = sorted(glob.glob('HD115709/bias/r*.fit'))
    print('Number of biases used =', len(biaslist))

    #open bias zero for cube dimensions
    hdub = fits.open(biaslist[0])
    bias0 = hdub[4].data
    print(biaslist[0], 'is open, shape:', bias0.shape)
    hdub.close()
    print(biaslist[0], 'is closed')

    #create biascube with shape of science and len(biaslist)
    biascube = np.zeros((science0.shape[0], science0.shape[1], len(biaslist)),
                        dtype=bias0.dtype)
    print('biascube created with shape :', biascube.shape)

    #crop and stack biases (biasc= counter, and biasn = name)
    for biasc, biasn in enumerate(biaslist):
        print('Open :', biasn)
        hdu = fits.open(biaslist[biasc])
        if hdu[4].header['CHIPNAME'] == 'A5382-1-7':
            bias = CCDData(hdu[4].data, unit=u.adu)
            tbias = ccdproc.trim_image(bias, fits_section=sciencewindow)
            biascube[:, :, biasc] = tbias.data
            hdu.close()
        else:
            hdu.close()
            print(biasn, 'returned wrong chipname for selected extension')
            raise SystemExit(1)

    #take median and write to disk
    mbias = np.nanmedian(biascube, 2)
    ccdmbias = CCDData(mbias, unit=u.adu)
    ccdmbias.write('tmaster_bias.fits', overwrite=True)
    print('master bias shape :', ccdmbias.data.shape)
Example no. 14
def createmasterflat():

    #create flatlist
    flatlist = sorted(glob.glob('HD115709/flat_SII/r*.fit'))
    print('Number of flats used =', len(flatlist))

    #open flat0 for cube len
    hduf = fits.open(flatlist[0])
    flat0 = hduf[4].data
    print(flatlist[0], 'is open, shape:', flat0.shape)
    hduf.close()
    print(flatlist[0], 'is closed')

    #create flatcube with shape of science and len(flatlist)
    flatcube = np.zeros((science0.shape[0], science0.shape[1], len(flatlist)),
                        dtype=flat0.dtype)

    #convert trim and populate flatcube after bias correction
    for flatc, flatn in enumerate(flatlist):
        print('Open :', flatn)
        hdu = fits.open(flatlist[flatc])
        if hdu[4].header['CHIPNAME'] == 'A5382-1-7':
            ccdflat = CCDData(hdu[4].data, unit=u.adu)
            ccdtflat = ccdproc.trim_image(ccdflat, fits_section=sciencewindow)
            ccdcflat = ccdproc.subtract_bias(ccdtflat, ccdmbias_use)
            flatcube[:, :, flatc] = ccdcflat.data
            hdu.close()
        else:
            hdu.close()
            print(flatn, 'returned wrong chipname for selected extension')
            raise SystemExit(1)
    #take median and write to disk
    mflat = np.nanmedian(flatcube, 2)
    ccdmflat = CCDData(mflat, unit=u.adu)
    ccdmflat.write('tmaster_flat.fits',
                   overwrite=True)  # write the fits to disk
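createmasterbias() and createmasterflat() above share the same stacking pattern; here it is in isolation with hypothetical frame shape and count:

import numpy as np

nframes = 5
cube = np.zeros((100, 100, nframes))
for k in range(nframes):
    cube[:, :, k] = np.random.normal(1000., 5., size=(100, 100))

master = np.nanmedian(cube, axis=2)   # median along the stack axis, ignoring NaNs
assert master.shape == (100, 100)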
Example no. 15
def main(night_path, skip_list_file, mask_file, overwrite=False, plot=False):
    """
    See argparse block at bottom of script for description of parameters.
    """

    night_path = path.realpath(path.expanduser(night_path))
    if not path.exists(night_path):
        raise IOError("Path '{}' doesn't exist".format(night_path))
    logger.info("Reading data from path: {}".format(night_path))

    base_path, night_name = path.split(night_path)
    data_path, run_name = path.split(base_path)
    output_path = path.realpath(
        path.join(data_path, 'processed', run_name, night_name))
    os.makedirs(output_path, exist_ok=True)
    logger.info("Saving processed files to path: {}".format(output_path))

    if plot:  # if we're making plots
        plot_path = path.realpath(path.join(output_path, 'plots'))
        logger.debug("Will make and save plots to: {}".format(plot_path))
        os.makedirs(plot_path, exist_ok=True)
    else:
        plot_path = None

    # check for files to skip (e.g., saturated or errored exposures)
    if skip_list_file is not None:  # a file containing a list of filenames to skip
        with open(skip_list_file, 'r') as f:
            skip_list = [x.strip() for x in f if x.strip()]
    else:
        skip_list = None

    # look for pixel mask file
    if mask_file is not None:
        # load YAML file specifying pixel masks for nearby sources
        with open(mask_file, 'r') as f:
            pixel_mask_spec = yaml.safe_load(f.read())
    else:
        pixel_mask_spec = None

    # generate the raw image file collection to process
    ic = GlobImageFileCollection(night_path, skip_filenames=skip_list)
    logger.info("Frames to process:")
    logger.info("- Bias frames: {}".format(
        len(ic.files_filtered(imagetyp='BIAS'))))
    logger.info("- Flat frames: {}".format(
        len(ic.files_filtered(imagetyp='FLAT'))))
    logger.info("- Comparison lamp frames: {}".format(
        len(ic.files_filtered(imagetyp='COMP'))))
    logger.info("- Object frames: {}".format(
        len(ic.files_filtered(imagetyp='OBJECT'))))

    # HACK:
    ic = GlobImageFileCollection(night_path, skip_filenames=skip_list)

    # ============================
    # Create the master bias frame
    # ============================

    # overscan region of the CCD, using FITS index notation
    oscan_fits_section = "[{}:{},:]".format(oscan_idx, oscan_idx + oscan_size)

    master_bias_file = path.join(output_path, 'master_bias.fits')

    if not os.path.exists(master_bias_file) or overwrite:
        # get list of overscan-subtracted bias frames as 2D image arrays
        bias_list = []
        for hdu, fname in ic.hdus(return_fname=True, imagetyp='BIAS'):
            logger.debug('Processing Bias frame: {0}'.format(fname))
            ccd = CCDData.read(path.join(ic.location, fname), unit='adu')
            ccd = ccdproc.gain_correct(ccd, gain=ccd_gain)
            ccd = ccdproc.subtract_overscan(ccd, overscan=ccd[:, oscan_idx:])
            ccd = ccdproc.trim_image(ccd,
                                     fits_section="[1:{},:]".format(oscan_idx))
            bias_list.append(ccd)

        # combine all bias frames into a master bias frame
        logger.info("Creating master bias frame")
        master_bias = ccdproc.combine(bias_list,
                                      method='average',
                                      clip_extrema=True,
                                      nlow=1,
                                      nhigh=1,
                                      error=True)
        master_bias.write(master_bias_file, overwrite=True)

    else:
        logger.info("Master bias frame file already exists: {}".format(
            master_bias_file))
        master_bias = CCDData.read(master_bias_file)

    if plot:
        # TODO: this assumes vertical CCD
        assert master_bias.shape[0] > master_bias.shape[1]
        aspect_ratio = master_bias.shape[1] / master_bias.shape[0]

        fig, ax = plt.subplots(1, 1, figsize=(10, 12 * aspect_ratio))
        vmin, vmax = zscaler.get_limits(master_bias.data)
        cs = ax.imshow(master_bias.data.T,
                       origin='lower',
                       cmap=cmap,
                       vmin=max(0, vmin),
                       vmax=vmax)
        ax.set_title('master bias frame [zscale]')

        fig.colorbar(cs)
        fig.tight_layout()
        fig.savefig(path.join(plot_path, 'master_bias.png'))
        plt.close(fig)

    # ============================
    # Create the master flat field
    # ============================
    # HACK:
    ic = GlobImageFileCollection(night_path, skip_filenames=skip_list)

    master_flat_file = path.join(output_path, 'master_flat.fits')

    if not os.path.exists(master_flat_file) or overwrite:
        # create a list of flat frames
        flat_list = []
        for hdu, fname in ic.hdus(return_fname=True, imagetyp='FLAT'):
            logger.debug('Processing Flat frame: {0}'.format(fname))
            ccd = CCDData.read(path.join(ic.location, fname), unit='adu')
            ccd = ccdproc.gain_correct(ccd, gain=ccd_gain)
            ccd = ccdproc.ccd_process(ccd,
                                      oscan=oscan_fits_section,
                                      trim="[1:{},:]".format(oscan_idx),
                                      master_bias=master_bias)
            flat_list.append(ccd)

        # combine into a single master flat - use 3*sigma sigma-clipping
        logger.info("Creating master flat frame")
        master_flat = ccdproc.combine(flat_list,
                                      method='average',
                                      sigma_clip=True,
                                      low_thresh=3,
                                      high_thresh=3)
        master_flat.write(master_flat_file, overwrite=True)

        # TODO: make plot if requested?

    else:
        logger.info("Master flat frame file already exists: {}".format(
            master_flat_file))
        master_flat = CCDData.read(master_flat_file)

    if plot:
        # TODO: this assumes vertical CCD
        assert master_flat.shape[0] > master_flat.shape[1]
        aspect_ratio = master_flat.shape[1] / master_flat.shape[0]

        fig, ax = plt.subplots(1, 1, figsize=(10, 12 * aspect_ratio))
        vmin, vmax = zscaler.get_limits(master_flat.data)
        cs = ax.imshow(master_flat.data.T,
                       origin='lower',
                       cmap=cmap,
                       vmin=max(0, vmin),
                       vmax=vmax)
        ax.set_title('master flat frame [zscale]')

        fig.colorbar(cs)
        fig.tight_layout()
        fig.savefig(path.join(plot_path, 'master_flat.png'))
        plt.close(fig)

    # =====================
    # Process object frames
    # =====================
    # HACK:
    ic = GlobImageFileCollection(night_path, skip_filenames=skip_list)

    logger.info("Beginning object frame processing...")
    for hdu, fname in ic.hdus(return_fname=True, imagetyp='OBJECT'):
        new_fname = path.join(output_path, 'p_{}'.format(fname))

        # -------------------------------------------
        # First do the simple processing of the frame
        # -------------------------------------------

        logger.debug("Processing '{}' [{}]".format(hdu.header['OBJECT'],
                                                   fname))
        if path.exists(new_fname) and not overwrite:
            logger.log(1, "\tAlready processed! {}".format(new_fname))
            ext = SourceCCDExtractor(filename=path.join(
                ic.location, new_fname),
                                     plot_path=plot_path,
                                     zscaler=zscaler,
                                     cmap=cmap,
                                     **ccd_props)
            nccd = ext.ccd

            # HACK: F**K this is a bad hack
            ext._filename_base = ext._filename_base[2:]

        else:
            # process the frame!
            ext = SourceCCDExtractor(filename=path.join(ic.location, fname),
                                     plot_path=plot_path,
                                     zscaler=zscaler,
                                     cmap=cmap,
                                     unit='adu',
                                     **ccd_props)

            _pix_mask = pixel_mask_spec.get(
                fname, None) if pixel_mask_spec is not None else None
            nccd = ext.process_raw_frame(pixel_mask_spec=_pix_mask,
                                         master_bias=master_bias,
                                         master_flat=master_flat)
            nccd.write(new_fname, overwrite=overwrite)

        # -------------------------------------------
        # Now do the 1D extraction
        # -------------------------------------------

        fname_1d = path.join(output_path, '1d_{0}'.format(fname))
        if path.exists(fname_1d) and not overwrite:
            logger.log(1, "\tAlready extracted! {}".format(fname_1d))
            continue

        else:
            logger.debug("\tExtracting to 1D")

            # first step is to fit a voigt profile to a middle-ish row to determine LSF
            lsf_p = ext.get_lsf_pars()  # MAGIC NUMBER

            try:
                tbl = ext.extract_1d(lsf_p)
            except Exception as e:
                logger.error('Failed! {}: {}'.format(e.__class__.__name__,
                                                     str(e)))
                continue

            hdu0 = fits.PrimaryHDU(header=nccd.header)
            hdu1 = fits.table_to_hdu(tbl)
            hdulist = fits.HDUList([hdu0, hdu1])

            hdulist.writeto(fname_1d, overwrite=overwrite)

        del ext

    # ==============================
    # Process comparison lamp frames
    # ==============================
    # HACK:
    ic = GlobImageFileCollection(night_path, skip_filenames=skip_list)

    logger.info("Beginning comp. lamp frame processing...")
    for hdu, fname in ic.hdus(return_fname=True, imagetyp='COMP'):
        new_fname = path.join(output_path, 'p_{}'.format(fname))

        logger.debug("\tProcessing '{}'".format(hdu.header['OBJECT']))

        if path.exists(new_fname) and not overwrite:
            logger.log(1, "\tAlready processed! {}".format(new_fname))
            ext = CompCCDExtractor(filename=path.join(ic.location, new_fname),
                                   plot_path=plot_path,
                                   zscaler=zscaler,
                                   cmap=cmap,
                                   **ccd_props)
            nccd = ext.ccd

            # HACK: F**K this is a bad hack
            ext._filename_base = ext._filename_base[2:]

        else:
            # process the frame!
            ext = CompCCDExtractor(filename=path.join(ic.location, fname),
                                   plot_path=plot_path,
                                   unit='adu',
                                   **ccd_props)

            _pix_mask = pixel_mask_spec.get(
                fname, None) if pixel_mask_spec is not None else None
            nccd = ext.process_raw_frame(
                pixel_mask_spec=_pix_mask,
                master_bias=master_bias,
                master_flat=master_flat,
            )
            nccd.write(new_fname, overwrite=overwrite)

        # -------------------------------------------
        # Now do the 1D extraction
        # -------------------------------------------

        fname_1d = path.join(output_path, '1d_{0}'.format(fname))
        if path.exists(fname_1d) and not overwrite:
            logger.log(1, "\tAlready extracted! {}".format(fname_1d))
            continue

        else:
            logger.debug("\tExtracting to 1D")

            try:
                tbl = ext.extract_1d()
            except Exception as e:
                logger.error('Failed! {}: {}'.format(e.__class__.__name__,
                                                     str(e)))
                continue

            hdu0 = fits.PrimaryHDU(header=nccd.header)
            hdu1 = fits.table_to_hdu(tbl)
            hdulist = fits.HDUList([hdu0, hdu1])

            hdulist.writeto(fname_1d, overwrite=overwrite)
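How the section strings in the example above fit together, with hypothetical values for oscan_idx and oscan_size (both are defined outside this excerpt): the overscan columns start at oscan_idx and the science region is everything before it:

oscan_idx, oscan_size = 1024, 32   # hypothetical values
oscan_fits_section = "[{}:{},:]".format(oscan_idx, oscan_idx + oscan_size)
trim_fits_section = "[1:{},:]".format(oscan_idx)
print(oscan_fits_section, trim_fits_section)   # -> [1024:1056,:] [1:1024,:]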
Example no. 16
def hrs_process(image_name, ampsec=[], oscansec=[], trimsec=[],
                masterbias=None, error=False, bad_pixel_mask=None, flip=False,
                rdnoise=None, oscan_median=True, oscan_model=None):
    """Processing required for HRS observations.  If the images have multiple
       amps, then this will process each part of the image and recombine them
       into the final result.

    Parameters
    ----------
    image_name: str
       Name of file to be processed

    ampsec: list
       List of ampsections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    oscansec: list
       List of overscan sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    trimsec: list
       List of trim sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    error: boolean
        If True, create an uncertainty array for ccd

    masterbias: None, `~numpy.ndarray`,  or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.

    bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.


    flip: boolean
        If True, the image will be flipped such that the orders run from the
        bottom of the image to the top and the dispersion runs from the left
        to the right.

    rdnoise: None or `~astropy.Quantity`
        Read noise for the observations.  The read noise should be in
        `~astropy.units.electron`

    oscan_median :  bool, optional
        If true, takes the median of each line.  Otherwise, uses the mean

    oscan_model :  `~astropy.modeling.Model`, optional
        Model to fit to the data.  If None, returns the values calculated
        by the median or the mean.

    Returns
    -------
    ccd: `~ccdproc.CCDData`
        Processed data.


    Notes
    -----

    The format of the `fits_section` string follow the rules for slices that
    are consistent with the FITS standard (v3) and IRAF usage of keywords like
    TRIMSEC and BIASSEC. Its indexes are one-based, instead of the
    python-standard zero-based, and the first index is the one that increases
    most rapidly as you move through the array in memory order, opposite the
    python ordering.

    The 'fits_section' argument is provided as a convenience for those who are
    processing files that contain TRIMSEC and BIASSEC. The preferred, more
    pythonic, way of specifying the overscan is to do it by indexing the data
    array directly with the `overscan` argument.

    """
    # read in the data
    ccd = ccdproc.CCDData.read(image_name, unit=u.adu)

    try:
        namps = ccd.header['CCDAMPS']
    except KeyError:
        namps = ccd.header['CCDNAMPS']
        
    # throw errors for the wrong number of amps
    if len(ampsec) != namps:
        raise ValueError('Number of ampsec does not equal number of amps')
    if len(oscansec) != namps:
        raise ValueError('Number of oscansec does not equal number of amps')
    if len(trimsec) != namps:
        raise ValueError('Number of trimsec does not equal number of amps')

    if namps == 1:
        if ccd.header['OBSTYPE']=='Bias':
            gain = None
        else:
            gain = float(ccd.header['gain'].split()[0]) * u.electron / u.adu

        nccd = ccd_process(ccd, oscan=oscansec[0], trim=trimsec[0],
                           error=error, masterbias=masterbias,
                           bad_pixel_mask=bad_pixel_mask, gain=gain,
                           rdnoise=rdnoise, oscan_median=oscan_median,
                           oscan_model=oscan_model)
    else:
        ccd_list = []
        xsize = 0
        ysize = 0
        gain_ave = 0

        #determine size of the image
        ys, xs = ccd.data.shape
        x1 = 0
        x2 = 0
        y1 = 0
        y2 = ys
        for i in range(namps):
            amp = ccdproc.utils.slices.slice_from_string(ampsec[i], fits_convention=True)
            ay1, ay2, _ = amp[0].indices(ys)
            ax1, ax2, _ = amp[1].indices(xs)
            trim = ccdproc.utils.slices.slice_from_string(trimsec[i], fits_convention=True) 
            ty1, ty2, _ = trim[0].indices(ys)
            tx1, tx2, _ = trim[1].indices(xs)

            dx1 = max(tx1, ax1)
            x1 = min(x1 or dx1, dx1)

            #assumes bias frames are given in increasing x-order
            x2 = ax2 - (ax2-ax1-tx2)
            

        data = np.zeros((y2-y1, x2-x1))
        if ccd.mask is not None:
            mask = np.zeros((y2 - y1, x2 - x1))
        else:
            mask = None

        if ccd.uncertainty is not None:
            raise NotImplementedError(
                'Support for uncertainties not implemented yet')
        else:
            uncertainty = None

        for i in range(namps):
            gain_ave += float(ccd.header['gain'].split()[i])
        gain_ave = gain_ave / namps * u.electron / u.adu
        for i in range(namps):
            # trim the image to just the amplifier
            cc = ccdproc.trim_image(ccd, fits_section=ampsec[i])

            # determine its position in the area after correction
            amp = ccdproc.utils.slices.slice_from_string(ampsec[i], fits_convention=True)
            ay1, ay2, _ = amp[0].indices(ys)
            ax1, ax2, _ = amp[1].indices(xs)
            trim = ccdproc.utils.slices.slice_from_string(trimsec[i], fits_convention=True) 
            ty1, ty2, _ = trim[0].indices(ys)
            tx1, tx2, _ = trim[1].indices(xs)
            dx1 = max(ax1 - x1, 0)
            dx2 = ax2 - x1
            dy1 = ay1
            dy2 = ay2
 
            # gain correct the array
            gain = float(ccd.header['gain'].split()[i]) * u.electron / u.adu
            ncc = ccd_process(cc, oscan=oscansec[i], trim=trimsec[i],
                              error=False, masterbias=None, gain=gain,
                              bad_pixel_mask=None, rdnoise=rdnoise,
                              oscan_median=oscan_median,
                              oscan_model=oscan_model)

            # normalize all to have the same gain
            ncc = ncc.divide(gain_ave)

            data[dy1:dy2, dx1:dx2] = ncc.data
            if ccd.mask is not None:
                mask[dy1:dy2, dx1:dx2] = ncc.mask


        # create new entry
        nccd = ccdproc.CCDData(data, unit=ncc.unit, mask=mask,
                               uncertainty=uncertainty)
        nccd.header = ccd.header
        nccd = ccd_process(nccd, masterbias=masterbias, error=error, gain=gain_ave,
                           rdnoise=rdnoise, bad_pixel_mask=bad_pixel_mask)

    if flip:
        nccd.data = nccd.data[::-1, ::-1]
        if (nccd.mask is not None):
            nccd.mask = nccd.mask[::-1, ::-1]
        if (nccd.uncertainty is not None):
            nccd.uncertainty = nccd.uncertainty[::-1, ::-1]

    return nccd
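A quick check of the FITS-section convention described in the Notes above, using the same helper hrs_process() relies on:

from ccdproc.utils.slices import slice_from_string

sl = slice_from_string('[1:100,3:50]', fits_convention=True)
# FITS sections are one-based, inclusive, fast axis first; the resulting
# python slices are (rows, columns), zero-based and stop-exclusive.
print(sl)   # (slice(2, 50, None), slice(0, 100, None))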
Example no. 17
def process_fits(fitspath,
                 *,
                 obstype=None,
                 object=None,
                 exposure_times=None,
                 percentile=None,
                 percentile_min=None,
                 percentile_max=None,
                 window=None,
                 darks=None,
                 cosmic_ray=False,
                 cosmic_ray_kwargs={},
                 gain=None,
                 readnoise=None,
                 normalise=False,
                 normalise_func=np.ma.average,
                 combine_type=None,
                 sigma_clip=False,
                 low_thresh=3,
                 high_thresh=3):
    """Combine all FITS images of a given type and exposure time from a given directory.

    Parameters
    ----------
    fitspath: str
        Path to the FITS images to process. Can be a path to a single file, or a path to a
        directory. If the latter the directory will be searched for FITS files and checked
        against criteria from obstype, object, exposure_times critera.
    obstype: str, optional
        Observation type, an 'OBSTYPE' FITS header value e.g. 'DARK', 'OBJ'. If given only files
        with matching OBSTYPE will be processed.
    object: str, optional
        Object name, i.e. 'OBJECT' FITS header value. If given only files with matching OBJECT
        will be processed.
    exposure_times: float or sequence, optional
        Exposure time(s), i.e 'TOTALEXP' FITS header value(s). If given only files with matching
        TOTALEXP will be processed.
    percentile: float, optional
        If given, only images whose percentile value falls between percentile_min and
        percentile_max will be processed, e.g. set to 50.0 to select images by median value,
        set to 99.5 to select images by their 99.5th percentile value.
    percentile_min: float, optional
        Minimum percentile value.
    percentile_max: float, optional
        Maximum percentile value.
    window: (int, int, int, int), optional
        If given will trim images to the window defined as (x0, y0, x1, y1), where (x0, y0)
        and (x1, y1) are the coordinates of the bottom left and top right corners.
    darks: str or sequence, optional
        Filename(s) of dark frame(s) to subtract from the image(s). If given a dark frame with
        matching TOTALEXP will be subtracted from each image during processing.
    cosmic_ray: bool, optional
        Whether to perform single image cosmic ray removal, using the lacosmic algorithm,
        default False. Requires both gain and readnoise to be set.
    cosmic_ray_kwargs: dict, optional
        Additional keyword arguments to pass to the ccdproc.cosmicray_lacosmic function.
    gain: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to the (inverse gain), or
        a Quantity containing the gain value to use. If both gain and read noise are given
        an uncertainty frame will be created.
    readnoise: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to read noise, or a Quantity
        containing the read noise value to use. If both read noise and gain are given then an
        uncertainty frame will be created.
    normalise: bool, optional
        If True each image will be normalised. Default False.
    normalise_func: callable, optional
        Function to use for normalisation. Each image will be divided by normalise_func(image).
        Default np.ma.average.
    combine_type: str, optional
        Type of image combination to use, 'MEAN' or 'MEDIAN'. If None the individual
        images will be processed but not combined and the return value will be a list of
        CCDData objects. Default None.
    sigma_clip: bool, optional
        If True will perform sigma clipping on the image stack before combining, default=False.
    low_thresh: float, optional
        Lower threshold to use for sigma clipping, in standard deviations. Default is 3.0.
    high_thresh: float, optional
        Upper threshold to use for sigma clipping, in standard deviations. Default is 3.0.


    Returns
    -------
    master: ccdproc.CCDData
        Combined image.

    """
    if exposure_times:
        try:
            # Should work for any sequence or iterable type
            exposure_times = set(exposure_times)
        except TypeError:
            # Not a sequence or iterable, try using as a single value.
            exposure_times = {
                float(exposure_times),
            }

    if darks:
        try:
            dark_filenames = set(darks)
        except TypeError:
            dark_filenames = {
                darks,
            }
        dark_dict = {}
        for filename in dark_filenames:
            try:
                dark_data = CCDData.read(filename)
            except ValueError:
                # Might be no units in FITS header. Assume ADU.
                dark_data = CCDData.read(filename, unit='adu')
            dark_dict[dark_data.header['totalexp']] = dark_data

    if combine_type and combine_type not in ('MEAN', 'MEDIAN'):
        raise ValueError(
            "combine_type must be 'MEAN' or 'MEDIAN', got '{}''".format(
                combine_type))

    fitspath = Path(fitspath)
    if fitspath.is_file():
        # FITS path points to a single file, turn into a list.
        filenames = [
            fitspath,
        ]
    elif fitspath.is_dir():
        # FITS path is a directory. Find FITS file and collect values of selected FITS headers
        ifc = ImageFileCollection(fitspath, keywords='*')
        if len(ifc.files) == 0:
            raise RuntimeError("No FITS files found in {}".format(fitspath))
        # Filter by observation type.
        if obstype:
            try:
                ifc = ifc.filter(obstype=obstype)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBSTYPE={}.".format(obstype))
        # Filter by object name.
        if object:
            try:
                ifc = ifc.filter(object=object)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBJECT={}.".format(object))
        filenames = [
            Path(ifc.location).joinpath(filename) for filename in ifc.files
        ]
    else:
        raise ValueError(
            "fitspath '{}' is not an accessible file or directory.".format(
                fitspath))

    # Load image(s) and process them.
    images = []
    for filename in filenames:
        try:
            ccddata = CCDData.read(filename)
        except ValueError:
            # Might be no units in FITS header. Assume ADU.
            ccddata = CCDData.read(filename, unit='adu')
        # Filtering by exposure times here because it's hard to filter ImageFileCollection
        # with an indeterminate number of possible values.
        if not exposure_times or ccddata.header['totalexp'] in exposure_times:
            if window:
                ccddata = ccdproc.trim_image(ccddata[window[1]:window[3] + 1,
                                                     window[0]:window[2] + 1])

            if percentile:
                # Check percentile value is within specified range, otherwise skip to next image.
                percentile_value = np.percentile(ccddata.data, percentile)
                if percentile_value < percentile_min or percentile_value > percentile_max:
                    continue

            if darks:
                try:
                    ccddata = ccdproc.subtract_dark(
                        ccddata,
                        dark_dict[ccddata.header['totalexp']],
                        exposure_time='totalexp',
                        exposure_unit=u.second)
                except KeyError:
                    raise RuntimeError(
                        "No dark with matching totalexp for {}.".format(
                            filename))

            if gain:
                if isinstance(gain, str):
                    egain = ccddata.header[gain]
                    egain = egain * u.electron / u.adu
                elif isinstance(gain, u.Quantity):
                    try:
                        egain = gain.to(u.electron / u.adu)
                    except u.UnitsError:
                        egain = (1 / gain).to(u.electron / u.adu)
                else:
                    raise ValueError(
                        f"gain must be a string or Quantity, got {gain}.")

            if readnoise:
                if isinstance(readnoise, str):
                    rn = ccddata.header[readnoise]
                    rn = rn * u.electron
                elif isinstance(readnoise, u.Quantity):
                    try:
                        rn = readnoise.to(u.electron / u.pixel)
                    except u.UnitsError:
                        rn = (readnoise * u.pixel).to(u.electron)
                else:
                    raise ValueError(
                        f"readnoise must be a string or Quantity, got {readnoise}."
                    )

            if gain and readnoise:
                ccddata = ccdproc.create_deviation(ccddata,
                                                   gain=egain,
                                                   readnoise=rn,
                                                   disregard_nan=True)

            if gain:
                ccddata = ccdproc.gain_correct(ccddata, gain=egain)

            if cosmic_ray:
                if not (gain and readnoise):
                    raise ValueError(
                        "Cosmic ray removal requires both gain and readnoise.")

                ccddata = ccdproc.cosmicray_lacosmic(
                    ccddata,
                    gain=1.0,  # ccddata already gain corrected
                    readnoise=rn,
                    **cosmic_ray_kwargs)

            if normalise:
                ccddata = ccddata.divide(normalise_func(ccddata.data))

            images.append(ccddata)

    n_images = len(images)
    if n_images == 0:
        msg = "No FITS files match exposure time criteria"
        raise RuntimeError(msg)

    if n_images == 1 and combine_type:
        warn(
            "Combine type '{}' selected but only 1 matching image, "
            "skipping image combination.".format(combine_type))
        combine_type = None

    if combine_type:
        combiner = Combiner(images)

        # Sigma clip data
        if sigma_clip:
            if combine_type == 'MEAN':
                central_func = np.ma.average
            else:
                # If not MEAN has to be MEDIAN, checked earlier that it was one or the other.
                central_func = np.ma.median
            combiner.sigma_clipping(low_thresh=low_thresh,
                                    high_thresh=high_thresh,
                                    func=central_func)

        # Stack images.
        if combine_type == 'MEAN':
            master = combiner.average_combine()
        else:
            master = combiner.median_combine()

        # Populate header of combined image with metadata about the processing.
        master.header['fitspath'] = str(fitspath)
        if obstype:
            master.header['obstype'] = obstype
        if exposure_times:
            if len(exposure_times) == 1:
                master.header['totalexp'] = float(exposure_times.pop())
            else:
                master.header['totalexp'] = tuple(exposure_times)
        master.header['nimages'] = n_images
        master.header['combtype'] = combine_type
        master.header['sigclip'] = sigma_clip
        if sigma_clip:
            master.header['lowclip'] = low_thresh
            master.header['highclip'] = high_thresh

    else:
        # No image combination, just processing individual image(s)
        if n_images == 1:
            master = images[0]
        else:
            master = images

    return master
Example no. 18
outdir = sys.argv[2]

if not os.path.isdir(outdir): os.mkdir(outdir)
os.chdir(outdir)

#change this to point to your raw data directory
ic1 = ImageFileCollection(indir)

#create the bias frames
blue_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Blue arm'):
    print(ic1.location + filename)
    ccd = CCDData.read(ic1.location + filename, unit=u.adu)
    #this has to be fixed as the bias section does not include the whole section that will be trimmed
    ccd = ccdproc.subtract_overscan(ccd, median=True, overscan_axis=0,
                                    fits_section='[1:966,4105:4190]')
    ccd = ccdproc.trim_image(ccd, fits_section=ccd.header['TRIMSEC'])
    blue_bias_list.append(ccd)
master_bias_blue = ccdproc.combine(blue_bias_list, method='median')
master_bias_blue.write('master_bias_blue.fits', overwrite=True)

red_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Red arm'):
    print(ic1.location + filename)
    ccd = CCDData.read(ic1.location + filename, unit=u.adu)
    #this has to be fixed as the bias section does not include the whole section that will be trimmed
    ccd = ccdproc.subtract_overscan(ccd, median=True, overscan_axis=0,
                                    fits_section='[1:966,4105:4190]')
    ccd = ccdproc.trim_image(ccd, fits_section=ccd.header['TRIMSEC'])
    red_bias_list.append(ccd)
master_bias_red = ccdproc.combine(red_bias_list, method='median')
master_bias_red.write('master_bias_red.fits', overwrite=True)
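What overscan_axis=0 means in the calls above, on a toy frame with hypothetical dimensions: the overscan rows are collapsed along axis 0, yielding one correction value per column:

import numpy as np
import astropy.units as u
import ccdproc
from astropy.nddata import CCDData

ccd = CCDData(np.ones((120, 50)), unit=u.adu)
corrected = ccdproc.subtract_overscan(ccd, median=True, overscan_axis=0,
                                      fits_section='[1:50,101:120]')
assert corrected.data.shape == (120, 50)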
Example no. 19
def combine_ccd(fitslist=None,
                summary_table=None,
                table_filecol="file",
                trim_fits_section=None,
                output=None,
                unit='adu',
                subtract_frame=None,
                combine_method='median',
                reject_method=None,
                normalize_exposure=False,
                normalize_average=False,
                exposure_key='EXPTIME',
                mem_limit=2e9,
                combine_uncertainty_function=None,
                extension=0,
                type_key=None,
                type_val=None,
                dtype="float32",
                uncertainty_dtype="float32",
                output_verify='fix',
                overwrite=False,
                verbose=True,
                **kwargs):
    ''' Combine images.

    Slight variant of ``ccdproc.combine``.
    # TODO: accept the input like ``sigma_clip_func='median'``, etc.

    Parameters
    ----------
    fitslist: path-like, list of path-like, or list of CCDData
        The list of path to FITS files or the list of CCDData to be
        stacked. It is useful to give list of CCDData if you have
        already stacked/loaded FITS file into a list by your own
        criteria. If ``None`` (default), you must give ``fitslist`` or
        ``summary_table``. If it is not ``None``, this function will do
        very similar job to that of ``ccdproc.combine``. Although it is
        not a good idea, a mixed list of CCDData and paths to the files
        is also acceptable.

    summary_table: pandas.DataFrame or astropy.table.Table
        The table which contains the metadata of files. If there are
        many FITS files and you want to use stacking many times, it is
        better to make a summary table by ``filemgmt.make_summary`` and
        use that instead of opening FITS files' headers every time you
        call this function. If you want to use ``summary_table`` instead
        of ``fitslist`` and have set ``loadccd=True``, you must not have
        ``None`` or ``NaN`` value in the
        ``summary_table[table_filecol]``.

    table_filecol: str
        The column name of the ``summary_table`` which contains the path
        to the FITS files.

    trim_fits_section : str or None, optional
        The ``fits_section`` of ``ccdproc.trim_image``. Region of
        ``ccd`` from which the overscan is extracted; see
        `~ccdproc.subtract_overscan` for details.
        Default is ``None``.

    output : path-like or None, optional.
        The path if you want to save the resulting ``ccd`` object.
        Default is ``None``.

    unit : `~astropy.units.Unit` or str, optional.
        The units of the data.
        Default is ``'adu'``.

    subtract_frame : array-like, optional.
        The frame you want to subtract from the image after the
        combination. It can be, e.g., dark frame, because it is easier
        to calculate Poisson error before the dark subtraction and
        subtract the dark later.
        TODO: This maybe unnecessary.
        Default is ``None``.

    combine_method : str or None, optional.
        The ``method`` for ``ccdproc.combine``, i.e., one of {'average',
        'median', 'sum'}.
        Default is ``'median'``.

    reject_method : str
        Made for simple use of ``ccdproc.combine``, [None, 'minmax',
        'sigclip' == 'sigma_clip', 'extrema' == 'ext']. Automatically
        turns on the option, e.g., ``clip_extrema = True`` or
        ``sigma_clip = True``. Leave it blank for no rejection.
        Default is ``None``.

    normalize_exposure : bool, optional.
        Whether to normalize the values by the exposure time of each
        frame before combining.
        Default is ``False``.

    normalize_average : bool, optional.
        Whether to normalize the values by the average value of each
        frame before combining.
        Default is ``False``.

    exposure_key : str, optional
        The header keyword for the exposure time.
        Default is ``"EXPTIME"``.

    combine_uncertainty_function : callable, None, optional
        The uncertainty calculation function of ``ccdproc.combine``. If
        ``None`` use the default uncertainty func when using average,
        median or sum combine, otherwise use the function provided.
        Default is ``None``.

    extension: int or str, optional
        The extension to be used.
        Default is ``0``.

    dtype : str or `numpy.dtype` or None, optional
        Allows user to set dtype. See `numpy.array` ``dtype`` parameter
        description. If ``None`` it uses ``np.float64``.
        Default is ``"float32"``.

    type_key, type_val: str, list of str
        The header keyword for the ccd type, and the value you want to
        match. For an open HDU named ``hdu``, e.g., only the files which
        satisfies ``hdu[extension].header[type_key] == type_val`` among
        all the ``fitslist`` will be used.

    output_verify : str
        Output verification option.  Must be one of ``"fix"``,
        ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``.
        May also be any combination of ``"fix"`` or ``"silentfix"`` with
        ``"+ignore"``, ``"+warn"``, or ``"+exception"`` (e.g.
        ``"fix+warn"``).  See the astropy documentation below:
        http://docs.astropy.org/en/stable/io/fits/api/verification.html#verify

    mem_limit : float, optional
        Maximum memory which should be used while combining (in bytes).
        Default is ``2.e9``.

    **kwarg:
        kwargs for the ``ccdproc.combine``. See its documentation. This
        includes (RHS are the default values)
        ```
        weights=None,
        scale=None,
        mem_limit=16000000000.0,
        clip_extrema=False,
        nlow=1,
        nhigh=1,
        minmax_clip=False,
        minmax_clip_min=None,
        minmax_clip_max=None,
        sigma_clip=False,
        sigma_clip_low_thresh=3,
        sigma_clip_high_thresh=3,
        sigma_clip_func=<numpy.ma.core._frommethod instance>,
        sigma_clip_dev_func=<numpy.ma.core._frommethod instance>,
        combine_uncertainty_function=None, **ccdkwargs
        ```

    Returns
    -------
    master: astropy.nddata.CCDData
        Resulting combined ccd.
    '''
    def _set_reject_method(reject_method):
        ''' Convenience function for ccdproc.combine reject switches
        '''
        clip_extrema, minmax_clip, sigma_clip = False, False, False

        if reject_method in ['extrema', 'ext']:
            clip_extrema = True
        elif reject_method in ['minmax']:
            minmax_clip = True
        elif reject_method in ['sigma_clip', 'sigclip']:
            sigma_clip = True
        else:
            if reject_method not in [None, 'no']:
                raise KeyError("reject must be one of [None, 'minmax', " +
                               "'sigclip'=='sigma_clip', 'extrema'=='ext']")

        return clip_extrema, minmax_clip, sigma_clip

    def _print_info(combine_method, Nccd, reject_method, **kwargs):
        if reject_method is None:
            reject_method = 'no'

        info_str = ('"{:s}" combine {:d} images by "{:s}" rejection')

        print(info_str.format(combine_method, Nccd, reject_method))
        print(dict(**kwargs))
        return

    # def _normalize_exptime(ccdlist, exposure_key):
    #     _ccdlist = ccdlist.copy()
    #     exptimes = []

    #     for i in range(len(_ccdlist)):
    #         exptime = _ccdlist[i].header[exposure_key]
    #         exptimes.append(exptime)
    #         _ccdlist[i] = _ccdlist[i].divide(exptime)

    #     if verbose:
    #         if len(np.unique(exptimes)) != 1:
    #             print('There are more than one exposure times:\n\t', end=' ')
    #             print(np.unique(exptimes), end=' ')
    #             print('seconds')
    #         print(f'Normalized images by exposure time ("{exposure_key}").')

    #     return _ccdlist

    def _add_and_print(s, header, verbose):
        header.add_history(s)
        if verbose:
            print(s)

    # Give only one
    if ((fitslist is not None) + (summary_table is not None) != 1):
        raise ValueError(
            "One and only one of [fitslist, summary_table] must be given.")

    # If fitslist
    if fitslist is not None:
        try:
            fitslist = list(fitslist)
        except TypeError:
            raise TypeError("fitslist must be convertible to list. " +
                            f"It's now {type(fitslist)}.")

    # If summary_table
    if summary_table is not None:
        if ((not isinstance(summary_table, Table))
                and (not isinstance(summary_table, pd.DataFrame))):
            raise TypeError(
                "summary_table must be an astropy Table or Pandas " +
                f"DataFrame. It's now {type(summary_table)}.")

    # Check for type_key and type_val
    if ((type_key is None) ^ (type_val is None)):
        raise ValueError(
            "type_key and type_val must be both specified or both None.")

    if (output is not None) and (Path(output).exists()):
        if overwrite:
            print(f"{output} already exists:\n\tIt will be overwritten.")
        else:
            print(f"{output} already exists:")
            return load_if_exists(output, loader=CCDData.read, if_not=None)

    # Do we really need to accept all three of normalize & scale?
    # if scale is None:
    #     scale = np.ones(len(ccdlist))
    if (((normalize_average) + (normalize_exposure)) > 1):
        raise ValueError("At most one of [normalize_average, " +
                         "normalize_exposure] can be True.")

    # Set history messages
    str_history = ('{:d} images with {:s} = {:s} are "{:s}" combined ' +
                   'using "{:s}" rejection (additional kwargs: {})')
    str_nexp = "Each frame normalized by exposure time before combination."
    str_navg = "Each frame normalized by average value before combination."
    str_subt = "Subtracted a user-provided frame"
    str_trim = "Trim by FITS section {}"

    if reject_method is None:
        reject_method = 'no'

    # Select FITS files that match the type_key/type_val criteria
    ccdlist = stack_FITS(fitslist=fitslist,
                         summary_table=summary_table,
                         table_filecol=table_filecol,
                         extension=extension,
                         unit=unit,
                         type_key=type_key,
                         type_val=type_val,
                         loadccd=False)
    #  trim_fits_section=trim_fits_section,
    # loadccd=False: loading every CCD here could exhaust memory.

    try:
        header = ccdlist[0].header
    except AttributeError:
        header = fits.getheader(ccdlist[0])

    if verbose:
        _print_info(combine_method=combine_method,
                    Nccd=len(ccdlist),
                    reject_method=reject_method,
                    dtype=dtype,
                    **kwargs)

    scale = None
    # Normalize by exposure
    if normalize_exposure:
        tmp = make_summary(fitslist=fitslist,
                           keywords=[exposure_key],
                           verbose=False,
                           sort_by=None)
        exptimes = tmp[exposure_key].tolist()
        scale = 1 / np.array(exptimes)
        _add_and_print(str_nexp, header, verbose)

    # Normalize by pixel average
    if normalize_average:

        def invavg(a):
            return 1 / np.mean(a)

        scale = invavg
        _add_and_print(str_navg, header, verbose)

    # Set rejection switches
    clip_extrema, minmax_clip, sigma_clip = _set_reject_method(reject_method)

    if len(ccdlist) == 1:
        if isinstance(ccdlist[0], CCDData):
            master = ccdlist[0]
        else:
            master = load_ccd(ccdlist[0], extension=extension, unit=unit)
    else:
        master = combine(
            img_list=ccdlist,
            method=combine_method,
            clip_extrema=clip_extrema,
            minmax_clip=minmax_clip,
            sigma_clip=sigma_clip,
            mem_limit=mem_limit,
            combine_uncertainty_function=combine_uncertainty_function,
            unit=unit,
            hdu=extension,
            scale=scale,
            dtype=dtype,
            **kwargs)

    ncombine = len(ccdlist)
    header["COMBVER"] = (ccdproc.__version__,
                         "ccdproc version used for combine.")
    header["NCOMBINE"] = (ncombine, "Number of combined images")
    header["COMBMETH"] = (combine_method, "Combining method")

    header.add_history(
        str_history.format(ncombine, str(type_key), str(type_val),
                           str(combine_method), str(reject_method), kwargs))

    if subtract_frame is not None:
        subtract = CCDData(subtract_frame.copy())
        master.data = master.subtract(subtract).data
        _add_and_print(str_subt, header, verbose)

    if trim_fits_section is not None:
        master = trim_image(master, fits_section=trim_fits_section)
        _add_and_print(str_trim.format(trim_fits_section), header, verbose)

    master.header = header
    master = CCDData_astype(master,
                            dtype=dtype,
                            uncertainty_dtype=uncertainty_dtype)

    if output is not None:
        if verbose:
            print(f"Writing FITS to {output}... ", end='')
        master.write(output, output_verify=output_verify, overwrite=overwrite)
        if verbose:
            print("Saved.")

    return master
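# A minimal, self-contained sketch of how the rejection switches returned by
# _set_reject_method() above map onto ccdproc.combine. The dummy frames below
# are placeholders, not part of the original pipeline.
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData
from ccdproc import combine

frames = [CCDData(np.random.normal(100, 5, size=(50, 50)), unit=u.adu)
          for _ in range(5)]

# reject_method='sigclip' corresponds to sigma_clip=True with the other two
# switches left False.
master = combine(frames,
                 method='median',
                 clip_extrema=False,
                 minmax_clip=False,
                 sigma_clip=True,
                 sigma_clip_low_thresh=3,
                 sigma_clip_high_thresh=3)
print(master.data.shape)   # (50, 50)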
Esempio n. 20
0
os.chdir(outdir)

#change this to point to your raw data directory
ic1 = ImageFileCollection(indir)

#create the bias frames
blue_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Blue arm'):
    print(ic1.location + filename)
    ccd = CCDData.read(ic1.location + filename, unit=u.adu)
    #this has to be fixed as the bias section does not include the whole section that will be trimmed
    ccd = ccdproc.subtract_overscan(ccd,
                                    median=True,
                                    overscan_axis=0,
                                    fits_section='[1:966,4105:4190]')
    ccd = ccdproc.trim_image(ccd, fits_section=ccd.header['TRIMSEC'])
    blue_bias_list.append(ccd)
master_bias_blue = ccdproc.combine(blue_bias_list, method='median')
master_bias_blue.write('master_bias_blue.fits', overwrite=True)

red_bias_list = []
for filename in ic1.files_filtered(obstype='Bias', isiarm='Red arm'):
    print(ic1.location + filename)
    ccd = CCDData.read(ic1.location + filename, unit=u.adu)
    #this has to be fixed as the bias section does not include the whole section that will be trimmed
    ccd = ccdproc.subtract_overscan(ccd,
                                    median=True,
                                    overscan_axis=0,
                                    fits_section='[1:966,4105:4190]')
    ccd = ccdproc.trim_image(ccd, fits_section=ccd.header['TRIMSEC'])
    red_bias_list.append(ccd)
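# By analogy with the blue arm above, the red-arm master bias would presumably
# be built the same way (a sketch; the original snippet is cut off before this
# step):
master_bias_red = ccdproc.combine(red_bias_list, method='median')
master_bias_red.write('master_bias_red.fits', overwrite=True)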
Esempio n. 21
0
# Table Variables Setting
cards = ['DATE-OBS', 'NAXIS1', 'NAXIS2',
         'XPIXSZ','YPIXSZ','XBINNING', 'YBINNING',
         'EXPTIME', 'EGAIN',
         'OBJECT']

dtypes = ['U24', int, int,
          float,float, int, int,
          float, float,
          'U16']

df = input("Use the current working files as they are? Y/N (any other input is treated as N)")

if df == "Y":
    for filename in allfitsname:
        ccd = trim_image(CCDData.read(filename, unit=u.adu),
                         fits_section=f"[{x1}:{x2}, {y1}:{y2}]")
        # convert the 16-bit data to 32-bit float
        ccd = yfu.CCDData_astype(ccd, dtype='float32')
        filename += 's'
        # save the result as a FITS file
        ccd.write(newfitspath / filename, overwrite=True)
    
    
#%%
# Switch the path and working directory to the newly trimmed data
    
os.chdir(newfitspath)
allfitsname = glob.glob('*.fits')
allfitspath = list(newfitspath.glob('*.fits'))
allfitspath.sort()
allfitsname.sort()
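# Side note on the fits_section strings used above: they follow the FITS/IRAF
# convention, i.e. 1-based, endpoint-inclusive, and ordered [x1:x2, y1:y2]
# with x along NAXIS1 (columns), unlike 0-based numpy slicing. A minimal
# self-contained check on dummy data:
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData
from ccdproc import trim_image

dummy = CCDData(np.arange(100 * 200, dtype=float).reshape(100, 200), unit=u.adu)
a = trim_image(dummy, fits_section='[11:60, 21:70]')  # x = 11..60, y = 21..70
b = dummy[20:70, 10:60]                               # same region, 0-based
print(a.shape, b.shape, np.array_equal(a.data, b.data))  # (50, 50) (50, 50) True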
Esempio n. 22
0
)
#parser.add_argument('--gain', dest = 'gain', default= 1.3, help = 'gain in e-/ADU.  default is 1.3, which applies to HDI camera')
#parser.add_argument('--rdnoise', dest = 'rdnoise', default= 7.3, help = 'gain in e-/ADU.  default is 1.3, which applies to HDI camera')

args = parser.parse_args()
files = sorted(glob.glob(args.filestring + '*.fits'))
nfiles = len(files)

poly_model = models.Polynomial1D(1)

for f in files:
    # read in image
    # was having trouble getting image into the format that ccdproc wants
    print('working on', f)
    # convert data to CCDData format and save header
    ccd = CCDData.read(f, unit='adu')

    # subtract overscan
    o_subtracted = ccdproc.subtract_overscan(ccd,
                                             fits_section=args.irafbiassec,
                                             model=poly_model)
    #header['HISTORY'] = 'overscan subtracted '+args.irafbiassec

    # trim image
    head_updates = {'CCDSEC': args.iraftrimsec, 'BIASSEC': args.irafbiassec}
    trimmed = ccdproc.trim_image(o_subtracted,
                                 fits_section=args.iraftrimsec,
                                 add_keyword=head_updates)

    trimmed.write('tr' + f, overwrite=True)
Esempio n. 23
0
def ccdproc(images,
            master_bias=None,
            master_dark=None,
            master_flat=None,
            masks=None,
            trim=None):
    """
    Perform image reduction (bias, dark and flat correction) on ccd data.

    Parameters
    ----------
    images : generator, type of 'ccdproc.CCDData'
        Images to be combined.

    master_bias : ccdproc.CCDData
        Master Bias image.

    master_dark : ccdproc.CCDData
        Master Dark image.

    master_flat : ccdproc.CCDData
        Master Flat image.

    masks : str, list of str, optional
        Area to be masked.

    trim : str, optional
        Trim section.

    Yields
    ------
    'ccdproc.CCDData'
        yield the next 'ccdproc.CCDData'.

    Examples
    --------

    >>> from tuglib.io import FitsCollection
    >>> from tuglib.reduction import bias_combine, dark_combine, flat_combine
    >>> from tuglib.reduction import ccdproc
    >>>
    >>> path = '/home/user/data/'
    >>> masks = ['[:, 1023:1025]', '[:1023, 56:58]']
    >>> trim = '[:, 24:2023]'
    >>>
    >>> images = FitsCollection(location=path, gain=0.57, read_noise=4.11)
    >>>
    >>> bias_ccds = images.ccds(OBJECT='BIAS', trim=trim, masks=masks)
    >>> dark_ccds = images.ccds(OBJECT='DARK', trim=trim, masks=masks)
    >>> flat_ccds = images.ccds(OBJECT='FLAT', FILTER='V',
                                trim=trim, masks=masks)
    >>>
    >>> sci_ccds = images.ccds(OBJECT='Star', FILTER='V',
                               trim=trim, masks=masks)
    >>>
    >>> master_bias = bias_combine(bias_ccds, method='median')
    >>> master_dark = dark_combine(dark_ccds, master_bias, method='median')
    >>> master_flat = flat_combine(flat_ccds, master_bias, master_dark)
    >>>
    >>> # Yield a generator which point reduced images.
    >>> reduced_ccds = ccdproc(sci_ccds, master_bias, master_dark, master_flat)
    """

    if not isinstance(images, types.GeneratorType):
        raise TypeError(
            "'images' should be a generator of 'ccdproc.CCDData' objects.")

    if not isinstance(master_bias, (type(None), CCDData)):
        raise TypeError(
            "'master_bias' should be 'None' or 'ccdproc.CCDData' object.")

    if not isinstance(master_dark, (type(None), CCDData)):
        raise TypeError(
            "'master_dark' should be 'None' or  'ccdproc.CCDData' object.")

    if not isinstance(master_flat, (type(None), CCDData)):
        raise TypeError(
            "'master_flat' should be 'None' or  'ccdproc.CCDData' object.")

    if masks is not None:
        if not isinstance(masks, (str, list, type(None))):
            raise TypeError(
                "'masks' should be 'str', 'list' or 'None' object.")

    if trim is not None:
        if not isinstance(trim, str):
            raise TypeError("'trim' should be a 'str' object.")

    mask = None

    if master_dark is not None:
        dark_exptime = master_dark.meta['EXPTIME'] * u.second

    for ccd in images:
        if (mask is None) and (masks is not None):
            shape = ccd.shape
            mask = make_mask(shape, masks)
            ccd.mask = mask

        ccd = trim_image(ccd, trim)

        if master_bias is not None:
            ccd = subtract_bias(ccd, master_bias)

        if master_dark is not None:
            data_exptime = ccd.meta['EXPTIME'] * u.second
            ccd = subtract_dark(ccd,
                                master_dark,
                                dark_exposure=dark_exptime,
                                data_exposure=data_exptime,
                                exposure_time='EXPTIME',
                                exposure_unit=u.second,
                                scale=True)

        if master_flat is not None:
            ccd = flat_correct(ccd, master_flat)

        yield ccd
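# A brief usage sketch of the ccdproc() generator defined above, following its
# own docstring example; the data path and the OBJECT/FILTER values are
# hypothetical.
from tuglib.io import FitsCollection
from tuglib.reduction import bias_combine, dark_combine, flat_combine

images = FitsCollection(location='/home/user/data/', gain=0.57, read_noise=4.11)

bias_ccds = images.ccds(OBJECT='BIAS')
dark_ccds = images.ccds(OBJECT='DARK')
flat_ccds = images.ccds(OBJECT='FLAT', FILTER='V')
sci_ccds = images.ccds(OBJECT='Star', FILTER='V')

master_bias = bias_combine(bias_ccds, method='median')
master_dark = dark_combine(dark_ccds, master_bias, method='median')
master_flat = flat_combine(flat_ccds, master_bias, master_dark)

# Consume the lazy generator and write each reduced frame out.
for i, reduced in enumerate(ccdproc(sci_ccds, master_bias,
                                    master_dark, master_flat)):
    reduced.write(f'reduced_{i:03d}.fits', overwrite=True)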
Esempio n. 24
0
     if fil[-1] == '4':
         sci_full = CCDData(np.concatenate(sci_final, axis=1),
                            header=header,
                            unit=u.electron / u.second)
     elif fil[-1] == '8':
         sci_full = CCDData(np.concatenate([
             sci_final[1],
             np.fliplr(sci_final[0]), sci_final[3],
             np.fliplr(sci_final[2]), sci_final[5],
             np.fliplr(sci_final[4]), sci_final[7],
             np.fliplr(sci_final[6])
         ],
                                           axis=1),
                            header=header,
                            unit=u.electron / u.second)
     sci_full = ccdproc.trim_image(
         sci_full[700:2400, 4100:6100])  #[700:2400,300:7300])
     sci_full.header['DATASEC'] = (
         '[1:%s,1:%s]' %
         (np.shape(sci_full.data)[1], np.shape(sci_full.data)[0]))
     reprojected.append(sci_full)
     reprojected[i].write(sci_file.replace(sci_path, red_path),
                          overwrite=True)
 align = align_stars(reprojected)
 com = [CCDData(x, unit=u.electron / u.second) for x in align]
 sci_med = ccdproc.combine(reprojected,
                           method='median',
                           sigma_clip=True,
                           sigma_clip_func=np.ma.median)
 sci_med.header = reprojected[0].header
 ra = sci_med.header['RA'].split(':')
 dec = sci_med.header['DEC'].split(':')
Esempio n. 25
0
    else:
        biases = im[i]
#if there is just one, make it two of the same for the combine!
if (len(im) == 1):
    biases += ','+im[0]
bias_path += 'master/'
try:
    os.mkdir(bias_path)
except:
    pass    
bias_path += bias_master
logme('Creating master bias frame (%s)...'%(bias_path))
bias = ccdproc.combine(biases, method='median', unit='adu', add_keyword=False)
#trim it, if necessary    
if(len(trim_range) > 0):
    bias = ccdproc.trim_image(bias, trim_range)
#write master frame to file    
hdulist = bias.to_hdu()
hdulist.writeto(bias_path, overwrite=True)
    
#dark
#create master dark frame
im=glob.glob(dark_path+'*.fits')+glob.glob(dark_path+'*.fit')
if(len(im) <= 0):
    logme('Error. Dark calibration frame(s) not found (%s).' % dark_path)
    log.close()
    sys.exit(-1)
darks = None
bias_header = None
for i in range(0,len(im)):
    #is (any) dark bias corrected?
Esempio n. 26
0
from astropy.io import fits
import ccdproc
from os import listdir
import numpy as np

images = listdir('./data_corr/i')
for i in images:
	img = ccdproc.CCDData.read('./data_corr/i/'+i, unit='adu')
	trimmed = ccdproc.trim_image(img,fits_section='[150:1800, 150:1800]')
	trimmed.write('./data_corr_trimmed/i/'+i)

images = listdir('./data_corr/v')
for i in images:
	img = ccdproc.CCDData.read('./data_corr/v/'+i, unit='adu')
	trimmed = ccdproc.trim_image(img,fits_section='[150:1800, 150:1800]')
	trimmed.write('./data_corr_trimmed/v/'+i)
Esempio n. 27
0
    def ccds(self, masks=None, trim=None, **kwargs):
        """
        Generator that yields each 'ccdproc.CCDData' objects in the collection.

        Parameters
        ----------
        masks : str, list of str, optional
            Area to be masked.

        trim : str, optional
            Trim section.

        **kwargs :
            Any additional keywords are used to filter the items returned.

        Yields
        ------
        'ccdproc.CCDData'
            yield the next 'ccdproc.CCDData' in the collection.

        Examples
        --------
        >>> from tuglib.io import FitsCollection
        >>>
        >>> mask = '[:, 1000:1046]'
        >>> trim = '[100:1988, :]'
        >>> images = FitsCollection(
                location='/home/user/data/fits/', gain=0.57, read_noise=4.11)
        >>> biases = images.ccds(OBJECT='BIAS', masks=mask, trim=trim)
        """

        if masks is not None:
            if not isinstance(masks, (str, list, type(None))):
                raise TypeError(
                    "'masks' should be 'str', 'list' or 'None' object.")

        if trim is not None:
            if not isinstance(trim, str):
                raise TypeError("'trim' should be a 'str' object.")

        tmp = np.full(len(self._collection), True, dtype=bool)

        if len(kwargs) != 0:
            for key, val in kwargs.items():
                if key == 'filename':
                    file_mask = np.array([
                        fnmatch.fnmatch(filename, kwargs['filename'])
                        for filename in self._filenames_without_path
                    ],
                                         dtype=bool)

                    tmp = tmp & file_mask
                else:
                    tmp = tmp & (self._collection[key.upper()] == val)

        if np.count_nonzero(tmp) == 0:
            yield None
            return

        x = self._collection[tmp]['NAXIS1'][0]
        y = self._collection[tmp]['NAXIS2'][0]
        shape = (y, x)

        mask = None
        if masks is not None:
            mask = make_mask(shape, masks)

        for filename in self._collection[tmp]['filename']:
            ccd = CCDData.read(filename,
                               unit=self._unit,
                               output_verify='silentfix+ignore')

            ccd.mask = mask
            ccd = trim_image(ccd, trim)

            if (self._gain is not None) and (self._read_noise is not None):
                data_with_deviation = create_deviation(
                    ccd,
                    gain=self._gain,
                    readnoise=self._read_noise,
                    disregard_nan=self._disregard_nan)

                gain_corrected = gain_correct(data_with_deviation, self._gain)

                yield gain_corrected
            else:
                yield ccd
Esempio n. 28
0
ind_flat2_2 = (logt['obj']=='flat')*(logt['exp_time']==36)
ind_flat3 = (logt['obj']=='flat')*(logt['exp_time']==144)

flat1_1 = ccdproc.combine(','.join(logt['filename'][ind_flat1_1]), unit='adu', method='median')
flat1_2 = ccdproc.combine(','.join(logt['filename'][ind_flat1_2]), unit='adu', method='median')
flat2_1 = ccdproc.combine(','.join(logt['filename'][ind_flat2_1]), unit='adu', method='median')
flat2_2 = ccdproc.combine(','.join(logt['filename'][ind_flat2_2]), unit='adu', method='median')
flat3 = ccdproc.combine(','.join(logt['filename'][ind_flat3]), unit='adu', method='median')
flat1 = ccdproc.combine([flat1_1.multiply(9.).divide(8.), flat1_2], method='average')
flat2 = ccdproc.combine([flat2_1.multiply(36.).divide(32.), flat2_2], method='average')

flat1_bias = ccdproc.subtract_bias(flat1, bias)
flat2_bias = ccdproc.subtract_bias(flat2, bias)
flat3_bias = ccdproc.subtract_bias(flat3, bias)

flat1_bias_trim = ccdproc.trim_image(flat1_bias[:, :4096])
flat2_bias_trim = ccdproc.trim_image(flat2_bias[:, :4096])
flat3_bias_trim = ccdproc.trim_image(flat3_bias[:, :4096])


""" combine flat """
flat_list = [flat1_bias_trim, flat2_bias_trim, flat3_bias_trim]

# find & combine & group apertures
ap_comb = combine_apertures(flat_list, n_jobs=10)
cheb_coefs, ap_uorder_interp = group_apertures(ap_comb, start_col=2100, order_dist=10)

# combine flat
flat_comb, flat_origin = combine_flat(flat_list, ap_uorder_interp, sat_count=45000, p=95)
flat_comb = ccdproc.CCDData(flat_comb, unit='adu')
Esempio n. 29
0
def stack_FITS(fitslist=None,
               summary_table=None,
               extension=0,
               unit='adu',
               table_filecol="file",
               trim_fits_section=None,
               loadccd=True,
               type_key=None,
               type_val=None):
    ''' Stacks the FITS files specified in fitslist
    Parameters
    ----------
    fitslist: None, list of path-like, or list of CCDData
        The list of path to FITS files or the list of CCDData to be
        stacked. It is useful to give list of CCDData if you have
        already stacked/loaded FITS file into a list by your own
        criteria. If ``None`` (default), you must give ``fitslist`` or
        ``summary_table``. If it is not ``None``, this function will do
        very similar job to that of ``ccdproc.combine``. Although it is
        not a good idea, a mixed list of CCDData and paths to the files
        is also acceptable.

    summary_table: None, pandas.DataFrame or astropy.table.Table
        The table which contains the metadata of files. If there are
        many FITS files and you want to use stacking many times, it is
        better to make a summary table by ``filemgmt.make_summary`` and
        use that instead of opening FITS files' headers every time you
        call this function. If you want to use ``summary_table`` instead
        of ``fitslist`` and have set ``loadccd=True``, you must not have
        ``None`` or ``NaN`` value in the
        ``summary_table[table_filecol]``.

    extension: int or str
        The extension of FITS to be stacked. For single extension, set
        it as 0.

    unit: Unit or str, optional
        The unit of the CCDs to be loaded.
        Used only when ``fitslist`` is not a list of ``CCDData`` and
        ``loadccd`` is ``True``.

    table_filecol: str
        The column name of the ``summary_table`` which contains the path
        to the FITS files.

    trim_fits_section : str or None, optional
        The ``fits_section`` of ``ccdproc.trim_image``. Region of
        ``ccd`` to be kept after trimming; see ``ccdproc.trim_image``
        for details.
        Default is ``None``.

    loadccd: bool, optional
        Whether to return file paths or loaded CCDData. If ``False``, it
        is a function to select FITS files using ``type_key`` and
        ``type_val`` without using much memory.
        This is ignored if ``fitslist`` is given and composed of
        ``CCDData`` objects.

    type_key, type_val: str, list of str
        The header keyword for the ccd type, and the value you want to
        match.

    Returns
    -------
    matched: list of Path or list of CCDData
        list containing Path to files if ``loadccd`` is ``False``.
        Otherwise it is a list containing loaded CCDData after loading
        the files. If ``ccdlist`` is given a priori, list of CCDData
        will be returned regardless of ``loadccd``.
    '''
    def _parse_val(value):
        val = str(value)
        if val.lstrip('+-').isdigit():  # if int
            result = int(val)
        else:
            try:
                result = float(val)
            except ValueError:
                result = str(val)
        return result

    def _check_mismatch(row):
        mismatch = False
        for k, v in zip(type_key, type_val):
            hdr_val = _parse_val(row[k])
            parse_v = _parse_val(v)
            if (hdr_val != parse_v):
                mismatch = True
                break
        return mismatch

    if ((fitslist is not None) + (summary_table is not None) != 1):
        raise ValueError(
            "One and only one of fitslist or summary_table must " +
            "be not None.")

    # If fitslist
    if fitslist is not None:
        table_mode = False
        try:
            fitslist = list(fitslist)
        except TypeError:
            raise TypeError("fitslist must be convertible to list. " +
                            f"It's now {type(fitslist)}.")

    # If summary_table
    if summary_table is not None:
        table_mode = True
        if ((not isinstance(summary_table, Table))
                and (not isinstance(summary_table, pd.DataFrame))):
            raise TypeError(
                "summary_table must be an astropy Table or Pandas " +
                f"DataFrame. It's now {type(summary_table)}.")

    # Check for type_key and type_val
    type_key, type_val, _ = chk_keyval(type_key=type_key,
                                       type_val=type_val,
                                       group_key=None)

    # Setting whether to group
    grouping = False
    if len(type_key) > 0:
        grouping = True

    print("Analyzing FITS... ", end='')
    # Set fitslist and summary_table based on the given input and grouping.
    if table_mode:
        if isinstance(summary_table, Table):
            summary_table = summary_table.to_pandas()
        fitslist = summary_table[table_filecol].tolist()
    else:
        if grouping:
            summary_table = make_summary(fitslist,
                                         extension=extension,
                                         verbose=True,
                                         fname_option='relative',
                                         keywords=type_key,
                                         sort_by=None,
                                         pandas=True)
        # else: no need to make summary_table.

    print("Done", end='')

    if loadccd:
        print(" and loading FITS... ")
    else:
        print(".")

    matched = []

    # Append appropriate CCDs or filepaths to matched
    if grouping:  # summary_table is used.
        for i, row in summary_table.iterrows():
            mismatch = _check_mismatch(row)
            if mismatch:  # skip this row (file)
                continue

            # if not skipped:
            # TODO: Is it better to remove Path here?
            if isinstance(fitslist[i], CCDData):
                matched.append(fitslist[i])
            else:  # it must be a path to the file
                fpath = Path(fitslist[i])
                if loadccd:
                    ccd_i = load_ccd(fpath, extension=extension, unit=unit)
                    if trim_fits_section is not None:
                        ccd_i = trim_image(ccd_i,
                                           fits_section=trim_fits_section)
                    matched.append(ccd_i)
                else:
                    matched.append(fpath)
    else:  # summary_table is not used.
        for item in fitslist:
            if isinstance(item, CCDData):
                matched.append(item)
            else:
                if loadccd:
                    ccd_i = load_ccd(item, extension=extension, unit=unit)
                    if trim_fits_section is not None:
                        ccd_i = trim_image(ccd_i,
                                           fits_section=trim_fits_section)
                    matched.append(ccd_i)
                else:  # TODO: Is it better to remove Path here?
                    matched.append(Path(item))

    # Generate warning OR information messages
    if len(matched) == 0:
        if grouping:
            warn('No FITS file had "{:s} = {:s}". '.format(
                str(type_key), str(type_val)) +
                 "Maybe an int/float/str mismatch?")
        else:
            warn('No FITS file found')
    else:
        if grouping:
            N = len(matched)
            ks = str(type_key)
            vs = str(type_val)
            if loadccd:
                print(f'{N} FITS files with "{ks} = {vs}" are loaded.')
            else:
                print(f'{N} FITS files with "{ks} = {vs}" are selected.')
        else:
            if loadccd:
                print('{:d} FITS files are loaded.'.format(len(matched)))

    return matched
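# A minimal usage sketch of stack_FITS() as defined above; the directory,
# file pattern and the OBJECT == 'BIAS' criterion are hypothetical.
from pathlib import Path

bias_candidates = sorted(Path('./raw').glob('*.fits'))

# Select matching frames without loading pixel data (returns Paths).
bias_paths = stack_FITS(fitslist=bias_candidates,
                        extension=0,
                        unit='adu',
                        type_key=['OBJECT'],
                        type_val=['BIAS'],
                        loadccd=False)

# Load and trim the same selection in one call (returns CCDData objects).
bias_ccds = stack_FITS(fitslist=bias_candidates,
                       type_key=['OBJECT'],
                       type_val=['BIAS'],
                       trim_fits_section='[1:1024, 1:1024]',
                       loadccd=True)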
Esempio n. 30
0
def correctData(filename, master_bias, master_flat, filetype):
    """
    Correct a science image using the available
    master calibrations. Skip a calibration step if the
    master frame does not exist.

    No reduced file is written in this new scheme.
    Instead, the corrected data is passed directly
    to the phot() routine, photometry is done as per
    the configuration and the photometry is written out
    only.

    TODO: Finish docstring
    """
    print('Reducing {0:s}...'.format(filename))
    with fits.open(filename) as fitsfile:
        # correct times for science spectra,
        # don't bother for arcs
        hdr = fitsfile[0].header
        if filetype == 'science':
            half_exptime = hdr[EXPTIME_KEYWORD]/2.
            utstart = hdr[UTSTART_KEYWORD]
            dateobs = hdr[DATEOBS_KEYWORD]
            ra = hdr[RA_KEYWORD]
            dec = hdr[DEC_KEYWORD]
            time_start = Time('{}T{}'.format(dateobs, utstart),
                              scale='utc',
                              format='isot',
                              location=OBSERVATORY)
            # correct to mid exposure time
            jd_mid = time_start + half_exptime*u.second
            ltt_bary, ltt_helio = getLightTravelTimes(ra, dec, jd_mid)
            time_bary = jd_mid.tdb + ltt_bary
            time_helio = jd_mid.utc + ltt_helio
            hdr['BJD-MID'] = time_bary.jd
            hdr['HJD-MID'] = time_helio.jd
            hdr['JD-MID'] = jd_mid.jd
            hdr['UT-MID'] = jd_mid.isot
    ccd = CCDData.read(filename, unit=u.adu)
    if master_bias:
        ccd = subtract_bias(ccd, master_bias)
    else:
        print('No master bias, skipping correction...')
    if master_flat:
        ccd = flat_correct(ccd, master_flat)
    else:
        print('No master flat, skipping correction...')
    # after calibrating we get np.float64 data
    # if there are no calibrations we maintain dtype = np.uint16
    # sep weeps
    # fix this by doing the following
    if isinstance(ccd.data[0][0], np.uint16):
        ccd.data = ccd.data.astype(np.float64)
    # trim the data
    ccd_trimmed = trim_image(ccd[1000:3001, :])
    # write out the trimmed file and the updated header
    #ccd_trimmed.write(filename, hdr, clobber=True)
    trimmed_filename = '{}_t.fits'.format(filename.split('.')[0])
    fits.writeto(trimmed_filename, ccd_trimmed.data, hdr)
    # remove the old untrimmed data
    os.system('rm {}'.format(filename))
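# A standalone sketch of the mid-exposure light-travel-time correction done
# above, using astropy directly instead of the getLightTravelTimes() helper;
# the site coordinates, target and times are placeholders.
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.time import Time

site = EarthLocation(lat=28.76 * u.deg, lon=-17.88 * u.deg, height=2350 * u.m)
target = SkyCoord('10:00:00 +20:00:00', unit=(u.hourangle, u.deg))

time_start = Time('2020-01-01T01:23:45', scale='utc', format='isot',
                  location=site)
jd_mid = time_start + (30.0 / 2.0) * u.second   # half of a 30 s exposure

ltt_bary = jd_mid.light_travel_time(target, kind='barycentric')
ltt_helio = jd_mid.light_travel_time(target, kind='heliocentric')
print((jd_mid.tdb + ltt_bary).jd)    # BJD-MID
print((jd_mid.utc + ltt_helio).jd)   # HJD-MID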
Esempio n. 31
0
def bdf_process(ccd,
                output=None,
                mbiaspath=None,
                mdarkpath=None,
                mflatpath=None,
                fits_section=None,
                calc_err=False,
                unit='adu',
                gain=None,
                rdnoise=None,
                gain_key="GAIN",
                rdnoise_key="RDNOISE",
                gain_unit=u.electron / u.adu,
                rdnoise_unit=u.electron,
                dark_exposure=None,
                data_exposure=None,
                exposure_key="EXPTIME",
                exposure_unit=u.s,
                dark_scale=False,
                min_value=None,
                norm_value=None,
                verbose=True,
                output_verify='fix',
                overwrite=True,
                dtype="float32"):
    ''' Do bias, dark and flat process.
    Parameters
    ----------
    ccd: array-like
        The ccd to be processed.
    output: path-like
        Path to which the processed frame is written.
    '''

    proc = CCDData(ccd)
    hdr_new = proc.header

    if mbiaspath is None:
        do_bias = False
        # mbias = CCDData(np.zeros_like(ccd), unit=unit)
    else:
        do_bias = True
        mbias = CCDData.read(mbiaspath, unit=unit)
        hdr_new.add_history(f"Bias subtracted using {mbiaspath}")

    if mdarkpath is None:
        do_dark = False
        mdark = None
    else:
        do_dark = True
        mdark = CCDData.read(mdarkpath, unit=unit)
        hdr_new.add_history(f"Dark subtracted using {mdarkpath}")
        if dark_scale:
            hdr_new.add_history(
                f"Dark scaling {dark_scale} using {exposure_key}")

    if mflatpath is None:
        do_flat = False
        # mflat = CCDData(np.ones_like(ccd), unit=unit)
    else:
        do_flat = True
        mflat = CCDData.read(mflatpath)
        hdr_new.add_history(f"Flat corrected using {mflatpath}")

    if fits_section is not None:
        proc = trim_image(proc, fits_section)
        hdr_new.add_history(f"Trim by FITS section {fits_section}")
        # Trim the calibration frames only if they were actually loaded.
        if do_bias:
            mbias = trim_image(mbias, fits_section)
        if do_dark:
            mdark = trim_image(mdark, fits_section)
        if do_flat:
            mflat = trim_image(mflat, fits_section)

    if do_bias:
        proc = subtract_bias(proc, mbias)

    if do_dark:
        proc = subtract_dark(proc,
                             mdark,
                             dark_exposure=dark_exposure,
                             data_exposure=data_exposure,
                             exposure_time=exposure_key,
                             exposure_unit=exposure_unit,
                             scale=dark_scale)
        # if calc_err and verbose:
        #     if mdark.uncertainty is not None:
        #         print("Dark has uncertainty frame: Propagate in arithmetics.")
        #     else:
        #         print("Dark does NOT have uncertainty frame")

    if calc_err:
        if gain is None:
            gain = fu.get_from_header(hdr_new,
                                      gain_key,
                                      unit=gain_unit,
                                      verbose=verbose,
                                      default=1.).value

        if rdnoise is None:
            rdnoise = fu.get_from_header(hdr_new,
                                         rdnoise_key,
                                         unit=rdnoise_unit,
                                         verbose=verbose,
                                         default=0.).value

        err = fu.make_errmap(proc, gain_epadu=gain, subtracted_dark=mdark)

        proc.uncertainty = StdDevUncertainty(err)
        errstr = (f"Error calculated using gain = {gain:.3f} [e/ADU] and " +
                  f"rdnoise = {rdnoise:.3f} [e].")
        hdr_new.add_history(errstr)

    if do_flat:
        if calc_err:
            if (mflat.uncertainty is not None) and verbose:
                print("Flat has uncertainty frame: Propagate in arithmetics.")
                hdr_new.add_history(
                    "Flat had uncertainty and is also propagated.")

        proc = flat_correct(proc,
                            mflat,
                            min_value=min_value,
                            norm_value=norm_value)

    proc = fu.CCDData_astype(proc, dtype=dtype)
    proc.header = hdr_new

    if output is not None:
        proc.write(output, output_verify=output_verify, overwrite=overwrite)

    return proc
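# A minimal usage sketch of bdf_process() as defined above; every file path
# below is a placeholder and the FITS section is arbitrary.
from astropy.nddata import CCDData

raw = CCDData.read('object_0001.fits', unit='adu')
reduced = bdf_process(raw,
                      output='object_0001_bdf.fits',
                      mbiaspath='master_bias.fits',
                      mdarkpath='master_dark.fits',
                      mflatpath='master_flat.fits',
                      fits_section='[1:1024, 1:1024]',
                      calc_err=True,
                      unit='adu',
                      exposure_key='EXPTIME',
                      dark_scale=True)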
Esempio n. 32
0
def flux_extraction(file_name, path, out_path, images=True):
    """
	Parameters
	----------
	----------
	file_name : str
			Name of the image/telluric file
			from which flux has to be extracted
	path : str
			Path of the desired image file
	out_path : str
			Path of the output data and/or image file
	images : bool
			True if one wants to save visualization of flux data
			False if not.
			Default is True
	----------
	returns
	----------
	flux : data file
			.dat file containing the flux at
			various pixel values
			Path of this file would be similar to
			that of image file.
	----------
	"""
    pt = Path(path)
    f1 = ccdp.ImageFileCollection(pt)
    ccd = CCDData.read(path + file_name)  # + '.fits')

    # Trimming the Image
    trimmed = ccdp.trim_image(ccd, fits_section='[1:256, 100:1000]')
    trimmed.meta['TRIM'] = True
    trimmed.header = ccd.header
    #trimmed.write(file_name + '_trim.fits')

    # Reading the data from Trimmed image
    data = trimmed.data

    # Creating a function to detect the edges of slit
    # For lower edge
    def xlow(raw_data):
        """
		Parameters
		----------
		----------
		raw_data : numpy.ndarray
				Array containing flux at some particular wavelength
		----------
		returns
		----------
		number : float
				A pixel number showing the lower edge of slit
		----------
		"""
        j = 0
        for i in range(int(len(raw_data) / 5)):
            st = np.std(raw_data[j:j + 5])
            xlw = 0
            if st < 2:
                xlw = j
            if xlw != 0:
                break
            j = j + 5
        return xlw

    # For upper edge
    def xup(raw_data):
        """
		Parameters
		----------
		----------
		raw_data : numpy.ndarray
				Array containing flux at some particular wavelength
		----------
		returns
		----------
		number : float
				A pixel number showing the upper edge of slit
		----------
		"""
        j = 255
        for i in range(int(len(raw_data) / 5)):
            st = np.std(raw_data[j - 5:j])
            xup = 0
            if st < 2:
                xup = j
            if xup != 0:
                break
            j = j - 5
        return xup

    # Defining line and inverse line
    def line(x, m, c):
        return m * x + c

    def inv_line(x, m, c):
        bc = (x - c) / m
        return bc

    # Detecting the edges of the spectrum
    ys = np.array([150, 300, 450, 600, 750])
    xs_left = np.array([])
    xs_right = np.array([])
    xs_mid = np.array([])
    for i in range(len(ys)):
        dd1 = data[ys[i]]
        xll = xlow(dd1)
        xs_left = np.hstack((xs_left, xll))
        xuu = xup(dd1)
        xs_right = np.hstack((xs_right, xuu))

    popt_l, pcov_l = cft(line, xs_left, ys)
    popt_r, pcov_r = cft(line, xs_right, ys)

    # Detecting a line where spectrum could reside
    for i in range(len(ys)):
        ran_l = inv_line(ys[i], popt_l[0], popt_l[1])
        ran_r = inv_line(ys[i], popt_r[0], popt_r[1])
        xd1 = data[ys[i]]
        xd = xd1[int(ran_l):int(ran_r)]
        ma = np.max(xd)
        ab = np.where(xd == ma)
        xs_mid = np.hstack((xs_mid, ab[0][0] + ran_l))

    popt_m, pcov_m = cft(line, xs_mid, ys)

    # Finding total flux
    def total_flux(lam, xlim=20):
        ydata = data[lam]
        xmid = inv_line(lam, popt_m[0], popt_m[1])
        xlow = xmid - xlim
        xup = xmid + xlim
        total_flux1 = 0
        xdata = np.arange(int(xlow), int(xup + 1), 1)
        for i in range(len(xdata)):
            total_flux1 = total_flux1 + ydata[xdata[i]]
        return total_flux1

    # Flux as a function of pixel
    flux = np.array([])
    y11 = np.arange(0, 900, 1)
    for i in range(len(y11)):
        f11 = total_flux(y11[i])
        flux = np.hstack((flux, f11))

    # Saving the image file for flux
    if images == True:
        fig1 = plt.figure(figsize=(20, 10))
        plt.plot(flux)
        plt.xlabel('Pixel Number')
        plt.ylabel('Total Flux')
        plt.title('Total flux for ' + file_name + ' observation')
        plt.grid()
        plt.savefig(out_path + '/' + file_name + '_flux.png')
        plt.close(fig1)

    # Saving Data file of the flux
    f1 = open(out_path + '/' + file_name + '_flux.dat', 'w')
    f1.write('#Pixel\t\tFlux\n')
    for i in range(len(y11)):
        f1.write(str(y11[i]) + '\t\t' + str(flux[i]) + '\n')
    f1.close()
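# A hypothetical call to flux_extraction() as defined above; the file name
# and directories are placeholders (note that 'path' is concatenated directly
# with the file name, so it needs a trailing slash).
flux_extraction('HD12345_J.fits',
                path='/data/night1/',
                out_path='/data/night1/extracted',
                images=True)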
Esempio n. 33
0
def hrs_process(image_name, ampsec=[], oscansec=[], trimsec=[],
                masterbias=None, error=False, bad_pixel_mask=None, flip=False,
                rdnoise=None, oscan_median=True, oscan_model=None):
    """Processing required for HRS observations.  If the images have multiple
       amps, then this will process each part of the image and recombine them
       into for the final results

    Parameters
    ----------
    image_name: str
       Name of file to be processed

    ampsec: list
       List of ampsections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    oscansec: list
       List of overscan sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    trimsec: list
       List of trim sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    error: boolean
        If True, create an uncertainty array for ccd

    masterbias: None, `~numpy.ndarray`,  or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.

    bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.


    flip: boolean
        If True, the image will be flipped such that the orders run from the
        bottom of the image to the top and the dispersion runs from the left
        to the right.

    rdnoise: None or `~astropy.Quantity`
        Read noise for the observations.  The read noise should be in
        `~astropy.units.electron`

    oscan_median :  bool, optional
        If true, takes the median of each line.  Otherwise, uses the mean

    oscan_model :  `~astropy.modeling.Model`, optional
        Model to fit to the data.  If None, returns the values calculated
        by the median or the mean.

    Returns
    -------
    ccd: `~ccdproc.CCDData`
        Data processed and, if the image has multiple amps, recombined
        into a single frame.


    Notes
    -----

    The format of the `fits_section` string follow the rules for slices that
    are consistent with the FITS standard (v3) and IRAF usage of keywords like
    TRIMSEC and BIASSEC. Its indexes are one-based, instead of the
    python-standard zero-based, and the first index is the one that increases
    most rapidly as you move through the array in memory order, opposite the
    python ordering.

    The 'fits_section' argument is provided as a convenience for those who are
    processing files that contain TRIMSEC and BIASSEC. The preferred, more
    pythonic, way of specifying the overscan is to do it by indexing the data
    array directly with the `overscan` argument.

    """
    # read in the data
    ccd = ccdproc.CCDData.read(image_name, unit=u.adu)

    try:
        namps = ccd.header['CCDAMPS']
    except KeyError:
        namps = ccd.header['CCDNAMPS']
        
    # throw errors for the wrong number of amps
    if len(ampsec) != namps:
        raise ValueError('Number of ampsec does not equal number of amps')
    if len(oscansec) != namps:
        raise ValueError('Number of oscansec does not equal number of amps')
    if len(trimsec) != namps:
        raise ValueError('Number of trimsec does not equal number of amps')

    if namps == 1:
        gain = float(ccd.header['gain'].split()[0]) * u.electron / u.adu
        nccd = ccd_process(ccd, oscan=oscansec[0], trim=trimsec[0],
                           error=error, masterbias=masterbias,
                           bad_pixel_mask=bad_pixel_mask, gain=gain,
                           rdnoise=rdnoise, oscan_median=oscan_median,
                           oscan_model=oscan_model)
    else:
        ccd_list = []
        xsize = 0
        for i in range(namps):
            cc = ccdproc.trim_image(ccd, fits_section=ampsec[i])

            gain = float(ccd.header['gain'].split()[i]) * u.electron / u.adu
            ncc = ccd_process(cc, oscan=oscansec[i], trim=trimsec[i],
                              error=False, masterbias=None, gain=gain,
                              bad_pixel_mask=None, rdnoise=rdnoise,
                              oscan_median=oscan_median,
                              oscan_model=oscan_model)
            xsize = xsize + ncc.shape[1]
            ysize = ncc.shape[0]
            ccd_list.append(ncc)

        # now recombine the processed data
        ncc = ccd_list[0]
        data = np.zeros((ysize, xsize))
        if ncc.mask is not None:
            mask = np.zeros((ysize, xsize))
        else:
            mask = None
        if ncc.uncertainty is not None:
            raise NotImplementedError(
                'Support for uncertainties not implemented yet')
        else:
            uncertainty = None

        x1 = 0
        for i in range(namps):
            x2 = x1 + ccd_list[i].data.shape[1]
            data[:, x1:x2] = ccd_list[i].data
            if mask is not None:
                mask[:, x1:x2] = ccd_list[i].mask
            x1 = x2

        nccd = ccdproc.CCDData(data, unit=ncc.unit, mask=mask,
                               uncertainty=uncertainty)
        nccd.header = ccd.header
        nccd = ccd_process(nccd, masterbias=masterbias, error=error, gain=None,
                           rdnoise=rdnoise, bad_pixel_mask=bad_pixel_mask)

    if flip:
        nccd.data = nccd.data[::-1, ::-1]
        if (nccd.mask is not None):
            nccd.mask = nccd.mask[::-1, ::-1]
        if (nccd.uncertainty is not None):
            nccd.uncertainty = nccd.uncertainty[::-1, ::-1]

    return nccd
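# A minimal usage sketch of hrs_process() for a hypothetical two-amplifier
# frame; the file name, section strings and read noise are placeholders.
from astropy import units as u

nccd = hrs_process('hrs_frame_0001.fits',
                   ampsec=['[1:1024, 1:4102]', '[1025:2048, 1:4102]'],
                   oscansec=['[1:50, 1:4102]', '[975:1024, 1:4102]'],
                   trimsec=['[51:1024, 1:4102]', '[1:974, 1:4102]'],
                   masterbias=None,
                   error=False,
                   flip=True,
                   rdnoise=7.0 * u.electron,
                   oscan_median=True)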
Esempio n. 34
0
from astropy.io import fits
import ccdproc
from os import listdir
import numpy as np

images = listdir('./data_corr/i')
for i in images:
    img = ccdproc.CCDData.read('./data_corr/i/' + i, unit='adu')
    trimmed = ccdproc.trim_image(img, fits_section='[150:1800, 150:1800]')
    trimmed.write('./data_corr_trimmed/i/' + i)

images = listdir('./data_corr/v')
for i in images:
    img = ccdproc.CCDData.read('./data_corr/v/' + i, unit='adu')
    trimmed = ccdproc.trim_image(img, fits_section='[150:1800, 150:1800]')
    trimmed.write('./data_corr_trimmed/v/' + i)
Esempio n. 35
0
def ccd_process(ccd, oscan=None, trim=None, error=False, masterbias=None,
                bad_pixel_mask=None, gain=None, rdnoise=None,
                oscan_median=True, oscan_model=None):
    """Perform basic processing on ccd data.

       The following steps can be included:
        * overscan correction
        * trimming of the image
        * create a deviation frame
        * gain correction
        * add a mask to the data
        * subtraction of master bias

       The task returns a processed `ccdproc.CCDData` object.

    Parameters
    ----------
    ccd: `ccdproc.CCDData`
        Frame to be reduced

    oscan: None, str, or, `~ccdproc.ccddata.CCDData`
        For no overscan correction, set to None.  Otherwise provide a region
        of `ccd` from which the overscan is extracted, using the FITS
        conventions for index order and index start, or a
        slice from `ccd` that contains the overscan.

    trim: None or str
        For no trim correction, set to None.  Otherwise provide a region
        of `ccd` from which the image should be trimmed, using the FITS
        conventions for index order and index start.

    error: boolean
        If True, create an uncertainty array for ccd

    masterbias: None, `~numpy.ndarray`,  or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.

    bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.

    gain: None or `~astropy.Quantity`
        Gain value to multiply the image by to convert it to electrons

    rdnoise: None or `~astropy.Quantity`
        Read noise for the observations.  The read noise should be in
        `~astropy.units.electron`


    oscan_median :  bool, optional
        If true, takes the median of each line.  Otherwise, uses the mean

    oscan_model :  `~astropy.modeling.Model`, optional
        Model to fit to the data.  If None, returns the values calculated
        by the median or the mean.

    Returns
    -------
    ccd: `ccdproc.CCDData`
        Reduced ccd

    Examples
    --------

    1. To overscan, trim, and gain correct a data set:

    >>> import numpy as np
    >>> from astropy import units as u
    >>> from ccdproc import CCDData
    >>> from hrsprocess import ccd_process
    >>> ccd = CCDData(np.ones([100, 100]), unit=u.adu)
    >>> nccd = ccd_process(ccd, oscan='[1:10,1:100]', trim='[10:100, 1:100]',
                           error=False, gain=2.0*u.electron/u.adu)


    """
    # make a copy of the object
    nccd = ccd.copy()

    # apply the overscan correction
    if isinstance(oscan, ccdproc.CCDData):
        nccd = ccdproc.subtract_overscan(nccd, overscan=oscan,
                                         median=oscan_median,
                                         model=oscan_model)
    elif isinstance(oscan, six.string_types):
        nccd = ccdproc.subtract_overscan(nccd, fits_section=oscan,
                                         median=oscan_median,
                                         model=oscan_model)
    elif oscan is None:
        pass
    else:
        raise TypeError('oscan is not None, a string, or CCDData object')

    # apply the trim correction
    if isinstance(trim, six.string_types):
        nccd = ccdproc.trim_image(nccd, fits_section=trim)
    elif trim is None:
        pass
    else:
        raise TypeError('trim is not None or a string')

    # create the error frame
    if error and gain is not None and rdnoise is not None:
        nccd = ccdproc.create_deviation(nccd, gain=gain, rdnoise=rdnoise)
    elif error and (gain is None or rdnoise is None):
        raise ValueError(
            'gain and rdnoise must be specified to create error frame')

    # apply the bad pixel mask
    if isinstance(bad_pixel_mask, np.ndarray):
        nccd.mask = bad_pixel_mask
    elif bad_pixel_mask is None:
        pass
    else:
        raise TypeError('bad_pixel_mask is not None or numpy.ndarray')

    # apply the gain correction
    if isinstance(gain, u.quantity.Quantity):
        nccd = ccdproc.gain_correct(nccd, gain)
    elif gain is None:
        pass
    else:
        raise TypeError('gain is not None or astropy.Quantity')

    # test subtracting the master bias
    if isinstance(masterbias, ccdproc.CCDData):
        nccd = ccdproc.subtract_bias(nccd, masterbias)
    elif isinstance(masterbias, np.ndarray):
        nccd.data = nccd.data - masterbias
    elif masterbias is None:
        pass
    else:
        raise TypeError(
            'masterbias is not None, numpy.ndarray,  or a CCDData object')

    return nccd
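# A complementary sketch of ccd_process() on dummy data, adding a master-bias
# subtraction to the docstring example above; all numbers are placeholders.
import numpy as np
from astropy import units as u
import ccdproc

ccd = ccdproc.CCDData(np.ones((100, 100)), unit=u.adu)
# After gain correction the frame is in electrons, so the (dummy) master
# bias must be in electrons as well and must match the trimmed shape.
bias = ccdproc.CCDData(np.zeros((100, 90)), unit=u.electron)

nccd = ccd_process(ccd,
                   oscan='[1:10, 1:100]',
                   trim='[11:100, 1:100]',
                   gain=2.0 * u.electron / u.adu,
                   masterbias=bias)
print(nccd.shape, nccd.unit)   # (100, 90) electron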
Esempio n. 36
0
    def __call__(self, collection=None, masks=None, trim=None, **kwargs):
        """
        Generator that yields each 'ccdproc.CCDData' objects in the collection.

        Parameters
        ----------
        collection : 'FitsCollection.collection' or optional
            Filtered collection.

        masks : str, list of str, optional
            Area to be masked.

        trim : str, optional
            Trim section.

        **kwargs :
            Any additional keywords are used to filter the items returned.

        Yields
        ------
        'ccdproc.CCDData'
            yield the next 'ccdproc.CCDData' in the collection.

        Examples
        --------
        >>> from tuglib.io import FitsCollection
        >>>
        >>> mask = '[:, 1000:1046]'
        >>> trim = '[100:1988, :]'
        >>>
        >>> images = FitsCollection(
                location='/home/user/data/fits/', gain=0.57, read_noise=4.11)
        >>>
        >>> query = images['EXPTIME'] == 100.0
        >>> sub_collections = images[query]
        >>>
        >>> ccds = images(sub_collections, masks=mask, trim=trim)
        """

        if masks is not None:
            if not isinstance(masks, (str, list, type(None))):
                raise TypeError(
                    "'masks' should be 'str', 'list' or 'None' object.")

        if trim is not None:
            if not isinstance(trim, str):
                raise TypeError("'trim' should be a 'str' object.")

        if collection is None:
            # This method is itself a generator, so delegate with 'yield from'.
            yield from self.ccds(masks=masks, trim=trim, **kwargs)
            return

        tmp = np.full(len(collection), True, dtype=bool)

        if len(kwargs) != 0:
            for key, val in kwargs.items():
                tmp = tmp & (collection[key] == val)

        x = collection[tmp]['NAXIS1'][0]
        y = collection[tmp]['NAXIS2'][0]
        shape = (y, x)

        mask = None
        if masks is not None:
            mask = make_mask(shape, masks)

        if (self._gain is not None) and (self._read_noise is not None):
            for filename in collection[tmp]['filename']:
                ccd = CCDData.read(filename, unit=self._unit)

                ccd.mask = mask
                ccd = trim_image(ccd, trim)

                data_with_deviation = create_deviation(
                    ccd,
                    gain=self._gain,
                    readnoise=self._read_noise,
                    disregard_nan=self._disregard_nan)

                gain_corrected = gain_correct(data_with_deviation, self._gain)

                yield gain_corrected
        else:
            for filename in collection[tmp]['filename']:
                ccd = CCDData.read(filename, unit=self._unit)

                ccd.mask = mask
                ccd = trim_image(ccd, trim)

                yield ccd
Esempio n. 37
0
def trim_image(fn, trim, logfile):
    print('trimming image')
    print('trimming image with {}'.format(trim), file=logfile)
    ccd = ccdproc.trim_image(ccdproc.CCDData.read(fn)[trim])
    return ccd
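# A brief usage sketch of the trim_image() wrapper above: here 'trim' is a
# 0-based numpy-style slice applied directly to the CCDData, unlike the
# 1-based fits_section strings used elsewhere. The file and log names are
# placeholders.
import numpy as np

with open('reduction.log', 'a') as logfile:
    trimmed = trim_image('science_0001.fits',
                         np.s_[100:1900, 100:1900],
                         logfile)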
Esempio n. 38
0
def analyze_image(imagefile, dark=None, flat=None, box_size=30, medfilt=False,
                  plot=False, seeing=0, pixelscale=0.1798, verbose=False):
    im = reduce_image(imagefile, dark=dark, flat=flat)
    hdul = fits.open(imagefile)

    # Get info about alignment box positions
    alignment_box_table = Table(hdul[4].data)

    if plot == True:
        plt.figure(figsize=(16,6))

    pixels = []
    targets = []
    for i,box in enumerate(alignment_box_table):
        result = None
        slitno = int(box['Slit_Number'])
        bar_nos = slit_to_bars(slitno)
        bar_pos = [hdul[0].header.get(f'B{b:02d}POS') for b in bar_nos]
        box_pos = np.mean(bar_pos)
        box_pix = physical_to_pixel([[box_pos, slitno]])[0]
        boxat = [int(box_pix[0]), int(box_pix[1])]
        fits_section = f'[{boxat[0]-box_size:d}:{boxat[0]+box_size:d}, '\
                       f'{boxat[1]-box_size:d}:{boxat[1]+box_size:d}]'
        region = trim_image(im, fits_section=fits_section)

        targ_pos = float(box['Target_to_center_of_slit_distance'])/pixelscale

        if plot == True:
            plt.subplot(1,len(alignment_box_table),i+1, aspect='equal')
            plt.title(f"Alignment Box {i+1}\n{fits_section}")
            plt.imshow(region.data, origin='lower',
                       vmin=np.percentile(region.data, 85)*0.95,
                       vmax=region.data.max()*1.02)

#         try:
        result = fit_alignment_box(region, box_size=box_size, verbose=False,
                                   seeing=seeing, medfilt=medfilt)
        star_pix = np.array([result['Star X']+boxat[0]-box_size,
                             result['Star Y']+boxat[1]-box_size])
        fitted_box_pix = np.array([result['Box X']+boxat[0]-box_size,
                                   result['Box Y']+boxat[1]-box_size])
        slitang = 0.22*np.pi/180
        targ_pix_im = (result['Box X']-np.sin(slitang)*targ_pos,
                       result['Box Y']+np.cos(slitang)*targ_pos)
        targ_pix = np.array([targ_pix_im[0]+boxat[0]-box_size,
                             targ_pix_im[1]+boxat[1]-box_size])
        pixels.append(list(star_pix))
        targets.append(list(targ_pix))
        pix_err = targ_pix - star_pix
        pos_err = pix_err*pixelscale

        if plot == True:
            cxy = (result['Star X'], result['Star Y'])
            c = plt.Circle(cxy, result['FWHM pix'], linewidth=2, ec='g', fc='none', alpha=0.3)
            ax = plt.gca()
            ax.add_artist(c)
            plt.plot(result['Star X'], result['Star Y'], 'g.')
#             plt.plot(result['Box X'], result['Box Y'], 'y+', alpha=0.5, ms=10)
            plt.plot(targ_pix_im[0], targ_pix_im[1], 'rx', alpha=0.5)

        if verbose:
            print(f"Alignment Box {i+1} results:")
            print(f"  Sky Amplitude: {result['Sky Amplitude']:.0f} ADU")
            print(f"  Star Amplitude: {result['Star Amplitude']:.0f} ADU")
            print(f"  Star FWHM: {result['FWHM arcsec']:.2f}")
            print(f"  Star Position: {star_pix[0]:.1f}, {star_pix[1]:.1f}")
            print(f"  Target Position: {targ_pix[0]:.1f}, {targ_pix[1]:.1f}")
            print(f"  Position Error: {pos_err[0]:+.2f}, {pos_err[1]:+.2f} arcsec")
#         except:
#             print(f'Alignment Box {i+1} failed: {result}')

        if plot == True:
            plt.xticks([], [])
            plt.yticks([], [])

    # Calculate Transformation
    off_Xpix, off_Ypix, off_R, err_R, A = fit_transforms(pixels, targets)

    off_X = off_Xpix * pixelscale
    off_Y = off_Ypix * pixelscale
    th_XY = 0.10
    th_R = 0.030
    send_X = off_X if abs(off_X) > th_XY else 0
    send_Y = off_Y if abs(off_Y) > th_XY else 0
    send_R = off_R if abs(off_R) > th_R else 0
    print()
    print(f"       Calculated   Err  Send        (Threshold)")
    print(f"Offset X =  {off_X:+.2f}       {send_X:+.2f} arcsec ({th_XY:.2f})")
    print(f"Offset Y =  {off_Y:+.2f}       {send_Y:+.2f} arcsec ({th_XY:.2f})")
    print(f"Rotation = {off_R:+.3f} {err_R:.3f} {send_R:+.3f} deg   ({th_R:.3f})")

    if plot == True:
        plt.show()
Esempio n. 39
0
parser.add_argument('--irafbiassec', dest = 'irafbiassec', default= '[4100:4150, 1:4150]', help = 'biassec in iraf notation.  default is [4100:4150, 1:4150], which applies to HDI camera')
parser.add_argument('--iraftrimsec', dest = 'iraftrimsec', default= '[1:4095, 1:4109]', help = 'trimsec in iraf notation.  default is [1:4095, 1:4109], which applies to HDI camera')
#parser.add_argument('--gain', dest = 'gain', default= 1.3, help = 'gain in e-/ADU.  default is 1.3, which applies to HDI camera')
#parser.add_argument('--rdnoise', dest = 'rdnoise', default= 7.3, help = 'gain in e-/ADU.  default is 1.3, which applies to HDI camera')


args = parser.parse_args()
files = sorted(glob.glob(args.filestring+'*.fits'))
nfiles=len(files)

poly_model = models.Polynomial1D(1)

for f in files:
    # read in image
    # was having trouble getting image into the format that ccdproc wants
    print('working on', f)
    # convert data to CCDData format and save header
    ccd = CCDData.read(f, unit='adu')

    # subtract overscan
    o_subtracted = ccdproc.subtract_overscan(ccd, fits_section = args.irafbiassec, model=poly_model)
    o_subtracted.header['HISTORY'] = 'overscan subtracted ' + args.irafbiassec

    # trim the image and record the new section keyword in the header
    head_updates = {'CCDSEC': args.iraftrimsec}
    trimmed = ccdproc.trim_image(o_subtracted,
                                 fits_section=args.iraftrimsec,
                                 add_keyword=head_updates)

    trimmed.write('tr' + f, overwrite=True)