Example #1
def swarp(hdus, reference_hdu, rate, hdu_idx=None, stacking_mode="MEAN"):
    """
    use the WCS to project all images onto the 'reference_hdu', shifting the CRVAL of each image by rate*dt
    :param stacking_mode: what process to use for combining images MEAN or MEDIAN
    :param hdu_idx: which HDU in each HDUList listed in hdus is the ImageData in?
    :param hdus: list of HDUList
    :param reference_hdu: reference HDUList in hdus
    :param rate: dictionary with the ra/dec shift rates.
    :return: fits.HDUList
    """
    # Project the input images to the same grid using interpolation
    if stacking_mode not in ['MEDIAN', 'MEAN']:
        logging.warning(
            f'{stacking_mode} not available for swarp stack. Setting to MEAN')
        stacking_mode = 'MEAN'
    if hdu_idx is None:
        hdu_idx = HSC_HDU_MAP
    reference_date = mid_exposure_mjd(reference_hdu[0])
    stack_input = []
    logging.info(f'stacking at rate/angle set: {rate}')
    ccd_data = {}
    for hdu in hdus:
        wcs_header = hdu[1].header.copy()
        dt = (mid_exposure_mjd(hdu[0]) - reference_date)
        if rate is not None:
            wcs_header['CRVAL1'] += (rate['dra'] * dt)
            wcs_header['CRVAL2'] += (rate['ddec'] * dt)
        for layer in hdu_idx:
            data = hdu[hdu_idx[layer]].data
            if layer == 'variance':
                data = VarianceUncertainty(data)
            elif layer == 'mask':
                data = bitfield_to_boolean_mask(data,
                                                ignore_flags=STACK_MASK,
                                                flip_bits=True)
            ccd_data[layer] = data
        logging.info(f'Adding {hdu[0]} to projected stack.')
        stack_input.append(
            wcs_project(
                CCDData(ccd_data['image'],
                        mask=ccd_data['mask'],
                        header=wcs_header,
                        wcs=WCS(wcs_header),
                        unit='adu',
                        uncertainty=ccd_data['variance']),
                WCS(reference_hdu[1].header)))  # HDUList has no .header; use the image HDU's header
        logging.debug(f'{stack_input[-1].header}')
    if rate is not None:
        combiner = Combiner(stack_input)
        if stacking_mode == 'MEDIAN':
            stacked_image = combiner.median_combine()
        else:
            stacked_image = combiner.average_combine()
        return fits.HDUList([
            fits.PrimaryHDU(header=reference_hdu[0].header),
            fits.ImageHDU(data=stacked_image.data,
                          header=reference_hdu[1].header)
        ])
    else:
        return stack_input
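A minimal sketch of how this swarp() might be driven, assuming the module-level HSC_HDU_MAP default applies; the file names and shift rates below are illustrative placeholders, not part of the original example:

# Hypothetical driver for swarp(); file names and rate values are placeholders.
from astropy.io import fits

hdus = [fits.open(name) for name in ('exp1.fits', 'exp2.fits', 'exp3.fits')]
rate = {'dra': 1.0 / 3600, 'ddec': -0.5 / 3600}  # assumed degrees per day
stacked = swarp(hdus, reference_hdu=hdus[0], rate=rate, stacking_mode='MEDIAN')
stacked.writeto('shifted_stack.fits', overwrite=True)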
Example #2
def t120_mkflat(flat_dir=t120.t120_flat_dir,
                master_name_root='master',
                master_offset=t120.t120_ofst_dir + t120.t120_master_name):
    # read offset
    hdu_offset_list = fits.open(master_offset)
    offset = CCDData(hdu_offset_list[0].data, unit=u.adu)

    dict_ccd_data = {}
    list_ccd_data = []
    for fit_file in glob.glob(flat_dir + '*.fit'):
        t120.log.info('now opening file: ' + fit_file)
        hdu = fits.open(fit_file)
        filter_name = hdu[0].header['FILTER']
        t120.log.info('filter=' + filter_name)
        if filter_name not in dict_ccd_data:
            dict_ccd_data[filter_name] = []
        # append unconditionally so the first frame of each filter is kept
        dict_ccd_data[filter_name].append(
            subtract_overscan(CCDData(hdu[0].data, unit=u.adu), offset))

    t120.log.info('now loop over the filters')
    for filter_name in dict_ccd_data:
        combiner = Combiner(dict_ccd_data[filter_name])
        master_flat = combiner.median_combine()
        hdu = master_flat.to_hdu()
        master_file = flat_dir + master_name_root + '-' + filter_name + '.fits'
        hdu.writeto(master_file, overwrite=True)
        t120.log.info('Master flat saved in ' + master_file)
    return
Example #3
    def combine_average(self,
                        data_list,
                        out_file,
                        clipping="none",
                        min_clip=0,
                        max_clip=0):
        ret = [False, None]
        if self.myof.not_file(out_file):
            all_data = []
            for i in data_list:
                this_data = self.mydata.get_data(i)
                if this_data[0]:
                    all_data.append(CCDData(this_data[1], unit=u.adu))
            if "NONE".startswith(clippling.upper()):
                if len(all_data) > 1:
                    try:
                        combiner = Combiner(all_data)
                        ret = [True, np.asarray(combiner.average_combine())]
                    except Exception as e:
                        self.myetc.print_and_log(e)
                        ret = [False, None]
                else:
                    self.myetc.print_and_log("Not enough data")
                    ret = [False, None]
            elif "MINMAX".startswith(clippling.upper()):
                if len(all_data) > 3:
                    try:
                        combiner = Combiner(all_data)
                        combiner.minmax_clipping(min_clip=min_clip,
                                                 max_clip=max_clip)
                        a = combiner.average_combine()
                        ret = [True, np.asarray(a)]
                    except Exception as e:
                        self.myetc.print_and_log(e)
                        ret = [False, None]
                else:
                    self.myetc.print_and_log("Not enough data")
                    ret = [False, None]
            elif "SIGMA".startswith(clippling.upper()):
                if len(all_data) > 3:
                    try:
                        combiner = Combiner(all_data)
                        # the SIGMA branch should sigma-clip, not minmax-clip
                        combiner.sigma_clipping(low_thresh=min_clip,
                                                high_thresh=max_clip)
                        ret = [True, np.asarray(combiner.average_combine())]
                    except Exception as e:
                        self.myetc.print_and_log(e)
                        ret = [False, None]
                else:
                    self.myetc.print_and_log("Not enough data")
                    ret = [False, None]
            else:
                self.myetc.print_and_log("Unknown clipping method")
                ret = [False, None]
            if ret[0]:
                self.mydata.create(out_file, ret[1], [])

        return ret[0]
Example #4
    def test_combine(self):

        ccd1 = CCDData(np.random.normal(size=(10, 10)), unit='adu')
        ccd2 = ccd1.copy()
        ccd3 = ccd1.copy()

        combiner = Combiner([ccd1, ccd2, ccd3])
        combiner.sigma_clipping(low_thresh=2, high_thresh=5)
        combined_data = combiner.median_combine()

        np.testing.assert_equal(combined_data.data, ccd1.data)
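For contrast with the test above, a small illustrative variant (not from the original test suite) showing that sigma_clipping actually masks a deviant frame before the median combine; all data here is synthetic:

# Illustrative only: the outlier frame is masked by the clipping,
# so the median of the surviving frames recovers the clean data.
import numpy as np
from astropy.nddata import CCDData
from ccdproc import Combiner

clean = CCDData(np.ones((10, 10)), unit='adu')
outlier = CCDData(np.full((10, 10), 1000.0), unit='adu')

combiner = Combiner([clean, clean.copy(), clean.copy(), outlier])
combiner.sigma_clipping(low_thresh=2, high_thresh=2,
                        func=np.ma.median, dev_func=np.ma.std)
combined = combiner.median_combine()
np.testing.assert_equal(combined.data, clean.data)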
Example #5
    def _create_stack(self, images_list, stack_name):

        CCD_data_table = [CCDData(im.data, unit='adu') for im in images_list]
        combiner = Combiner(CCD_data_table)
        median = combiner.median_combine()

        master_hdr = self._create_stack_hdr(
            images_list, self.config_section.get('datetime_key'),
            self.config_section.get('jd_key'))

        self.info('Processing stack {} finished'.format(stack_name))
        self._save_stack(median, stack_name, master_hdr)
Example #6
    def test_combine_masked(self):

        x = np.random.normal(size=(10, 10))
        x[5, :] = 0
        x = np.ma.masked_where(x == 0, x)

        ccd1 = CCDData(x, unit='adu')
        ccd2 = ccd1.copy()
        ccd3 = ccd1.copy()

        combiner = Combiner([ccd1, ccd2, ccd3])
        combiner.sigma_clipping(low_thresh=2, high_thresh=5)
        combined_data = combiner.median_combine()

        np.testing.assert_equal(combined_data.data, ccd1.data)
Example #7
    def combinaImagensBias(self, numeroImagens=10):
        newlist, dados, i = [], [], 0
        step = round(len(self.listaImagensBias) / numeroImagens)
        while i < numeroImagens:
            newlist.append(self.cwd + '\\' + self.listaImagensBias[i * step])
            i += 1
        for img in newlist:
            dados.append(fits.getdata(img, 0))
        # build another vector of CCDData objects
        x = []
        for img in dados:
            x.append(CCDData(img, unit='adu'))
        combinedImage = Combiner(x)
        combinedImageMedian = combinedImage.average_combine()  #average_median
        # store the combined frame, not the first raw bias image
        self.NPcombinedImage = np.asarray(combinedImageMedian)
Example #8
    def _computeScienceImage(self):
        print('\n MASTER SCIENCE: \n')
        #        self.sciTrim = self._overscanAndtrim(self.science)
        # TODO: use ccd_process?
        if isinstance(self._science, list):
            scisCorrected = []
            for sci in self._science:
                darkCorrection = self._subtractDark(sci)
                flatCorrection = self._correctForFlat(darkCorrection)
                skyCorrection = self._subtractSky(flatCorrection)
                #                 sciFinal = self._trimImage(skyCorrection)
                scisCorrected.append(skyCorrection)
            print('Sigma clipping...')
            sciCombiner = Combiner(scisCorrected)
            sciCombiner.sigma_clipping(low_thresh=3.,
                                       high_thresh=3.,
                                       func=np.ma.median,
                                       dev_func=np.ma.std)
            print('Median combine...')
            medianSci = sciCombiner.median_combine()
            mask = self.getBadPixelMask() + medianSci.mask
            print('Getting master science frame...')
            self.masterSci = CCDData(medianSci, mask=mask, unit='adu')
            print('Writing the header...')
            self.masterSci.header = self._science[0].meta
            # TODO: risky header?
#             self.masterSci.header['FRAMETYP'] = \
#                 self._science[0].header['FRAMETYP']
#             self.masterSci.header['OBJECT'] = self._science[0].header['OBJECT']
#             self.masterSci.header['DIT'] = self._science[0].header['DIT']
#             self.masterSci.header['FILTER'] = \
#                 self._science[0].header['FILTER']
#             self.masterSci.header['OBJRA'] = self._science[0].header['OBJRA']
#             self.masterSci.header['OBJDEC'] = self._science[0].header['OBJDEC']
#             self.masterSci.header['DATE'] = self._science[0].header['DATE']
#             self.masterSci.header['GAIN'] = self._science[0].header['GAIN']
        else:
            sci_dark = self._subtractDark(self._science)
            sciFlat = self._correctForFlat(sci_dark)
            print('Getting master science frame...')
            self.masterSci = self._subtractSky(sciFlat)
            print('Writing the header...')
            self.masterSci.header = self._science.header

        if self._unit == 'electron':
            self.masterSci = self._adu2Electron(self.masterSci)
            self.masterSci.header['UNIT'] = 'electrons'
Example #9
def create_super_bias(input_images, out_path, super_name):
    first_image = ccdproc.CCDData.read(input_images[0][0], unit='adu')
    last_image = ccdproc.CCDData.read(input_images[-1][-1], unit='adu')
    super_image = []
    super_image_sigma = []
    num = 0
    while len(input_images) > 0:  #I.e., there are chunks to combine
        inputs = []
        print('SB chunk:  ', num + 1, len(input_images[0]), input_images[0])
        len_input = len(input_images[0])
        for img in range(len_input):
            print(input_images[0][img])
            im = ccdproc.CCDData.read(input_images[0][img], unit='adu')
            im.data = im.data.astype(np.float32)
            inputs.append(im)
            num += 1
        print(inputs[-1])  #show the last one
        combiner = Combiner(inputs)
        combiner.sigma_clipping(low_thresh=2, high_thresh=3, func=np.ma.mean)
        im_temp = combiner.average_combine()
        print(im_temp.data[2][3])
        super_image.append(im_temp)
        combiner = None  #get rid of big data no longer needed.
        inputs = None
        input_images.pop(0)
    #print('SI:  ', super_image)
    #Now we combine the outer data to make the master
    #breakpoint()
    combiner = Combiner(super_image)
    combiner.sigma_clipping(low_thresh=2, high_thresh=3, func=np.ma.mean)
    super_img = combiner.average_combine()
    super_image = None  #Again get rid of big stale data
    combiner = None
    super_img.data = super_img.data.astype(np.float32)
    #Here we should clean up egregious pixels.
    super_img.meta = first_image.meta  #Just pick up first header
    first_image = None
    mn, std = image_stats(super_img)
    super_img.meta['COMBINE'] = (num, 'No of images combined')
    super_img.meta['BSCALE'] = 1.0
    super_img.meta['BZERO'] = 0.0  #NB This does not appear to go into headers.
    super_img.meta['BUNIT'] = 'adu'
    super_img.meta['CNTRMEAN'] = mn
    super_img.meta['CNTRSTD'] = std
    super_img.write(out_path + str(super_name), overwrite=True)
    super_img = None
Example #10
def geraArquivo(inputlist, n):
    scidata = []
    #vector holding the data
    for img in inputlist:
        scidata.append(img)

    #build another vector of CCDData objects
    x = []
    i = 0
    while i < len(inputlist):
        x.append(CCDData(scidata[i], unit='adu'))
        i += 1

    combinedImage = Combiner(x)
    combinedImageMedian = combinedImage.median_combine()  #average_combine
    NPcombinedImage = np.asarray(combinedImageMedian)
    return NPcombinedImage
Example #11
def t120_mkoffset(offset_dir=t120.t120_ofst_dir,
                  master_file_name=t120.t120_master_name):
    master_file = offset_dir + master_file_name
    listimg = ImageFileCollection(
        offset_dir)  #,glob_include='*.fit',glob_exclude='*.fits')
    listccd = []
    for ccd, file_name in listimg.ccds(ccd_kwargs={'unit': 'adu'},
                                       return_fname=True):
        t120.log.info('now considering file ' + file_name)
        listccd.append(ccd)

    combiner = Combiner(listccd)
    t120.log.info('now making the combination')
    master_offset = combiner.median_combine()
    fits_ccddata_writer(master_offset, master_file)
    t120.log.info('Result saved in ' + master_file)
    return master_file
Example #12
def criaImgBias_Reduction(listaImgBias):
    imgReduce = 0
    bias = []
    for img in listaImgBias:
        bias.append(fits.getdata(img))

    #build another vector of CCDData objects
    x = []
    i = 0
    while i < len(bias):
        x.append(CCDData(bias[i], unit='adu'))
        i += 1

    combinedImage = Combiner(x)
    combinedImageMedian = combinedImage.average_combine()  #average_median
    NPcombinedImage = np.asarray(combinedImageMedian)
    return NPcombinedImage
Example #13
    def _computeDarkImage(self):
        print('\n MASTER DARK: \n')
        #        self.darksTrim = self._overscanAndtrim(self.darks)

        print('Sigma clipping...')
        darksCombiner = Combiner(self._darks)
        darksCombiner.sigma_clipping(low_thresh=3,
                                     high_thresh=3,
                                     func=np.ma.median,
                                     dev_func=np.ma.std)
        print('Median combine...')
        self._darkMedian = darksCombiner.median_combine()
        self._computeBadPixelMaskUsingDarkFrames()
        mask = self.mask + self._darkMedian.mask
        print('Getting master dark frame...')
        self.masterDark = CCDData(self._darkMedian, mask=mask, unit='adu')

        print('Writing the master dark\'s header...')
        self.masterDark.header = self._darks[0].meta
Example #14
    def _computeSkyImage(self):
        print('\n MASTER SKY: \n')
        #        self.skyTrim = self._overscanAndtrim(self.skies)
        skiesCorrected = []
        for sky in self._skies:
            skyDark = self._subtractDark(sky)
            skyFlat = self._correctForFlat(skyDark)
            skiesCorrected.append(skyFlat)
        print('Sigma clipping...')
        skyCombiner = Combiner(skiesCorrected)
        skyCombiner.sigma_clipping(low_thresh=3.,
                                   high_thresh=3.,
                                   func=np.ma.median,
                                   dev_func=np.ma.std)
        print('Median combine...')
        medianSky = skyCombiner.median_combine()

        mask = self.getBadPixelMask() + medianSky.mask
        print('Getting master sky frame...')
        self.masterSky = CCDData(medianSky, mask=mask, unit='adu')
        self.masterSky.header = skiesCorrected[0].meta
Example #15
def create_super_dark(input_images, oPath, super_name, super_bias_name):
    inputs = []
    print('SD:  ', len(input_images), input_images, super_bias_name)
    super_bias_img = ccdproc.CCDData.read(super_bias_name,
                                          ignore_missing_end=True)
    for img in range(len(input_images)):
        corr_dark = ccdproc.subtract_bias(
            (ccdproc.CCDData.read(input_images[img], unit='adu')),
            super_bias_img)
        im = corr_dark
        im.data = im.data.astype(np.float32)
        im_offset = imageOffset(im, p_median=True)
        im_offset = float(im_offset)
        im.data -= im_offset
        inputs.append(im)
    combiner = Combiner(inputs)
    super_img = combiner.median_combine()
    #mn, std = imageStats(super_img)

    #super_img = super_img.add(100*u.adu)
    super_img.meta = inputs[0].meta
    #super_img.meta['PEDESTAL'] = -100
    super_img.meta['NCOMBINE'] = len(inputs)
    #super_img.meta['CNTRMEAN'] = mn
    #super_img.meta['CNTRSTD'] = std
    s_name = super_name.split('.')
    print('s_name_split:  ', s_name[0])
    tstring = datetime.datetime.now().isoformat().split('.')[0].split(':')
    wstring = str(oPath + '\\' + s_name[0] + '_' + \
                        tstring[0]+tstring[1]+tstring[2] + \
                        '.fits')
    super_img.write(wstring, overwrite=True)

    hots = hot_pixels(super_img)
    print(len(hots), hots)
    '''
    Need to trim negatives, and find hot pixels to create map.
    '''
    return
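hot_pixels() is called above but not shown. A plausible sketch under the assumption that it simply flags pixels far above the frame median; the threshold and return format are guesses, not from the original source:

# Assumed helper, not from the original source: flag pixels more than
# n_sigma above the frame median as hot, returning their (y, x) positions.
import numpy as np

def hot_pixels(ccd, n_sigma=5.0):
    data = np.asarray(ccd.data)
    median = np.median(data)
    std = np.std(data)
    ys, xs = np.where(data > median + n_sigma * std)
    return list(zip(ys.tolist(), xs.tolist()))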
Example #16
def t120_mkdark(dark_dir=t120.t120_dark_dir,
                master_offset=t120.t120_ofst_dir + t120.t120_master_name,
                master_file_name=t120.t120_master_name):
    # read offset
    hdu_offset_list = fits.open(master_offset)
    offset = CCDData(hdu_offset_list[0].data, unit=u.adu)

    master_file = dark_dir + master_file_name
    listimg = ImageFileCollection(
        dark_dir)  #,glob_include='*.fit',glob_exclude='*.fits')
    dict_ccd_data = {}
    list_ccd_data = []
    for fit_file in glob.glob(dark_dir + '*.fit'):
        t120.log.info('now opening file: ' + fit_file)
        hdu = fits.open(fit_file)
        exp_time = hdu[0].header['EXPTIME']
        strexptime = "%3.1f" % exp_time
        t120.log.info('EXPTIME=' + str(exp_time) + ' strexptime=' + strexptime)
        if strexptime not in dict_ccd_data:
            dict_ccd_data[strexptime] = []
        # append unconditionally so the first frame of each exposure time is kept
        dict_ccd_data[strexptime].append(
            subtract_overscan(CCDData(hdu[0].data, unit=u.adu), offset))

    t120.log.info('now loop over the exp_time')
    for strexp_time in dict_ccd_data:
        t120.log.info('exp_time: ' + strexp_time)
        combiner = Combiner(dict_ccd_data[strexp_time])
        master_dark = combiner.median_combine()
        master_file = dark_dir + master_file_name.replace(
            '.fits', '') + '-' + strexp_time + '.fits'
        hdu = master_dark.to_hdu()
        #hdu[0].header.set('EXPTIME',value=exp_time,comment='Exposure time in sec')
        #hdu[0].header.set('EXPOSURE',value=exp_time,comment='Exposure time in sec')
        #hdu.writeto(master_file,overwrite=True)
        fits_ccddata_writer(master_dark, master_file)
        t120.log.info('Master dark saved in ' + master_file)
    return
Example #17
def create_super_flat(input_images, oPath, super_name, super_bias_name,
                      super_dark_name):
    #NB Should cull low count input frames.
    inputs = []
    print('SF:  ', len(input_images))
    super_bias = ccdproc.CCDData.read(super_bias_name, ignore_missing_end=True)
    super_dark = ccdproc.CCDData.read(super_dark_name, ignore_missing_end=True)
    #super_dark = super_dark.subtract(super_dark.meta['PEDASTAL']*u.adu)

    for img in range(len(input_images)):
        img_in = ccdproc.CCDData.read(input_images[img],
                                      unit='adu',
                                      ignore_missing_end=True)
        bias_corr = ccdproc.subtract_bias(img_in, super_bias)
        print('Hello:  ', super_dark.meta['EXPTIME'], img_in.meta['EXPTIME'],
              type(bias_corr), type(super_dark), img_in.meta)
        corr_flat = ccdproc.subtract_dark(bias_corr, super_dark, scale=True, \
                    dark_exposure=super_dark.meta['EXPTIME']*u.s, \
                    data_exposure =img_in.meta['EXPTIME']*u.s)

        #corr_flat = ccdproc.
        inputs.append(corr_flat)
    combiner = Combiner(inputs)
    super_img = combiner.median_combine()
    super_img.meta = inputs[0].meta

    super_img.meta['NCOMBINE'] = len(inputs)
    s_name = super_name.split('.')
    print('s_name_split:  ', s_name[0])
    tstring = datetime.datetime.now().isoformat().split('.')[0].split(':')
    wstring = str(oPath + '\\' + s_name[0] + '_' + \
                        tstring[0]+tstring[1]+tstring[2] + \
                        '.fits')
    super_img.write(wstring, overwrite=True)

    #Turn the above into a circle region.
    return
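The scale=True branch of ccdproc.subtract_dark above rescales the master dark by the ratio of exposure times before subtraction. A self-contained sketch of that behaviour with synthetic frames (all values below are assumed):

# Dark scaling illustration: a 10 s master dark scaled to a 30 s exposure.
import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
import ccdproc

dark = CCDData(np.full((8, 8), 5.0), unit='adu')     # master dark, 10 s
image = CCDData(np.full((8, 8), 100.0), unit='adu')  # science frame, 30 s

result = ccdproc.subtract_dark(image, dark, scale=True,
                               dark_exposure=10 * u.s,
                               data_exposure=30 * u.s)
print(result.data[0, 0])  # 100 - 5 * (30 / 10) = 85.0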
Example #18
    def _computeFlatImage(self):
        print('\n MASTER FLAT: \n')
        #       self.flatsTrim = self._overscanAndtrim(self.flats)
        print('Dark subtraction...')
        flatsDarkSubtracted = []
        for ima in self._flats:
            imaDarkSub = self._subtractDark(ima)
            flatsDarkSubtracted.append(imaDarkSub)

        print('Sigma clipping...')
        flatCombiner = Combiner(flatsDarkSubtracted)
        flatCombiner.sigma_clipping(low_thresh=3.,
                                    high_thresh=3.,
                                    func=np.ma.median,
                                    dev_func=np.ma.std)
        print('Median combine...')
        medianFlat = flatCombiner.median_combine()
        mask = self.getBadPixelMask() + medianFlat.mask
        print('Getting master flat frame...')
        self.masterFlat = CCDData(medianFlat, mask=mask, unit='adu')

        print('Writing the master flat\'s header...')
        # TODO: risky header?
        self.masterFlat.header = flatsDarkSubtracted[0].meta
Example #19
def create_master_flat(filepath='../../../KeckData/MOSFIRE_FCS/',
                       flatfiles = ['m180130_0320.fits',
                                    'm180130_0321.fits',
                                    'm180130_0322.fits',
                                    'm180130_0323.fits',
                                    'm180130_0324.fits',],
                       darkfile = 'm180130_0001.fits',
                      ):
    dark = CCDData.read(os.path.join(filepath, darkfile), unit='adu')
    flats = []
    for i,file in enumerate(flatfiles):
        flat = CCDData.read(os.path.join(filepath, file), unit='adu')
        flat = flat.subtract(dark)
        flats.append(flat)

    flat_combiner = Combiner(flats)
    flat_combiner.sigma_clipping()
    scaling_func = lambda arr: 1/np.ma.average(arr)
    flat_combiner.scaling = scaling_func
    masterflat = flat_combiner.median_combine()

    masterflat.write('masterflat.fits', overwrite=True)
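The Combiner.scaling assignment above normalizes each flat to unit mean before combining, so flats taken at different illumination levels contribute equally. A standalone sketch of the same idea with synthetic data (the levels are arbitrary):

# Each frame is scaled to unit mean before the median combine.
import numpy as np
from astropy.nddata import CCDData
from ccdproc import Combiner

flats = [CCDData(np.random.normal(loc=level, scale=5.0, size=(32, 32)), unit='adu')
         for level in (1000.0, 2000.0, 4000.0)]

combiner = Combiner(flats)
combiner.scaling = lambda arr: 1 / np.ma.average(arr)
masterflat = combiner.median_combine()
print(np.ma.average(masterflat.data))  # close to 1.0 regardless of input levels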
Example #20
def combine(filelist,
            method,
            clip=None,
            clip_lo=None,
            clip_hi=None,
            norm=False,
            mask=None,
            normw=False):
    '''Combine input files with specified method'''
    if isinstance(filelist, ImageFileCollection):
        filelist = filelist.ccds(ccd_kwargs={'unit': 'adu'})
    elif isinstance(filelist, list):
        filelist = (CCDData.read(fname, unit='adu') for fname in filelist)

    # must convert again. ccd_reader does not process masks
    if normw:
        # wolly masks have form: (top, bottom, total)
        tmask = mask[-1]
    else:
        tmask = mask

    filelist = [CCDData(f, unit=f.unit, mask=tmask) for f in filelist]

    c = Combiner(filelist)

    header = filelist[0].header
    header.add_history(
        '%s - %s' %
        (os.path.basename(__file__), Time(Time.now(), format='fits')))

    # divide by exptime, unless normw specified
    if norm:
        if normw:
            #warn(RuntimeWarning('--norm parameter ignored in --normw mode.  Wolly halves will be normalized to 1.'))
            pass
        else:
            c.scaling = [1. / get_exptime(f.header) for f in filelist]
            header['NORMALZD'] = (True, 'Images normalized by exposure time')
            header.add_history('         - normalized by exptime')
    else:
        header['NORMALZD'] = (False, 'Images normalized by exposure time')

    if clip == 'ccdclip':
        if clip_lo is None and clip_hi is None:
            clip_lo = 10
            clip_hi = 10
            warn(
                RuntimeWarning(
                    '-cval not specified. Default values used for %s are: %.1f %.1f'
                    % (clip, clip_lo, clip_hi)))
        c.clip_extrema(int(clip_lo), int(clip_hi))
    elif clip == 'minmax':
        if clip_lo is None and clip_hi is None:
            clip_lo = 0.
            clip_hi = 50000.
            warn(
                RuntimeWarning(
                    '-cval not specified. Default values used for %s are: %.1f %.1f'
                    % (clip, clip_lo, clip_hi)))
        c.minmax_clipping(clip_lo, clip_hi)

    elif clip == 'sigclip':
        if clip_lo is None and clip_hi is None:
            clip_lo = 3.  # default 3 stdev
            clip_hi = 3.
            warn(
                RuntimeWarning(
                    '-cval not specified. Default values used for %s are: %.1f %.1f'
                    % (clip, clip_lo, clip_hi)))
        c.sigma_clipping(clip_lo, clip_hi)

    elif clip is None:
        pass
    else:
        raise NotImplementedError('clip method "%s" not implemented' % clip)

    if method == 'mean':
        ccd = c.average_combine()
    elif method == 'median':
        ccd = c.median_combine()
    elif method == 'sum':
        ccd = c.sum_combine()
    else:
        raise NotImplementedError('imcombine method %s not implemented' %
                                  method)

    header['NCOMBINE'] = (len(filelist), 'Num images combined')
    header['NMETHOD'] = (method, 'Image stack method')
    header.add_history('         - stacked %i images' % len(filelist))

    if clip:
        header['NCLIPMET'] = (clip, 'Image clip method')
        header['NCLIPLO'] = (clip_lo, 'Clip lo')
        header['NCLIPHI'] = (clip_hi, 'Clip hi')
        header.add_history(
            '         - clipped with method %s, clip_lo=%.2f, clip_hi=%.2f' %
            (clip, clip_lo, clip_hi))

    if normw:
        # normalize by wolly masks separately
        masktop = np.ma.array(ccd.data, mask=mask[0])
        maskbot = np.ma.array(ccd.data, mask=mask[1])
        ccd.data[~mask[0]] /= np.ma.mean(masktop)
        ccd.data[~mask[1]] /= np.ma.mean(maskbot)

        # set all unmasked values to nan (wolly overscan)
        ccd.data[mask[2]] = np.nan
        ccd.mask = None

        header['NORMALWS'] = (True, 'Images normalized to 1 by Wolly split')
        header.add_history('         - normalized to 1 by Wolly split')
        #header['IMAGETYP'] = 'Flat Frame'

    header.add_history('         - %s combined images' % method)

    ccd.header = header
    #return ccd
    return ccd.to_hdu()
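A hypothetical invocation of this combine() wrapper; the file names are placeholders, and get_exptime is assumed to be the surrounding module's exposure-time helper:

# Hypothetical usage; file names are placeholders.
flat_files = ['flat_001.fits', 'flat_002.fits', 'flat_003.fits']
hdulist = combine(flat_files, method='median', clip='sigclip',
                  clip_lo=3., clip_hi=3., norm=True)
hdulist.writeto('masterflat.fits', overwrite=True)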
Example #21
def swarp(hdus, reference_hdu, rate, hdu_idx=None, stacking_mode="MEAN", **kwargs):
    """
    use the WCS to project all images onto the 'reference_hdu', shifting the CRVAL of each image by rate*dt
    :param stacking_mode: what process to use for combining images MEAN or MEDIAN
    :param hdu_idx: which HDU in each HDUList listed in hdus is the ImageData in?
    :param hdus: list of HDUList
    :param reference_hdu: reference HDUList in hdus
    :param rate: dictionary with the ra/dec shift rates.
    :return: fits.HDUList
    """
    # Project the input images to the same grid using interpolation
    # logging.debug(f"Called with {kwargs}")
    if stacking_mode not in ['MEDIAN', 'MEAN']:
        logging.warning(f'{stacking_mode} not available for swarp stack. Setting to MEAN')
        stacking_mode = 'MEAN'
    if hdu_idx is None:
        hdu_idx = HSC_HDU_MAP
    reference_date = mid_exposure_mjd(reference_hdu[0])
    reference_header = kwargs['astheads'][reference_hdu[0].header['IMAGE']]
    reference_wcs = WCS(reference_header)
    stack_input = {}
    logging.info(f'stacking at rate/angle set: {rate}')
    ccd_data = {}
    
    for image in hdus:
        # logging.info(f"Opening {image} to add to stack")
        # with fits.open(image, mode='update') as hdu:
            hdu = hdus[image]
            wcs_header = kwargs['astheads'][hdu[0].header['IMAGE']]
            # wcs_header = hdu[1].header.copy()
            dt = (mid_exposure_mjd(hdu[0]) - reference_date)
            if rate is not None:
                wcs_header['CRVAL1'] -= (rate['dra'] * dt).to('degree').value*numpy.cos(numpy.deg2rad(wcs_header['CRVAL2']))
                wcs_header['CRVAL2'] -= (rate['ddec'] * dt).to('degree').value
            for layer in hdu_idx:
                data = hdu[hdu_idx[layer]].data
                if layer == 'variance':
                    data = VarianceUncertainty(data)
                elif layer == 'mask':
                    data = bitfield_to_boolean_mask(data, ignore_flags=STACK_MASK, flip_bits=True)
                ccd_data[layer] = data
            logging.info(f'Adding {hdu[0].header["IMAGE"]} to projected stack.')
            # reference_header = referece_hdu[1].header
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                stack_input[image] = wcs_project(CCDData(ccd_data['image'],
                                                         mask=ccd_data['mask'],
                                                         header=wcs_header,
                                                         wcs=WCS(wcs_header),
                                                         unit='adu',
                                                         uncertainty=ccd_data['variance']),
                                                 reference_wcs)

    if rate is not None:
        combiner = Combiner(stack_input.values())
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=RuntimeWarning)
            if stacking_mode == 'MEDIAN':
                stacked_image = combiner.median_combine()
            else:
                stacked_image = combiner.average_combine()
        return fits.HDUList([fits.PrimaryHDU(header=reference_hdu[0].header),
                             fits.ImageHDU(data=stacked_image.data, header=reference_header)])
    else:
        return stack_input
Example #22
def create_super_dark(input_images, out_path, super_name, super_bias_name):
    first_image = ccdproc.CCDData.read(input_images[0][0], unit='adu')
    last_image = ccdproc.CCDData.read(input_images[-1][-1], unit='adu')
    super_image = []
    super_image_sigma = []
    num = 0
    inputs = []
    print('SD:  ', len(input_images), input_images)
    super_bias_img = ccdproc.CCDData.read(out_path + super_bias_name,
                                          ignore_missing_end=True,
                                          unit='adu')
    while len(input_images) > 0:
        inputs = []
        print('SD chunk:  ', len(input_images[0]), input_images[0])
        len_input = len(input_images[0])
        for img in range(len_input):
            print(input_images[0][img])
            corr_dark = ccdproc.subtract_bias(
                (ccdproc.CCDData.read(input_images[0][img], unit='adu')),
                super_bias_img)
            im = corr_dark
            im.data = im.data.astype(np.float32)
            inputs.append(im)
            num += 1
        combiner = Combiner(inputs)
        # sigma_clipping works in place and returns None; call it for its
        # side effect, then average-combine the clipped stack.
        combiner.sigma_clipping(low_thresh=2, high_thresh=3, func=np.ma.mean)
        im_temp = combiner.average_combine()
        im_temp.data = im_temp.data.astype(np.float32)
        print(im_temp.data[2][3])
        #breakpoint()
        super_image.append(im_temp)
        combiner = None  #get rid of big data no longer needed.
        inputs = None

        input_images.pop(0)
    #Now we combine the outer data to make the master
    combiner = Combiner(super_image)
    combiner.sigma_clipping(low_thresh=2, high_thresh=3, func=np.ma.mean)
    super_img = combiner.average_combine()
    combiner = None
    super_img.data = super_img.data.astype(np.float32)
    super_img.meta = first_image.meta  #Just pick up first header
    mn, std = image_stats(super_img)
    super_img.meta['NCOMBINE'] = num
    super_img.meta['BSCALE'] = 1.0
    super_img.meta['BZERO'] = 0.0  #NB This does not appear to go into headers.
    super_img.meta['BUNIT'] = 'adu'
    super_img.meta['CNTRMEAN'] = mn
    super_img.meta['CNTRSTD'] = std
    wstring = str(out_path + super_name)
    super_img.write(wstring, overwrite=True)
    super_image = None  #Again get rid of big stale data
    #hot and cold pix here.
    return
Example #23
def combine(filelist,method,clip=None,clip_lo=None,clip_hi=None,norm=False,mask=None,normw=False):
    '''Combine input files with specified method'''
    if isinstance(filelist,ImageFileCollection):
        filelist = filelist.ccds(ccd_kwargs={'unit':'adu'})
    elif isinstance(filelist,list):
        filelist = (CCDData.read(fname,unit='adu') for fname in filelist)

    # must convert again. ccd_reader does not process masks
    if normw:
        # wolly masks have form: (top, bottom, total)
        tmask = mask[-1]
    else:
        tmask = mask

    filelist = [CCDData(f,unit=f.unit,mask=tmask) for f in filelist]

    c = Combiner(filelist)

    header = filelist[0].header
    header.add_history('%s - %s' % (os.path.basename(__file__),Time(Time.now(),format='fits')))

    # divide by exptime, unless normw specified
    if norm:
        if normw:
            #warn(RuntimeWarning('--norm parameter ignored in --normw mode.  Wolly halves will be normalized to 1.'))
            pass
        else:
            c.scaling = [1./get_exptime(f.header) for f in filelist]
            header['NORMALZD'] = (True,'Images normalized by exposure time')
            header.add_history('         - normalized by exptime')
    else:
        header['NORMALZD'] = (False,'Images normalized by exposure time')

    if clip == 'ccdclip':
        if clip_lo is None and clip_hi is None:
            clip_lo = 10
            clip_hi = 10
            warn(RuntimeWarning('-cval not specified. Default values used for %s are: %.1f %.1f'% (clip,clip_lo,clip_hi)))
        c.clip_extrema(int(clip_lo),int(clip_hi))
    elif clip == 'minmax':
        if clip_lo is None and clip_hi is None:
            clip_lo = 0.
            clip_hi = 50000.
            warn(RuntimeWarning('-cval not specified. Default values used for %s are: %.1f %.1f'% (clip,clip_lo,clip_hi)))
        c.minmax_clipping(clip_lo,clip_hi)
        
    elif clip == 'sigclip':
        if clip_lo is None and clip_hi is None:
            clip_lo = 3. # default 3 stdev
            clip_hi = 3.
            warn(RuntimeWarning('-cval not specified. Default values used for %s are: %.1f %.1f'% (clip,clip_lo,clip_hi)))
        c.sigma_clipping(clip_lo,clip_hi)
        
    elif clip is None:
        pass
    else:
        raise NotImplementedError('clip method "%s" not implemented'%clip)

    
    if method == 'mean':
        ccd = c.average_combine()
    elif method == 'median':
        ccd = c.median_combine()
    elif method == 'sum':
        ccd = c.sum_combine()
    else:
        raise NotImplementedError('imcombine method %s not implemented'%method)
        
    header['NCOMBINE'] = (len(filelist),'Num images combined')
    header['NMETHOD'] = (method,'Image stack method')
    header.add_history('         - stacked %i images' % len(filelist))
    
    if clip:
        header['NCLIPMET'] = (clip,'Image clip method')
        header['NCLIPLO'] = (clip_lo,'Clip lo')
        header['NCLIPHI'] = (clip_hi,'Clip hi')
        header.add_history('         - clipped with method %s, clip_lo=%.2f, clip_hi=%.2f' % (clip,clip_lo,clip_hi))

    if normw:
        # normalize by wolly masks separately
        masktop = np.ma.array(ccd.data, mask = mask[0])
        maskbot = np.ma.array(ccd.data, mask = mask[1])
        ccd.data[~mask[0]] /= np.ma.mean(masktop)
        ccd.data[~mask[1]] /= np.ma.mean(maskbot)

        # set all unmasked values to nan (wolly overscan)
        ccd.data[mask[2]] = np.nan
        ccd.mask = None
        
        header['NORMALWS'] = (True,'Images normalized to 1 by Wolly split')
        header.add_history('         - normalized to 1 by Wolly split')
        #header['IMAGETYP'] = 'Flat Frame'
        
    header.add_history('         - %s combined images' % method)

    ccd.header = header
    #return ccd
    return ccd.to_hdu()
Example #24
def create_super_bias(input_images, oPath, super_name):
    num = len(input_images)
    first_image = ccdproc.CCDData.read(input_images[0])
    input_images = chunkify(input_images)
    super_image = []
    while len(input_images) > 0:
        inputs = []
        print('SB:  ', len(input_images[0]), input_images[0], super_name)
        len_input = len(input_images[0])
        for img in range(len_input):
            print(input_images[0][img])
            im = ccdproc.CCDData.read(input_images[0][img])
            im.data = im.data.astype(np.float32)
            im_offset = imageOffset(im, p_median=True)
            im_offset = float(im_offset)
            im.data -= im_offset
            inputs.append(im)  # - im_offset)  #, unit="adu"))
            print('Size of inputs:  ', get_size(inputs), im_offset)
        print(inputs)
        combiner = Combiner(inputs)
        im_temp = combiner.median_combine()

        im_temp.data = im_temp.data.astype(np.float32)

        super_image.append(im_temp)  #TODO: change to sigma-clip
        combiner = None
        inputs = None
        print('Size of inputs:  ', get_size(inputs))
        print('Size of super:  ', get_size(super_image))
        input_images.pop(0)
    print('SI:  ', super_image)
    combiner = Combiner(super_image)
    super_img = combiner.median_combine()
    super_image = None
    combiner = None
    super_img.data = super_img.data.astype(np.float32)
    print('Size of final super data:  ', get_size(super_img.data))
    #    try:
    #        os.mkdir(path[:-9]+ '\\lng\\')
    #
    #    except:
    #        pass
    super_img.meta = first_image.meta  #Just pick up first header
    first_image = None
    mn, std = imageStats(super_img)
    super_img.meta['COMBINE'] = (num, 'No of images used')
    super_img.meta['BSCALE'] = 1.0
    super_img.meta['BZERO'] = 0.0  #NB This does not appear to go into headers.
    super_img.meta['CNTRMEAN'] = mn
    super_img.meta['CNTRSTD'] = std
    super_img.write(oPath + str(super_name), overwrite=True)

    #    s_name = str(super_name).split('\\')
    #    print('s_name_split:  ', s_name)
    s_name = super_name.split('.')
    print('s_name_split:  ', s_name[0])
    tstring = datetime.datetime.now().isoformat().split('.')[0].split(':')
    wstring = str(oPath + '\\' + s_name[0] + '_' + \
                        tstring[0]+tstring[1]+tstring[2] + \
                        '.fits')
    print('wstring:  ', str(super_name), wstring)
    print('Size of final super meta:  ', get_size(super_img.meta))
    super_img.write(wstring, overwrite=True)  #this is per day dir copy
    super_img = None
    #makeLng(path[:-9]+ '\\lng', s_name[0])
    '''
    Need to combine temperatures and keep track of them.
    Need to form appropriate averages.

    The above is a bit sloppy.  We should write the per-day dir version of the
    superbias first; then, if a prior lng superbias exists, a new combined
    weighted bias is created.  The prior N <= 4 days' biases are kept, then
    aged: (1*5 + 2*4 + 3*3 + 4*2 + 5*1)/16

    Need to examine for hot pixels and hot columns and make entries in the 1:1
    resolution bad pixel mask.
    '''
    return
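chunkify is used above but not shown. A plausible sketch, assuming it simply splits the file list into fixed-size groups so only one chunk of frames is held in memory at a time; the chunk size is a guess:

# Assumed helper, not from the original source.
def chunkify(items, chunk_size=16):
    """Split a list into consecutive chunks of at most chunk_size items."""
    return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]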
Example #25
def process_fits(fitspath,
                 *,
                 obstype=None,
                 object=None,
                 exposure_times=None,
                 percentile=None,
                 percentile_min=None,
                 percentile_max=None,
                 window=None,
                 darks=None,
                 cosmic_ray=False,
                 cosmic_ray_kwargs={},
                 gain=None,
                 readnoise=None,
                 normalise=False,
                 normalise_func=np.ma.average,
                 combine_type=None,
                 sigma_clip=False,
                 low_thresh=3,
                 high_thresh=3):
    """Combine all FITS images of a given type and exposure time from a given directory.

    Parameters
    ----------
    fitspath: str
        Path to the FITS images to process. Can be a path to a single file, or a path to a
        directory. If the latter, the directory will be searched for FITS files and checked
        against the obstype, object and exposure_times criteria.
    obstype: str, optional
        Observation type, an 'OBSTYPE' FITS header value e.g. 'DARK', 'OBJ'. If given only files
        with matching OBSTYPE will be processed.
    object: str, optional
        Object name, i.e. 'OBJECT' FITS header value. If given only files with matching OBJECT
        will be processed.
    exposure_times: float or sequence, optional
        Exposure time(s), i.e. 'TOTALEXP' FITS header value(s). If given, only files with
        matching TOTALEXP will be processed.
    percentile: float, optional
        If given, only images whose percentile value falls between percentile_min and
        percentile_max will be processed, e.g. set to 50.0 to select images by median value,
        or to 99.5 to select images by their 99.5th percentile value.
    percentile_min: float, optional
        Minimum percentile value.
    percentile_max: float, optional
        Maximum percentile value.
    window: (int, int, int, int), optional
        If given will trim images to the window defined as (x0, y0, x1, y1), where (x0, y0)
        and (x1, y1) are the coordinates of the bottom left and top right corners.
    darks: str or sequence, optional
        Filename(s) of dark frame(s) to subtract from the image(s). If given a dark frame with
        matching TOTALEXP will be subtracted from each image during processing.
    cosmic_ray: bool, optional
        Whether to perform single image cosmic ray removal, using the lacosmic algorithm,
        default False. Requires both gain and readnoise to be set.
    cosmic_ray_kwargs: dict, optional
        Additional keyword arguments to pass to the ccdproc.cosmicray_lacosmic function.
    gain: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to the (inverse gain), or
        a Quantity containing the gain value to use. If both gain and read noise are given
        an uncertainty frame will be created.
    readnoise: str or astropy.units.Quantity, optional
        Either a string indicating the FITS keyword corresponding to read noise, or a Quantity
        containing the read noise value to use. If both read noise and gain are given then an
        uncertainty frame will be created.
    normalise: bool, optional
        If True each image will be normalised. Default False.
    normalise_func: callable, optional
        Function to use for normalisation. Each image will be divided by normalise_func(image).
        Default np.ma.average.
    combine_type: str, optional
        Type of image combination to use, 'MEAN' or 'MEDIAN'. If None the individual
        images will be processed but not combined and the return value will be a list of
        CCDData objects. Default None.
    sigma_clip: bool, optional
        If True will perform sigma clipping on the image stack before combining, default=False.
    low_thresh: float, optional
        Lower threshold to use for sigma clipping, in standard deviations. Default is 3.0.
    high_thresh: float, optional
        Upper threshold to use for sigma clipping, in standard deviations. Default is 3.0.


    Returns
    -------
    master: ccdproc.CCDData
        Combined image.

    """
    if exposure_times:
        try:
            # Should work for any sequence or iterable type
            exposure_times = set(exposure_times)
        except TypeError:
            # Not a sequence or iterable, try using as a single value.
            exposure_times = {
                float(exposure_times),
            }

    if darks:
        try:
            dark_filenames = set(darks)
        except TypeError:
            dark_filenames = {
                darks,
            }
        dark_dict = {}
        for filename in dark_filenames:
            try:
                dark_data = CCDData.read(filename)
            except ValueError:
                # Might be no units in FITS header. Assume ADU.
                dark_data = CCDData.read(filename, unit='adu')
            dark_dict[dark_data.header['totalexp']] = dark_data

    if combine_type and combine_type not in ('MEAN', 'MEDIAN'):
        raise ValueError(
            "combine_type must be 'MEAN' or 'MEDIAN', got '{}''".format(
                combine_type))

    fitspath = Path(fitspath)
    if fitspath.is_file():
        # FITS path points to a single file, turn into a list.
        filenames = [
            fitspath,
        ]
    elif fitspath.is_dir():
        # FITS path is a directory. Find FITS file and collect values of selected FITS headers
        ifc = ImageFileCollection(fitspath, keywords='*')
        if len(ifc.files) == 0:
            raise RuntimeError("No FITS files found in {}".format(fitspath))
        # Filter by observation type.
        if obstype:
            try:
                ifc = ifc.filter(obstype=obstype)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBSTYPE={}.".format(obstype))
        # Filter by object name.
        if object:
            try:
                ifc = ifc.filter(object=object)
            except FileNotFoundError:
                raise RuntimeError(
                    "No FITS files with OBJECT={}.".format(object))
        filenames = [
            Path(ifc.location).joinpath(filename) for filename in ifc.files
        ]
    else:
        raise ValueError(
            "fitspath '{}' is not an accessible file or directory.".format(
                fitspath))

    # Load image(s) and process them.
    images = []
    for filename in filenames:
        try:
            ccddata = CCDData.read(filename)
        except ValueError:
            # Might be no units in FITS header. Assume ADU.
            ccddata = CCDData.read(filename, unit='adu')
        # Filtering by exposure times here because it's hard to filter an
        # ImageFileCollection with an indeterminate number of possible values.
        if not exposure_times or ccddata.header['totalexp'] in exposure_times:
            if window:
                ccddata = ccdproc.trim_image(ccddata[window[1]:window[3] + 1,
                                                     window[0]:window[2] + 1])

            if percentile:
                # Check percentile value is within specified range, otherwise skip to next image.
                percentile_value = np.percentile(ccddata.data, percentile)
                if percentile_value < percentile_min or percentile_value > percentile_max:
                    continue

            if darks:
                try:
                    ccddata = ccdproc.subtract_dark(
                        ccddata,
                        dark_dict[ccddata.header['totalexp']],
                        exposure_time='totalexp',
                        exposure_unit=u.second)
                except KeyError:
                    raise RuntimeError(
                        "No dark with matching totalexp for {}.".format(
                            filename))

            if gain:
                if isinstance(gain, str):
                    egain = ccddata.header[gain]
                    egain = egain * u.electron / u.adu
                elif isinstance(gain, u.Quantity):
                    try:
                        egain = gain.to(u.electron / u.adu)
                    except u.UnitsError:
                        egain = (1 / gain).to(u.electron / u.adu)
                else:
                    raise ValueError(
                        f"gain must be a string or Quantity, got {gain}.")

            if readnoise:
                if isinstance(readnoise, str):
                    rn = ccddata.header[readnoise]
                    rn = rn * u.electron
                elif isinstance(readnoise, u.Quantity):
                    try:
                        rn = readnoise.to(u.electron / u.pixel)
                    except u.UnitsError:
                        rn = (readnoise * u.pixel).to(u.electron)
                else:
                    raise ValueError(
                        f"readnoise must be a string or Quantity, got {readnoise}."
                    )

            if gain and readnoise:
                ccddata = ccdproc.create_deviation(ccddata,
                                                   gain=egain,
                                                   readnoise=rn,
                                                   disregard_nan=True)

            if gain:
                ccddata = ccdproc.gain_correct(ccddata, gain=egain)

            if cosmic_ray:
                if not (gain and readnoise):
                    raise ValueError(
                        "Cosmic ray removal requires both gain & readnoise.")

                ccddata = ccdproc.cosmicray_lacosmic(
                    ccddata,
                    gain=1.0,  # ccddata already gain corrected
                    readnoise=rn,
                    **cosmic_ray_kwargs)

            if normalise:
                ccddata = ccddata.divide(normalise_func(ccddata.data))

            images.append(ccddata)

    n_images = len(images)
    if n_images == 0:
        msg = "No FITS files match exposure time criteria"
        raise RuntimeError(msg)

    if n_images == 1 and combine_type:
        warn("Combine type '{}' selected but only 1 matching image, "
             "skipping image combination.".format(combine_type))
        combine_type = None

    if combine_type:
        combiner = Combiner(images)

        # Sigma clip data
        if sigma_clip:
            if combine_type == 'MEAN':
                central_func = np.ma.average
            else:
                # If not MEAN has to be MEDIAN, checked earlier that it was one or the other.
                central_func = np.ma.median
            combiner.sigma_clipping(low_thresh=low_thresh,
                                    high_thresh=high_thresh,
                                    func=central_func)

        # Stack images.
        if combine_type == 'MEAN':
            master = combiner.average_combine()
        else:
            master = combiner.median_combine()

        # Populate header of combined image with metadata about the processing.
        master.header['fitspath'] = str(fitspath)
        if obstype:
            master.header['obstype'] = obstype
        if exposure_times:
            if len(exposure_times) == 1:
                master.header['totalexp'] = float(exposure_times.pop())
            else:
                master.header['totalexp'] = tuple(exposure_times)
        master.header['nimages'] = n_images
        master.header['combtype'] = combine_type
        master.header['sigclip'] = sigma_clip
        if sigma_clip:
            master.header['lowclip'] = low_thresh
            master.header['highclip'] = high_thresh

    else:
        # No image combination, just processing individual image(s)
        if n_images == 1:
            master = images[0]
        else:
            master = images

    return master
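A hypothetical call to process_fits, building a master dark from a directory of exposures; the path and exposure time below are placeholders:

# Hypothetical usage; path and exposure time are placeholders.
master_dark = process_fits('/data/darks',
                           obstype='DARK',
                           exposure_times=300,
                           combine_type='MEDIAN',
                           sigma_clip=True)
master_dark.write('master_dark_300s.fits', overwrite=True)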
Example #26
    prepath.mkdir()
else:
    print("Prepath 저장 폴더가 이미 있어서 새로 만들지 않음")

if os.path.exists(prepath / bias_fname):
    mbias = fits.getdata(prepath / bias_fname)
    print("이전에 만든 bias 사용")
else:
    images = []
    for i in range(len(biastab)):
        cc = CCDData(fits.getdata(biastab[i]['FILE']), unit=u.adu)

        cc = yfu.CCDData_astype(cc, dtype='float32')
        images.append(cc)

    cc = Combiner(images)

    mbias = cc.median_combine()
    cc = fits.getheader(biastab[0]['FILE'])

    mbias.header = cc
    mbias.header.add_history(
        f"Median combined from {len(biastab)} bias frame(s)")
    mbias = yfu.CCDData_astype(mbias, dtype='float32')
    mbias.write(prepath / bias_fname, overwrite=True)
#%%
mdark_fdic = {}
for k in range(len(exptlist)):
    if os.path.exists(prepath / dark_fdic[exptlist[k]]):
        mdark_fdic[exptlist[k]] = fits.getdata(prepath /
                                               dark_fdic[exptlist[k]])