Example #1
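The snippets below are excerpts and omit their module-level imports. Most of them assume something like the following (a sketch; the exact imports vary per source module):

# Common imports assumed by most of the examples below (a sketch, not taken
# from any single source module).
import numpy as np
import ccdproc
import astropy.units as u
from astropy.io import fits
from astropy.modeling import models  # for the overscan-model examples
from astropy.wcs import WCS          # for the reprojection/stacking examples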
def flat_file_by_file(fname, flatname, datahdus=0):
    hdulist = fits.open(fname)
    hduflat = fits.open(flatname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    hduindexes = list(range(nhdus))[istart:]
    if datahdus != 0:
        hduindexes = datahdus
    for i in hduindexes:
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        flat1 = ccdproc.CCDData(hduflat[i].data, unit="adu")
        flat1.header = hduflat[i].header
        if i == hduindexes[0]:
            flatscale = np.mean(flat1)
        # flat1 = flat1/flatscale
        commentstr = "Flat image is " + flatname + " with scale " + str(
            flatscale)
        proc1 = ccdproc.flat_correct(data1,
                                     flat1,
                                     add_keyword={
                                         'flat': True,
                                         'calstat': 'OTZF',
                                         'history': commentstr
                                     })
        fits.update(fname, proc1.data, header=proc1.header, ext=i)
        # fits.update(fname, proc1.data, ext=i)
    hdulist.close()
    hduflat.close()
    mylog("Flat corrected {0} with {1}".format(fname, flatname))
    return
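Since fits.update rewrites the input file in place, a hypothetical call would operate on a working copy; the file names below are invented, and mylog is assumed to be defined by the surrounding module:

# Hypothetical usage of flat_file_by_file; file names are made up.
import shutil
shutil.copy("obj001.fits", "obj001_proc.fits")  # keep the raw frame untouched
flat_file_by_file("obj001_proc.fits", "masterflat.fits")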
Example #2
def blueprocess(hdu):
    """Pre-processing necessary for blue files."""
    # first things first: split the image
    rccd = ccdproc.CCDData(1.0 * hdu[0].data[:, 0:1050], unit=u.adu)
    lccd = ccdproc.CCDData(1.0 * hdu[0].data[:, 1050:], unit=u.adu)

    #get the gain values
    gain = hdu[0].header['GAIN'].split()
    #print gain

    #process the data
    rccd = process(rccd, float(gain[0]), rccd[:, 0:26], '[27:,:]')
    lccd = process(lccd, float(gain[1]), lccd[:, -26:], '[:1024,:]')

    #put it back together
    ry, rx = rccd.shape
    ly, lx = lccd.shape
    shape = (ry, rx + lx)
    data = np.zeros(shape)
    data[:, 0:rx] = rccd.data
    data[:, rx:rx + lx] = lccd.data

    hdu[0].data = data[::-1, ::-1]
    return hdu
Example #3
def bias_file_by_file(fname, biasname, datahdus=0):
    hdulist = fits.open(fname)
    hdubias = fits.open(biasname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    hduindexes = list(range(nhdus))[istart:]
    if datahdus != 0:
        hduindexes = datahdus
    for i in hduindexes:
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        bias1 = ccdproc.CCDData(hdubias[i].data, unit="adu")
        bias1.header = hdubias[i].header
        commentstr = "Bias image is " + biasname
        # proc1 = ccdproc.subtract_bias(data1,bias1,add_keyword={'bias': True, 'calstat': 'OTZ', 'history':commentstr} )
        proc1 = ccdproc.subtract_bias(data1,
                                      bias1,
                                      add_keyword={
                                          'bias': True,
                                          'calstat': 'OTZ'
                                      })
        fits.update(fname, proc1.data, header=proc1.header, ext=i)
        # fits.update(fname, proc1.data, ext=i)
    hdulist.close()
    hdubias.close()
    mylog("Bias corrected {0} with {1}".format(fname, biasname))
    return
Example #4
def merge_m4k_one_img(hdulist):
    data1 = ccdproc.CCDData(hdulist[1].data, unit="adu")
    data1.header = hdulist[1].header
    data2 = ccdproc.CCDData(hdulist[2].data, unit="adu")
    merged = np.concatenate((data1, np.fliplr(data2)), axis=1)
    # assume we don't have to change any WCS parameters from ext 1
    hdu_new = fits.PrimaryHDU(merged)
    hdu_new.header = hdulist[0].header
    hdulist_new = fits.HDUList([hdu_new])

    return hdulist_new
Example #5
def merge_fitsfd(hdulist):

    data1 = ccdproc.CCDData(hdulist[1].data, unit="adu")
    data1.header = hdulist[1].header
    data2 = ccdproc.CCDData(hdulist[2].data, unit="adu")
    merged = np.concatenate((data1, np.fliplr(data2)), axis=1)
    # assume we don't have to change any WCS parameters from ext 1
    hdu_new = fits.PrimaryHDU(merged)
    hdu_new.header = hdulist[0].header
    hdulist_new = fits.HDUList([hdu_new])
    # Use basename to keep from breaking if the names in imagelist are
    # full path rather than just a file

    return hdulist_new
Example #6
    def make_image_real(self, noiseless, exp_time, subtract_dark=False):
        """
        Given a noiseless simulated image in electrons per pixel add dark current,
        Poisson noise and read noise, and convert to ADU using the predefined gain.
        """
        # Scale photoelectron rates by exposure time
        data = noiseless.data * noiseless.unit * exp_time
        # Add dark current
        data += self.dark_frame * exp_time
        # Force to electron units
        data = data.to(u.electron)
        # Apply Poisson noise. This is not unit-aware, need to restore them manually
        data = (poisson.rvs(data / u.electron)).astype('float64') * u.electron
        # Apply read noise. Again need to restore units manually
        data += norm.rvs(scale=self.read_noise / u.electron,
                         size=data.shape) * u.electron
        # Optionally subtract a Perfect Dark
        if subtract_dark:
            data -= (self.dark_frame * exp_time).to(u.electron)
        # Convert to ADU
        data /= self.gain
        # Force to adu (just here to catch unit problems)
        data = data.to(u.adu)
        # 'Analogue to digital conversion'
        data = np.where(data < 2**16 * u.adu, data, (2**16 - 1) * u.adu)
        data = data.astype('uint16')
        # Data type conversion strips units so need to put them back manually
        image = ccdproc.CCDData(data, wcs=noiseless.wcs, unit=u.adu)
        image.header['EXPTIME'] = exp_time
        image.header['DARKSUB'] = subtract_dark

        return image
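The noise model above can be exercised on a toy frame; the gain and read-noise values here are assumptions, not taken from the source:

# Minimal sketch of the same noise model on a toy frame (made-up values).
import numpy as np
import astropy.units as u
from scipy.stats import poisson, norm

electrons = np.full((4, 4), 100.0) * u.electron              # noiseless signal
noisy = poisson.rvs(electrons / u.electron).astype('float64') * u.electron
noisy += norm.rvs(scale=5.0, size=noisy.shape) * u.electron  # 5 e- read noise
adu = (noisy / (1.5 * u.electron / u.adu)).to(u.adu)         # gain 1.5 e-/adu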
Example #7
	def combine_flats(self, flat_list, master_dark, method="median"):
		"""Combine and reduce a series of flat frames into a normalized flatfield via CCDPROC"""

		if method == "median":
			print("Combining flats by median")
			combined_flat = ccdproc.combine(flat_list, method="median", unit="adu", mem_limit=6e9)

		elif method == "mean":
			print("Combining flats by mean")
			combined_flat = ccdproc.combine(flat_list, method="mean", unit="adu", mem_limit=6e9)

		else:
			print("Combining flats by median")
			combined_flat = ccdproc.combine(flat_list, method="median", unit="adu", mem_limit=6e9)

		print("Subtracting master dark from combined flat")
		master_flat = ccdproc.subtract_dark(combined_flat, master_dark, data_exposure=combined_flat.header["exposure"]*u.second, dark_exposure=master_dark.header["exposure"]*u.second, scale=True)

		print("Reading master flat data")
		master_flat_data = np.asarray(master_flat)

		print("Creating normalized flatfield")
		flatfield_data = master_flat_data / np.mean(master_flat_data)

		print("Converting flatfield data to CCDData")
		flatfield = ccdproc.CCDData(flatfield_data, unit="adu")

		return flatfield
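A hypothetical call, assuming the surrounding class instance and that both headers carry the 'exposure' keyword needed for dark scaling:

# Hypothetical usage; 'pipeline', the file names, and master_dark are assumptions.
flatfield = pipeline.combine_flats(["flat_001.fit", "flat_002.fit"], master_dark)
flatfield.write("flatfield.fits", overwrite=True)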
Example #8
    def setUp(self):
        #setting different input objects
        self.ndarrayformat = 'numpy_array'
        self.ndarray_img_obj = np.empty((100, 100))

        self.ccddataformat = 'CCDData'
        self.ccddata_img_obj = ccdproc.CCDData(data=self.ndarray_img_obj,
                                               unit='adu')
Example #9
    def make_master_bias(self, list_of_biasfiles):
        bias_list = [0]*len(list_of_biasfiles)
        for ii, kk in enumerate(list_of_biasfiles):
            fitsfile = fits.open(kk)
            bias = ccdproc.CCDData(data=fitsfile[1].data, meta=fitsfile[1].header, unit="adu")
            bias_list[ii] = ccdproc.subtract_overscan(bias, fits_section='[5:35, :]')

        self.master_bias = ccdproc.combine(bias_list, method="median")
        self.master_bias.write(self.output_dir+"/"+self.filename+"_masterbias.fits", overwrite=True)
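Note that fits_section uses the one-based, column-first FITS convention, so '[5:35, :]' matches the NumPy slice data[:, 4:35]. An equivalent call using the python-style overscan argument might look like this sketch:

# Equivalent overscan subtraction using a direct slice instead of fits_section.
bias_sub = ccdproc.subtract_overscan(bias, overscan=bias[:, 4:35])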
Example #10
 def _createLists(self, **kwargs):
     self.imas_list = []
     self.names_list = []
     for ima, fname in self._getImageCollection().hdus(return_fname=True,
                                                       **kwargs):
         meta = ima.header
         meta['filename'] = fname
         self.imas_list.append(ccdproc.CCDData(data=ima.data,
                                               meta=meta,
                                               unit="adu"))
         self.names_list.append(fname)
Example #11
def write_image(im,
                filename=None,
                dir_work="",
                prefix="_twodspec_",
                suffix=".fits",
                delete=False):
    if filename is None:
        # tempfile in dir_work
        tf_im = NamedTemporaryFile(dir=dir_work,
                                   prefix=prefix,
                                   suffix=suffix,
                                   delete=delete)
        ccdproc.CCDData(im, unit='adu').write(tf_im.name, overwrite=True)
        print("writing to {}".format(tf_im.name))
        tf_im.close()
        return tf_im.name
    else:
        # filename
        ccdproc.CCDData(im, unit='adu').write(filename, overwrite=True)
        print("writing to {}".format(filename))
        return filename
Example #12
    def make_science(self, list_of_sciencefiles):


        science_list = [0]*len(list_of_sciencefiles)
        science_names = [0]*len(list_of_sciencefiles)
        for ii, kk in enumerate(list_of_sciencefiles):
            fitsfile = fits.open(kk)
            science = ccdproc.CCDData(data=fitsfile[1].data, meta=fitsfile[1].header, unit="adu")
            # science_cos = ccdproc.cosmicray_lacosmic(science, verbose = True)
            overscan_sub = ccdproc.subtract_overscan(science, fits_section='[5:35, :]')
            bias_sub = ccdproc.subtract_bias(overscan_sub, self.master_bias)
            flat_corr = ccdproc.flat_correct(bias_sub, self.master_flat)

            # science_list[ii] = flat_corr
            flat_corr.write(self.output_dir+"/"+self.filename+"_"+str(ii)+".fits", overwrite=True)
            science_names[ii] = self.output_dir+"/"+self.filename+"_"+str(ii)+".fits"

        # science_list = [0]*len(list_of_sciencefiles)
        coverages = [0]*len(list_of_sciencefiles)
        for ii, kk in enumerate(science_names):
            fitsfile = fits.open(kk)
            fitsfile_common, coverage = reproject_interp(fitsfile, fits.open(science_names[0])[0].header, hdu_in=0)
            science = ccdproc.CCDData(data=fitsfile_common, meta=fitsfile[0].header, unit="adu")
            science_list[ii] = science
            coverages[ii] = ccdproc.CCDData(data=coverage, meta=fitsfile[0].header, unit="adu")

        self.combined_science = ccdproc.combine(science_list, method="median")
        # self.combined_coverage = ccdproc.combine(coverages, method="sum")


        header0 = fits.open(list_of_sciencefiles[0])[0].header
        for key in header0:
            # print(str(key), header0[str(key)])
            if "COMMENT" in key:
                continue
            self.combined_science.header[str(key)] = header0[str(key)]

        self.combined_science.write(self.output_dir+"/"+self.filename+".fits", overwrite=True)
Example #13
    def make_master_flat(self, list_of_flatfiles):
        flat_list = [0]*len(list_of_flatfiles)
        for ii, kk in enumerate(list_of_flatfiles):
            fitsfile = fits.open(kk)
            flat = ccdproc.CCDData(data=fitsfile[1].data, meta=fitsfile[1].header, unit="adu")
            flat_scan = ccdproc.subtract_overscan(flat, fits_section='[5:35, :]')
            flat_bias = ccdproc.subtract_bias(flat_scan, self.master_bias)
            flat_bias.data /= np.median(flat_bias.data[100:-100, 100:-100])
            flat_list[ii] = flat_bias

        self.master_flat = ccdproc.combine(flat_list, method="average", sigma_clip=True)


        self.master_flat.write(self.output_dir+"/"+self.filename+"_masterflat.fits", overwrite=True)
Example #14
    def action(self):
        if not self.image_collection:
            raise ValueError("No images to reduce")
        self.progress_bar.visible = True

        # Refresh in case files have been added since the widget was created.
        self.image_collection.refresh()

        reduced_images = []
        # Suppress warnings that come up here...mostly about HIERARCH keywords
        warnings.filterwarnings('ignore')
        try:
            n_files = \
                len(self.image_collection.files_filtered(**self.apply_to))
            current_file = 0
            for hdu, fname in self.image_collection.hdus(
                    return_fname=True,
                    save_location=self.destination,
                    **self.apply_to):
                current_file += 1
                try:
                    unit = hdu.header['BUNIT']
                except KeyError:
                    unit = DEFAULT_IMAGE_UNIT
                ccd = ccdproc.CCDData(data=hdu.data,
                                      meta=hdu.header,
                                      unit=unit)

                for child in self.container.children:
                    if not child.toggle.value:
                        # Nothing to do for this child, so keep going.
                        continue
                    ccd = child.action(ccd)
                hdu_tmp = ccd.to_hdu()[0]
                hdu.header = hdu_tmp.header
                hdu.data = hdu_tmp.data
                reduced_images.append(ccd)
                self.progress_bar.description = \
                    ("Processed file {} of {}".format(current_file, n_files))
                self.progress_bar.value = current_file / n_files
        except IOError:
            print("One or more of the reduced images already exists. Delete "
                  "those files and try again. This notebook will NOT "
                  "overwrite existing files.")
        finally:
            self.progress_bar.visible = False

        self._reduced_images = reduced_images
Example #15
def realtimeRed(storePath, analyPath, masterDark):
    neos = ccdproc.ImageFileCollection(location=analyPath)
    neoList = []
    for neo, fname in neos.hdus(return_fname=True):
        meta = neo.header
        meta['filename'] = fname
        neoList.append(ccdproc.CCDData(data=neo.data, header=meta, unit="adu"))
    masterBias_e = ccdproc.gain_correct(masterBias, gain=1 * u.electron / u.adu)
    masterDark_e = ccdproc.gain_correct(masterDark, gain=1 * u.electron / u.adu)
    masterFlat_e = ccdproc.gain_correct(masterFlat, gain=1 * u.electron / u.adu)
    for neo in neoList:
        neo_red = ccdproc.ccd_process(neo, master_bias=masterBias_e, dark_frame=masterDark_e, master_flat=masterFlat_e
                                       , gain=1 * u.electron / u.adu, readnoise=readnoise, min_value=1.
                                      , dark_exposure=darkExp * u.second, data_exposure=neo.header['exptime'] * u.second
                                      , exposure_unit=u.second, dark_scale=True)
        baseName = os.path.basename(neo.header['filename'])
        fits.writeto("{}{}_red.fits".format(storePath, baseName.split('.')[0]), neo_red.data, header=neo_red.header, overwrite=False)
Example #16
def oscan_trim_file(fname, datahdus=0):
    hdulist = fits.open(fname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    # loop from first-data to last HDU, unless datahdus is set
    hduindexes = list(range(nhdus))[istart:]
    if datahdus != 0:
        hduindexes = datahdus
    for i in hduindexes:
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        # What happens if file is already overscan-subtracted?
        # We should probably default to using a model
        if modeling:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=models.Polynomial1D(1))
        else:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=None)

        trim1 = ccdproc.trim_image(oscan1,
                                   fits_section=oscan1.header['TRIMSEC'],
                                   add_keyword={
                                       'trimmed': True,
                                       'calstat': 'OT'
                                   })
        fits.update(fname, trim1.data, header=trim1.header, ext=i)
    hdulist.close()
    mylog("Overscan and trim {0}".format(fname))
    return
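This relies on BIASSEC and TRIMSEC keywords being present in each data HDU; made-up values for a 2048x2048 chip with 32 overscan columns might look like:

# Invented header values illustrating what oscan_trim_file expects.
# BIASSEC = '[2049:2080,1:2048]'  # overscan region, one-based FITS section
# TRIMSEC = '[1:2048,1:2048]'     # science region kept after trimming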
Example #17
def oscan_trim_file(fname):
    hdulist = fits.open(fname)
    nhdus = len(hdulist)
    if nhdus > 1:
        istart = 1
    else:
        istart = 0
    # loop from first-data to last HDU.
    for i in range(nhdus)[istart:]:
        data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
        data1.header = hdulist[i].header
        # What happens if file is already overscan-subtracted?
        if modeling:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=models.Polynomial1D(1))
        else:
            oscan1 = ccdproc.subtract_overscan(
                data1,
                fits_section=data1.header['BIASSEC'],
                add_keyword={
                    'overscan': True,
                    'calstat': 'O'
                },
                model=None)

        trim1 = ccdproc.trim_image(oscan1,
                                   fits_section=oscan1.header['TRIMSEC'],
                                   add_keyword={
                                       'trimmed': True,
                                       'calstat': 'OT'
                                   })
        fits.update(fname, trim1.data, header=trim1.header, ext=i)
    hdulist.close()
    return
Example #18
    def make_noiseless_image(self, centre, time, f):
        """
        Function to create a noiseless simulated image for a given image centre and observation time.
        """
        electrons = np.zeros(
            (self.wcs._naxis2, self.wcs._naxis1)) * u.electron / u.second

        # Calculate observed zodiacal light background.
        # Get relative zodical light brightness for each pixel
        # Note, side effect of this is setting centre of self.wcs
        pixel_coords = self.get_pixel_coords(centre)
        zl_rel = zl.relative_brightness(pixel_coords, time)

        # TODO: calculate area of each pixel, for now use nominal pixel scale^2
        # Finally multiply to get an observed zodical light image
        zl_obs = self.zl_obs_ep * zl_rel * self.pixel_scale**2

        electrons += zl_obs

        noiseless = ccdproc.CCDData(electrons, wcs=self.wcs)

        return noiseless
Example #19
    def _action_for_one_group(self, filter_dict=None):
        combined_dict = self.apply_to.copy()
        if filter_dict is not None:
            combined_dict.update(filter_dict)

        images = []

        for hdu in self.image_source.hdus(**combined_dict):
            try:
                unit = hdu.header['BUNIT']
            except KeyError:
                unit = DEFAULT_IMAGE_UNIT
            images.append(
                ccdproc.CCDData(data=hdu.data, meta=hdu.header, unit=unit))
        combiner = ccdproc.Combiner(images, dtype=images[0].dtype)
        if self._clipping_widget.toggle.value:
            if self._clipping_widget.min_max:
                combiner.minmax_clipping(
                    min_clip=self._clipping_widget.min_max.min,
                    max_clip=self._clipping_widget.min_max.max)
            if self._clipping_widget.sigma_clip:
                combiner.sigma_clipping(
                    low_thresh=self._clipping_widget.sigma_clip.min,
                    high_thresh=self._clipping_widget.sigma_clip.max,
                    func=np.ma.median)
        if self._combine_method.scaling_func:
            combiner.scaling = self._combine_method.scaling_func
        if self._combine_method.method == 'Average':
            combined = combiner.average_combine()
        elif self._combine_method.method == 'Median':
            combined = combiner.median_combine()
        combined.header = images[0].header
        combined.header['master'] = True
        if combined.data.dtype != images[0].dtype:
            combined.data = np.array(combined.data, dtype=images[0].dtype)
        return combined
Example #20
def combine_list_to_file(listname,
                         outname,
                         read_from_file=False,
                         combine='median',
                         datahdus=0):
    if read_from_file == True:
        listfile = open(listname, 'r')
        flist = listfile.read().splitlines()
        listfile.close()
    else:
        flist = listname
    if len(flist) == 0:
        mylog('Empty file list, nothing to combine: {0}'.format(listname))
        return
    # combine_list will be a list of lists of data objects
    combine_list = []
    ifirst = True
    for line in flist:
        fname = line.strip()
        hdulist = fits.open(fname)
        if ifirst == True:
            try:
                # This should propagate header and any HDUs that don't
                # get combined (ie are not in datahdus list) to the output.
                hdulist.writeto(outname)
            except:
                mylog('Failed write, trying to overwrite: {0}'.format(outname))
                hdulist.writeto(outname, overwrite=True)
            ifirst = False
            nhdus = len(hdulist)
            if nhdus > 1:
                istart = 1
            else:
                istart = 0
            hduindexes = list(range(nhdus))[istart:]
            if datahdus != 0:
                hduindexes = datahdus
        # Data of this image will be listed in a single entry in combine_list
        tmplist = []
        for i in hduindexes:
            data1 = ccdproc.CCDData(hdulist[i].data, unit="adu")
            head1 = hdulist[i].header
            head1['filename'] = fname
            data1.header = head1
            tmplist.append(data1)
            # combine_list.append(ccdproc.CCDData(data=data1, meta=head1, unit="adu"))
        hdulist.close()
        combine_list.append(tmplist)

    for i in hduindexes:
        # Take the i-1'th data extension (0-based) from each image and
        # put these into a list to combine
        tmplist = [elem[i - 1] for elem in combine_list]
        combo = ccdproc.Combiner(tmplist)
        if combine == 'average':
            output1 = combo.average_combine()
        else:
            output1 = combo.median_combine()
        # Header output by combine is minimal and causes failure
        # fits.update(outname, output1.data, header=output1.header, ext=i)
        # but outputting without header field leaves only 6 line header
        # fits.update(outname, output1.data, ext=i)
        # copying header from the first input image works.
        fits.update(outname, output1.data, header=tmplist[0].header, ext=i)
    mylog("Combined to output file {0}".format(outname))
    return
Example #21
    def action(self):
        if not self.image_collection:
            raise ValueError("No images to reduce")
        self.progress_bar.visible = True
        self.progress_bar.layout.visibility = 'visible'
        self.progress_bar.layout.display = 'flex'

        # Refresh in case files have been added since the widget was created.
        self.image_collection.refresh()

        # Only refresh the master_source if it exists. No need to error check
        # the main image_collection because a sensible error is raised if it
        # does not exist.
        if self._master_source:
            self._master_source.refresh()

        # Suppress warnings that come up here...mostly about HIERARCH keywords
        warnings.filterwarnings('ignore')
        try:
            n_files = \
                len(self.image_collection.files_filtered(**self.apply_to))
            current_file = 0
            for hdu, fname in self.image_collection.hdus(
                    return_fname=True,
                    save_location=self.destination,
                    **self.apply_to):
                current_file += 1
                try:
                    unit = hdu.header['BUNIT']
                except KeyError:
                    unit = DEFAULT_IMAGE_UNIT
                ccd = ccdproc.CCDData(hdu.data, meta=hdu.header, unit=unit)
                for child in self.container.children:
                    if not child.toggle.value:
                        # Nothing to do for this child, so keep going.
                        continue
                    ccd = child.action(ccd)

                input_dtype = hdu.data.dtype.name
                hdu_tmp = ccd.to_hdu()[0]
                hdu.header = hdu_tmp.header
                hdu.data = hdu_tmp.data
                desired_dtype = REDUCE_IMAGE_DTYPE_MAPPING[str(input_dtype)]
                if desired_dtype != hdu.data.dtype:
                    hdu.data = hdu.data.astype(desired_dtype)

                # Workaround to ensure uint16 images are handled properly.
                if 'bzero' in hdu.header:
                    # Check for the unsigned int16 case, and if our data type
                    # is no longer uint16, delete BZERO and BSCALE
                    header_unsigned_int = ((hdu.header['bscale'] == 1)
                                           and (hdu.header['bzero'] == 32768))
                    if (header_unsigned_int
                            and (hdu.data.dtype != np.dtype('uint16'))):

                        del hdu.header['bzero'], hdu.header['bscale']

                self.progress_bar.description = \
                    ("Processed file {} of {}".format(current_file, n_files))
                self.progress_bar.value = current_file / n_files
        except IOError:
            print("One or more of the reduced images already exists. Delete "
                  "those files and try again. This notebook will NOT "
                  "overwrite existing files.")
        finally:
            self.progress_bar.visible = False
            self.progress_bar.layout.display = 'none'
Example #22
        gain = 1.4 * u.electron / u.adu
        gainValue = 1.4
        readnoise = 8.2 * u.electron
        readnoiseValue = 8.2
        biasPath = "/LWTanaly/{}/bias/".format(date)
        darkPath_test = "/LWTanaly/{}/dark_test/".format(date)
        flatPath = "/LWTanaly/{}/flat/".format(date)

        # Create master bias frame.
        biases = ccdproc.ImageFileCollection(location=biasPath)
        biasList = []
        for bias, fname in biases.hdus(return_fname=True):
            meta = bias.header
            meta['filename'] = fname
            biasList.append(ccdproc.CCDData(data=bias.data, meta=meta, unit='adu'))
        masterBias = ccdproc.combine(biasList, output_file="/LWTanaly/{}/bias/Master_Bias.fits".format(date), method='median'
                                      , clip_extrema=True, nlow=0, nhigh=1)

        # Create master dark frame for each NEO.
        darkPaths = [i for i in glob("/LWTanaly/{}/dark_*".format(date)) if 'test' not in i]
        masterDark = []
        for path in darkPaths:
            darks = ccdproc.ImageFileCollection(location=path+'/')
            darkList = []
            for dark, fname in darks.hdus(return_fname=True):
                meta = dark.header
                meta['filename'] = fname
                darkList.append(ccdproc.CCDData(data=dark.data, meta=meta, unit="adu"))
            darkList_b = []
            for dark in darkList:
Example #23
 def run(self):
     """ Runs the calibrating algorithm. The calibrated data is
         returned in self.dataout
     """
     ### Preparation
     # Load bias files if necessary
     if not self.biasloaded or self.getarg('reload'):
         self.loadbias()
     # Else: check data for correct instrument configuration - currently not in use(need improvement)
     else:
         for keyind in range(len(self.biasfitkeys)):
             if self.biaskeyvalues[keyind] != self.datain.getheadval(
                     self.biasfitkeys[keyind]):
                 self.log.warn(
                     'New data has different FITS key value for keyword %s'
                     % self.biasfitkeys[keyind])
     # Load dark files if necessary
     if not self.darkloaded or self.getarg('reload'):
         self.loaddark()
     # Else: check data for correct instrument configuration
     else:
         for keyind in range(len(self.darkfitkeys)):
             if self.darkkeyvalues[keyind] != self.datain.getheadval(
                     self.darkfitkeys[keyind]):
                 self.log.warn(
                     'New data has different FITS key value for keyword %s'
                     % self.darkfitkeys[keyind])
     # Load flat files if necessary
     if not self.flatloaded or self.getarg('reload'):
         self.loadflat()
     # Else: check data for correct instrument configuration
     else:
         for keyind in range(len(self.flatfitkeys)):
             if self.flatkeyvalues[keyind] != self.datain.getheadval(
                     self.flatfitkeys[keyind]):
                 self.log.warn(
                     'New data has different FITS key value for keyword %s'
                     % self.flatfitkeys[keyind])
     #convert self.datain to CCD Data object
     image = ccdproc.CCDData(self.datain.image, unit='adu')
     image.header = self.datain.header
     #subtract bias from image
     image = ccdproc.subtract_bias(image, self.bias, add_keyword=False)
     #subtract dark from image
     image = ccdproc.subtract_dark(image,
                                   self.dark,
                                   scale=True,
                                   exposure_time='EXPTIME',
                                   exposure_unit=u.second,
                                   add_keyword=False)
     #apply flat correction to image
     image = ccdproc.flat_correct(image, self.flat, add_keyword=False)
     # copy calibrated image into self.dataout - make sure self.dataout is a pipedata object
     self.dataout = DataFits(config=self.datain.config)
     self.dataout.image = image.data
     self.dataout.header = image.header
     self.dataout.filename = self.datain.filename
     ### Finish - cleanup
     # Update DATATYPE
     self.dataout.setheadval('DATATYPE', 'IMAGE')
     # Add bias, dark files to History
     self.dataout.setheadval('HISTORY', 'BIAS: %s' % self.biasname)
     self.dataout.setheadval('HISTORY', 'DARK: %s' % self.darkname)
     self.dataout.setheadval('HISTORY', 'FLAT: %s' % self.flatname)
Example #24
def stacking_procedure(images: [Image]):
    """Stacks the images in the list into a single one and writes the file.
    The header will be taken from the middle image in the list.
    The time will run from the start of the first to the end of the last
    exposure, with an exposure length equal to this difference.

    :param images: list of Image objects to stack
    """

    # go through images and collect info
    names = []
    i = 0
    half = len(images) // 2

    tfirst = images[0].get_time_jd()
    tlast = images[-1].get_time_jd() + images[-1].get_exposure() / 86400
    timejd_mid = (tfirst + tlast) / 2
    timecoverage = (tlast - tfirst) * 86400  # s

    for image in images:  # get stack names
        if "stack" in image.processing_parameters:
            for name in image.get_stack():
                names.append(name)
        else:
            names.append(str(image.get_path()))
        i += 1
        if i == half:
            header = fits.getheader(image.get_path())

    midpoint_WCS = WCS(header)

    reprojected = []
    # reproject all images onto the middle one
    for image in images:
        data = fits.getdata(image.get_path())
        header_wcs = fits.getheader(image.get_path())
        ccddata = ccdproc.CCDData(data, wcs=WCS(header_wcs), unit="adu")
        reprojected.append(wcs_project(ccddata, midpoint_WCS))

    combiner = ccdproc.Combiner(reprojected)
    final_image_data = combiner.average_combine()
    final_image_data.wcs = WCS(header)
    header["EXPTIME"] = timecoverage
    header["EXPOSURE"] = timecoverage
    header["JD"] = tfirst

    filename = "stack-" + str(timejd_mid) + "-e" + "{:.0f}".format(
        timecoverage) + ".fits"

    path = Path("/tmp")
    path = path / filename

    if os.path.exists(path):
        os.remove(path)
    fits.writeto(path, final_image_data.data, header)

    stacked_image = Image(fixed_parameters={
        "path": path,
        "exposure": timecoverage,
        "time_jd": tfirst,
        "type": "data",
        "id": filename
    },
                          processing_parameters={
                              "flat": True,
                              "dark": True,
                              "stack": names
                          })
    return stacked_image
Example #25
    full_image=np.concatenate((imageup,imagedo),axis=0)
    return full_image
#------------------------------------------------------------------------------    
    
    
#-------------------------------------------------------------------------------------------------
#   Main to test the library					
#-------------------------------------------------------------------------------------------------
if __name__ == "__main__":
	in_masterbias_filename='masterbias1.fits'
	out_masterbias_filename='masterbias1_outtest.fits'
	
	hdu_list = fits.open(in_masterbias_filename)
	hdu_list.info()

	header=hdu_list[0].header
	meta=header
	
	print('- Header read from file :: ')
	print(header)
	
	# all CCDPROC data collector : each channel as a list of biases data
	allccd = []
	for chan in range(1,NB_OF_CHANNELS+1,1):
		ccd_chan =  ccdproc.CCDData(hdu_list[chan].data,meta=header,unit="adu")
		allccd.append(ccd_chan)

	SaveCCDListIntoFitsFile(allccd,out_masterbias_filename,meta)
	hdu_list.close()
Example #26
def find_apertures(im, start_col=2100, max_drift=5, max_apwidth=10,
                   n_pix_goodap=1500, n_adj=7, n_smooth=3, n_sep=5, c=3):
    """ find apertures from image

    Parameters
    ----------
    im:
        image
    start_col:
        start column
    max_drift:
        max_drift in finding an aperture
    max_apwidth:
        local comparison width
    n_pix_goodap:
        a good aperture should be more than this number of pixels

    Returns
    -------
    ymmax_goodap: ndarray
        the y-pixel values for good apertures
    """
    # gaussian smooth if c>0
    if c > 0:
        im = ccdproc.CCDData(gaussian(im, sigma=(c, 0)), unit=im.unit)

    # find max & min
    smmax, smmin = find_mmax_mmin(im, start_col=start_col, n_adj=n_adj,
                                  n_smooth=n_smooth, n_sep=n_sep)

    # initialize results
    ymmax = np.zeros((len(smmax), im.shape[1]))
    ymmax[:, start_col] = smmax
    ymmin = np.zeros((len(smmin), im.shape[1]))
    ymmin[:, start_col] = smmin

    # tracing apertures
    for i_ap in range(ymmax.shape[0]):
        for i_col in np.arange(start_col + 1, im.shape[1]):
            y0 = int(ymmax[i_ap, i_col - 1])
            y1 = np.argmax(im[np.max((0, y0 - max_apwidth)):np.min(
                (im.shape[0], y0 + 1 + max_apwidth)), i_col].data *
                           im[np.max((0, y0 - max_apwidth)):np.min(
                               (im.shape[0], y0 + 1 + max_apwidth)),
                           i_col - 1].data) + y0 - max_apwidth
            if np.abs(y1 - y0) < max_drift:
                # good ap, continue
                ymmax[i_ap, i_col] = y1
            else:
                break
        for i_col in np.arange(start_col - 1, 0, -1):
            y0 = int(ymmax[i_ap, i_col + 1])
            y1 = np.argmax(im[np.max((0, y0 - max_apwidth)):np.min(
                (im.shape[0], y0 + 1 + max_apwidth)), i_col].data *
                           im[np.max((0, y0 - max_apwidth)):np.min(
                               (im.shape[0], y0 + 1 + max_apwidth)),
                           i_col + 1].data) + y0 - max_apwidth
            if np.abs(y1 - y0) < max_drift:
                # good ap, continue
                ymmax[i_ap, i_col] = y1
            else:
                break
        print("@Cham: tracing aperture [%s] " % i_ap)
    #
    # for i_ap in range(ymmin.shape[0]):
    #     for i_col in np.arange(start_col + 1, im.shape[1]):
    #         y0 = ymmin[i_ap, i_col - 1]
    #         y1 = np.argmin(im[np.max((0, y0 - max_apwidth)):np.min(
    #             (im.shape[0], y0 + 1 + max_apwidth)), i_col].data *
    #                        im[np.max((0, y0 - max_apwidth)):np.min(
    #                            (im.shape[0], y0 + 1 + max_apwidth)),
    #                        i_col - 1].data) + y0 - max_apwidth
    #         if np.abs(y1 - y0) < max_drift:
    #             # good ap, continue
    #             ymmin[i_ap, i_col] = y1
    #         else:
    #             break
    #     for i_col in np.arange(start_col - 1, 0, -1):
    #         y0 = ymmin[i_ap, i_col + 1]
    #         y1 = np.argmin(im[np.max((0, y0 - max_apwidth)):np.min(
    #             (im.shape[0], y0 + 1 + max_apwidth)), i_col].data *
    #                        im[np.max((0, y0 - max_apwidth)):np.min(
    #                            (im.shape[0], y0 + 1 + max_apwidth)),
    #                        i_col + 1].data) + y0 - max_apwidth
    #         if np.abs(y1 - y0) < max_drift:
    #             # good ap, continue
    #             ymmin[i_ap, i_col] = y1
    #         else:
    #             break
    #     print i_ap

    ind_goodap = np.sum(ymmax > 0, axis=1) > n_pix_goodap
    ymmax_goodap = ymmax[ind_goodap, :]
    print("@Cham: number of good aps (max) = %s " % np.sum(ind_goodap))

    #    ind_goodap = np.sum(ymmax>0, axis=1)>1000
    #    ymmin_goodap = ymmin[ind_goodap, :]
    #    print("@Cham: number of good aps (min)", np.sum(ind_goodap))

    return ymmax_goodap  # , ymmin_goodap
Example #27
def main():

    import time

    start = time.time()
    print("<STATUS> Starting clock ...")

    input_dir = do_argparse()

    log = open("log.txt", "w")
    print("<STATUS> Opening data log ...")

    #
    # BIAS ANALYSIS
    #
    bias_dir = input_dir + "bias"
    print("<STATUS> Bias directory defined as", str(bias_dir), "...")

    bias_list = []

    os.chdir(bias_dir)
    print("<STATUS> Changing directory to", str(bias_dir), "...")

    for frame in glob.glob("*.fit"):
        bias_list.append(frame)
        print("<STATUS> Appending", str(frame), "to bias list ...")

    print("<STATUS> Bias list defined as", str(bias_list), "...")

    # Define 1st bias
    bias_1 = bias_list[0]
    print("<STATUS> Reading test bias 1 as", str(bias_1), "...", type(bias_1))

    bias_1_fits = fits.open(str(bias_dir) + "/" + str(bias_1))
    print("<STATUS> Opening", str(bias_1), "as FITS ...", type(bias_1_fits))

    bias_1_array = bias_1_fits[0].data
    print("<STATUS> Reading", str(bias_1), "data ...", type(bias_1_array))

    bias_1_array = bias_1_array.astype(float)
    print("<STATUS> Converting", str(bias_1), "data to float ...",
          type(bias_1_array))

    # Define 2nd bias
    bias_2 = bias_list[1]
    print("<STATUS> Reading test bias 2 as", str(bias_2), "...", type(bias_2))

    bias_2_fits = fits.open(str(bias_dir) + "/" + str(bias_2))
    print("<STATUS> Opening", str(bias_2), "as FITS ...", type(bias_2_fits))

    bias_2_array = bias_2_fits[0].data
    print("<STATUS> Reading", str(bias_2), "data ...", type(bias_2_array))

    bias_2_array = bias_2_array.astype(float)
    print("<STATUS> Converting", str(bias_2), "data to float ...",
          type(bias_2_array))

    # Subtract two bias frames from list
    diff_bias_array = np.subtract(bias_1_array, bias_2_array)
    print("<STATUS> Subtracting", str(bias_1), "and", str(bias_2), "...",
          type(diff_bias_array))

    # Calculate readout noise from difference bias frames
    diff_stddev = np.std(diff_bias_array)
    print("<STATUS> Calculating standard deviation from difference ...")

    readout_noise = diff_stddev / math.sqrt(2)
    print("<STATUS> Calculating readout noise ...")

    log.write("Bias RN = " + str(readout_noise) + " ADU\n\n")
    print("<STATUS> Writing result to log ...")

    # Median combine bias frames
    master_bias = ccdproc.combine(bias_list, method="median", unit="adu")
    print("<STATUS> Creating master bias ...", type(master_bias))

    # Histogram of bias
    master_bias_array = master_bias.data
    print("<STATUS> Reading master bias data ...", type(master_bias_array))

    f_bias_array = master_bias_array.flatten()
    print("<STATUS> Flattening array ...")

    bias_min = np.min(f_bias_array)
    print("<STATUS> Calculating master bias minimum ...")

    bias_max = np.max(f_bias_array)
    print("<STATUS> Calculating master bias maximum ...")

    print("<STATUS> Producing histogram of master bias ...")
    bias_xrange = (bias_min - 10, bias_max + 10)
    nbins = np.arange(bias_min, bias_max + 1, 1)
    font = {"fontname": "Monospace", "size": 10}
    plt.hist(f_bias_array, range=bias_xrange, bins=nbins, histtype="step")
    plt.title("Histogram of Master Bias\nnbins=" + str(len(nbins)), **font)
    plt.xticks(**font)
    plt.yticks(**font)
    plt.savefig(str(input_dir) + "/bias-hist.png")

    #
    # DARK ANALYSIS
    #
    dark_dir = input_dir + "dark"
    print("<STATUS> Dark directory defined as", str(dark_dir), "...")

    dark_list = []

    os.chdir(dark_dir)
    print("<STATUS> Changing directory to", str(dark_dir), "...")

    for frame in glob.glob("*.fit"):
        dark_list.append(frame)
        print("<STATUS> Appending", str(frame), "to dark list ...")

    # Dark frame 1
    dark_1 = dark_list[0]
    print("<STATUS> Reading dark 1 as", str(dark_1), "...", type(dark_1))

    dark_1_fits = fits.open(str(dark_dir) + "/" + str(dark_1))
    print("<STATUS> Opening", str(dark_1), "as FITS...", type(dark_1_fits))

    dark_1_array = dark_1_fits[0].data
    print("<STATUS> Reading", str(dark_1), "data...", type(dark_1_array))

    dark_1_array = dark_1_array.astype(float)
    print("<STATUS> Converting", str(dark_1), "data to float...",
          type(dark_1_array))

    dark_min_bias_1_array = np.subtract(dark_1_array, master_bias_array)
    print("<STATUS> Subtracting bias from", str(dark_1), "...",
          type(dark_min_bias_1_array))

    # Dark frame 2
    dark_2 = dark_list[1]
    print("<STATUS> Reading dark 2 as", str(dark_2), "...", type(dark_2))

    dark_2_fits = fits.open(str(dark_dir) + "/" + str(dark_2))
    print("<STATUS> Opening", str(dark_2), "as FITS...", type(dark_2_fits))

    dark_2_array = dark_2_fits[0].data
    print("<STATUS> Reading", str(dark_2), "data...", type(dark_2_array))

    dark_2_array = dark_2_array.astype(float)
    print("<STATUS> Converting", str(dark_2), "data to float...",
          type(dark_2_array))

    dark_min_bias_2_array = np.subtract(dark_2_array, master_bias_array)
    print("<STATUS> Subtracting bias from", str(dark_2), "...",
          type(dark_min_bias_2_array))

    # Dark sum
    dark_sum = np.add(dark_min_bias_1_array, dark_min_bias_2_array)
    print("<STATUS> Adding corrected", str(dark_1), "and corrected",
          str(dark_2), "...")

    dark_1_exptime = dark_1_fits[0].header["EXPTIME"]
    print("<STATUS> Reading dark 1 exposure time ...", str(dark_1_exptime),
          "s")

    dark_2_exptime = dark_2_fits[0].header["EXPTIME"]
    print("<STATUS> Reading dark 2 exposure time ...", str(dark_2_exptime),
          "s")

    total_exp = dark_1_exptime + dark_2_exptime
    print("<STATUS> Adding exposure times...", str(total_exp), "s")

    # Dark current
    dark_current_array = dark_sum / total_exp
    print("<STATUS> Creating dark current frame ...", type(dark_current_array))

    #
    # FLAT ANALYSIS
    #
    flat_dir = input_dir + "flat"
    print("<STATUS> Flat directory defined as", str(flat_dir), "...")

    flat_half_dir = flat_dir + "/flat_half"
    print("<STATUS> Half-saturation flat directory defined as",
          str(flat_half_dir), "...")

    flat_linear_dir = flat_dir + "/flat_linear"
    print("<STATUS> Linear flat directory defined as", str(flat_linear_dir),
          "...")

    flat_half_list = []

    os.chdir(flat_half_dir)
    print("<STATUS> Changing directory to", str(flat_half_dir), "...")

    for frame in glob.glob("*.fit"):
        flat_half_list.append(frame)
        print("<STATUS> Appending", str(frame),
              "to half-saturation flat list ...")

    flat_half_fits = fits.open(flat_half_list[0])
    print("<STATUS> Opening", str(flat_half_list[0]), "as FITS...",
          type(flat_half_fits))

    flat_half_exptime = flat_half_fits[0].header["EXPTIME"]
    print("<STATUS> Reading", str(flat_half_list[0]), "exposure time...",
          str(flat_half_exptime), "s")

    flat_half_combined = ccdproc.combine(flat_half_list,
                                         method="median",
                                         unit="adu")
    print("<STATUS> Creating median-combined flat ...",
          type(flat_half_combined))

    flat_min_dark_array = flat_half_combined.data - (dark_current_array *
                                                     flat_half_exptime)
    print("<STATUS> Subtracting scaled dark from combined flat ...",
          type(flat_min_dark_array))

    flat_avg_value = np.mean(flat_min_dark_array)
    print("<STATUS> Calculating mean of dark-subtracted flat ...",
          str(flat_avg_value), "ADU")

    flatfield_array = flat_min_dark_array / flat_avg_value
    print("<STATUS> Creating flatfield array ... ", type(flatfield_array))

    flatfield = ccdproc.CCDData(flatfield_array, unit="adu")
    print("<STATUS> Converting flatfield array to CCDData object ...",
          type(flatfield))

    # Histogram of flatfield
    flat_ff_array = flatfield_array.flatten()
    print("<STATUS> Flattening array ...")

    print("<STATUS> Producing histogram of flatfield ...")
    nbins = 500
    font = {"fontname": "Monospace", "size": 10}

    plt.hist(flat_ff_array, bins=nbins, histtype="step")
    plt.xlim(0.95, 1.05)
    plt.title("Histogram of Flatfield\nnbins=" + str(nbins), **font)
    plt.xticks(**font)
    plt.yticks(**font)
    plt.savefig(str(input_dir) + "/flat-hist.png")

    # Initialize lists for (sum) mean vs (diff) variance plot
    sum_mean_list = []
    sum_std_list = []
    diff_std_list = []

    # Initialize lists for linearity plot
    int_time_list = []
    mean_val_list = []
    stddev_list = []

    for x in os.walk(flat_linear_dir):

        os.chdir(x[0])
        print("<STATUS> Changing directory to", str(x[0]), "...")

        data_list = []

        for frame in glob.glob("*.fit"):

            frame_fits = fits.open(frame)
            print("<STATUS> Opening", str(frame), "as FITS...",
                  type(frame_fits))

            frame_exptime = frame_fits[0].header["EXPTIME"]
            print("<STATUS> Reading", str(frame), "exposure time...",
                  str(frame_exptime), "s")

            if frame_exptime not in int_time_list:
                int_time_list.append(frame_exptime)

            frame_array = frame_fits[0].data
            print("<STATUS> Reading", str(frame), "data...", type(frame_array))

            frame_array = frame_array - master_bias_array - (
                dark_current_array * frame_exptime)
            print("<STATUS> Calibrating",
                  str(frame), "with master bias and dark current at",
                  str(frame_exptime), "s ...", type(frame_array))

            data_list.append(frame_array)
            print("<STATUS> Appending calibrated", str(frame),
                  "to data list...")

        if len(data_list) != 0:

            data1 = data_list[0]
            data2 = data_list[1]

            data_sum = data1 + data2
            data_diff = data1 - data2

            # Data for mean vs variance plot
            if frame_exptime < 80:

                sum_mean = np.mean(data_sum)
                sum_mean_list.append(sum_mean)

                sum_std = np.std(data_sum)
                sum_std_list.append(sum_std)

                diff_std = np.std(data_diff)
                diff_std_list.append(diff_std)

            # Data for linearity measurement
            data_mean = np.mean(data_sum / 2)
            mean_val_list.append(data_mean)

            data1_std = np.std(data1)
            data2_std = np.std(data2)

            data_std = math.sqrt(data1_std**2 + data2_std**2)
            stddev_list.append(data_std)

    # Calculate variance of difference frames
    diff_var_list = []
    for value in diff_std_list:
        value = value**2
        diff_var_list.append(value)

    # Convert lists to arrays
    sum_mean_array = np.asarray(sum_mean_list)
    diff_var_array = np.asarray(diff_var_list)
    sum_std_array = np.asarray(sum_std_list)

    # Define model fn (linear: y = mx + b)
    def f(x, m, b):
        return (m * x) + b

    #
    # Unweighted fit calculation
    popt, pcov = curve_fit(f, diff_var_array, sum_mean_array)
    yfit = f(diff_var_array, *popt)

    print()
    print("(Unweighted) fit parameters:", popt)
    log.write("(Unweighted) fit parameters: " + str(popt) + "\n\n")

    print("(Unweighted) covariance matrix:", pcov)
    log.write("(Unweighted) covariance matrix: " + str(pcov) + "\n\n")

    # Slope
    m = popt[0]
    delta_m = math.sqrt(pcov[0][0])

    # y-intercept
    b = popt[1]
    delta_b = math.sqrt(pcov[1][1])

    # Gain
    g = 1 / m
    delta_g = (g**2) * delta_m

    # Readout noise
    sig = math.sqrt(g * b / (-2))
    delta_sig = math.sqrt(
        ((sig**2) / 4) * ((delta_b / b)**2 - (delta_g / g)**2))

    R = sig / g
    delta_R = R * math.sqrt((delta_sig / sig)**2 + (delta_g / g)**2)

    print("m =", "%.4f" % m, "+/-", "%.4f" % delta_m)
    log.write("m = " + str(m) + " +/- " + str(delta_m) + "\n\n")

    print("b =", "%.4f" % b, "+/-", "%.4f" % delta_b)
    log.write("b = " + str(b) + " +/- " + str(delta_b) + "\n\n")

    print("g = (", "%.4f" % g, "+/-", "%.4f" % delta_g, ") ADU/e")
    log.write("g = (" + str(g) + " +/- " + str(delta_g) + ") ADU/e\n\n")

    print("RN = (", "%.4f" % sig, "+/-", "%.4f" % delta_sig, ") ADU")
    log.write("RN = (" + str(sig) + " +/- " + str(delta_sig) + ") ADU\n\n")

    print("RN = (", "%.4f" % R, "+/-", "%.4f" % delta_R, ") e")
    log.write("RN = (" + str(R) + " +/- " + str(delta_R) + ") e\n\n")

    print()

    #
    # Weighted fit calculation
    popt2, pcov2 = curve_fit(f,
                             diff_var_array,
                             sum_mean_array,
                             sigma=sum_std_array,
                             absolute_sigma=True)
    yfit2 = f(diff_var_array, *popt2)

    print("(Weighted) fit parameters:", popt2)
    log.write("(Weighted) fit parameters: " + str(popt2) + "\n\n")

    print("(Weighted) covariance matrix:", pcov2)
    log.write("(Weighted) covariance matrix: " + str(pcov2) + "\n\n")

    # Slope
    m = popt2[0]
    delta_m = math.sqrt(pcov2[0][0])

    # y-intercept
    b = popt2[1]
    delta_b = math.sqrt(pcov2[1][1])

    # Gain
    g = 1 / m
    delta_g = (g**2) * delta_m

    # Readout noise
    sig = math.sqrt(g * b / (-2))
    delta_sig = math.sqrt(
        ((sig**2) / 4) * ((delta_b / b)**2 - (delta_g / g)**2))

    R = sig / g
    delta_R = R * math.sqrt((delta_sig / sig)**2 + (delta_g / g)**2)

    print("m =", "%.4f" % m, "+/-", "%.4f" % delta_m)
    log.write("m = " + str(m) + " +/- " + str(delta_m) + "\n\n")

    print("b =", "%.4f" % b, "+/-", "%.4f" % delta_b)
    log.write("b = " + str(b) + " +/- " + str(delta_b) + "\n\n")

    print("g = (", "%.4f" % g, "+/-", "%.4f" % delta_g, ") ADU/e")
    log.write("g = (" + str(g) + " +/- " + str(delta_g) + ") ADU/e\n\n")

    print("RN = (", "%.4f" % sig, "+/-", "%.4f" % delta_sig, ") ADU")
    log.write("RN = (" + str(sig) + " +/- " + str(delta_sig) + ") ADU\n\n")

    print("RN = (", "%.4f" % R, "+/-", "%.4f" % delta_R, ") e")
    log.write("RN = (" + str(R) + " +/- " + str(delta_R) + ") e\n\n")

    print()

    # Plotting
    plt.clf()
    font = {"fontname": "Monospace", "size": 10}
    plt.errorbar(diff_var_array,
                 sum_mean_array,
                 yerr=sum_std_array,
                 fmt="o",
                 linewidth=0.5,
                 markersize=0.5,
                 capsize=2,
                 capthick=0.5)
    plt.plot(diff_var_array, yfit, "--", linewidth=0.5, label="Unweighted fit")
    plt.plot(diff_var_array, yfit2, "--", linewidth=0.5, label="Weighted fit")
    plt.title("Mean Pixel Value vs Variance \n CTMO SBIG ST-8300M", **font)
    plt.xlabel("Difference Frame Variance (ADU$^2$)", **font)
    plt.ylabel("Sum Frame Mean (ADU)", **font)
    plt.xticks(**font)
    plt.yticks(**font)
    plt.legend()
    plt.savefig(str(input_dir) + "mean_vs_variance.png", dpi=300)

    plt.clf()
    font = {"fontname": "Monospace", "size": 10}
    plt.errorbar(int_time_list,
                 mean_val_list,
                 yerr=stddev_list,
                 fmt="o",
                 linewidth=0.5,
                 markersize=0.5,
                 capsize=2,
                 capthick=0.5)
    plt.title("Mean Pixel Value vs Integration Time \n CTMO SBIG ST-8300M",
              **font)
    plt.xlabel("Integration Time (s)", **font)
    plt.ylabel("Mean Frame Value (ADU)", **font)
    plt.xticks(**font)
    plt.yticks(**font)
    plt.savefig(str(input_dir) + "linearity.png", dpi=300)

    log.close()

    end = time.time()
    print(str(end - start) + " seconds to complete.")
Example #28
def ccd_rot90(ccd, k=1):
    return ccdproc.CCDData(np.rot90(ccd, k=k), unit=ccd.unit)
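The rotation above drops any mask, uncertainty, and WCS. A mask-preserving variant might look like this sketch (not part of the original source):

# Sketch of a variant that also rotates the mask when one is present.
def ccd_rot90_with_mask(ccd, k=1):
    rotated = ccdproc.CCDData(np.rot90(ccd.data, k=k), unit=ccd.unit)
    if ccd.mask is not None:
        rotated.mask = np.rot90(ccd.mask, k=k)
    return rotated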
Example #29
flat1_bias_trim = ccdproc.trim_image(flat1_bias[:, :4096])
flat2_bias_trim = ccdproc.trim_image(flat2_bias[:, :4096])
flat3_bias_trim = ccdproc.trim_image(flat3_bias[:, :4096])


""" combine flat """
flat_list = [flat1_bias_trim, flat2_bias_trim, flat3_bias_trim]

# find & combine & group apertures
ap_comb = combine_apertures(flat_list, n_jobs=10)
cheb_coefs, ap_uorder_interp = group_apertures(ap_comb, start_col=2100, order_dist=10)

# combine flat
flat_comb, flat_origin = combine_flat(flat_list, ap_uorder_interp, sat_count=45000, p=95)
flat_comb = ccdproc.CCDData(flat_comb, unit='adu')

# scattered light subtraction
flat_comb_sl = substract_scattered_light(flat_comb, ap_uorder_interp, ap_width=10, shrink=.85)
flat1d = extract_1dspec(flat_comb_sl, ap_uorder_interp, ap_width=7)[0]


""" thar """
ind_thar1 = (logt['obj']=='thar')*(logt['exp_time']==30)
ind_thar2 = (logt['obj']=='thar')*(logt['exp_time']==60)
ind_thar3 = (logt['obj']=='thar')*(logt['exp_time']==120)

thar1 = ccdproc.combine(','.join(logt['filename'][ind_thar1]), unit='adu', method='average')
thar2 = ccdproc.combine(','.join(logt['filename'][ind_thar2]), unit='adu', method='average')
thar3 = ccdproc.combine(','.join(logt['filename'][ind_thar3]), unit='adu', method='average')
Example #30
def hrs_process(image_name,
                ampsec=[],
                oscansec=[],
                trimsec=[],
                masterbias=None,
                error=False,
                bad_pixel_mask=None,
                flip=False,
                rdnoise=None,
                oscan_median=True,
                oscan_model=None):
    """Processing required for HRS observations.  If the images have multiple
       amps, then this will process each part of the image and recombine them
       for the final result.

    Parameters
    ----------
    image_name: str
       Name of file to be processed

    ampsec: list
       List of ampsections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    oscansec: list
       List of overscan sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    trimsec: list
       List of trim sections.  This list should have the same length as the
       number of amps in the data set.  The sections should be given
       in the format of fits_sections (see below).

    error: boolean
        If True, create an uncertainty array for ccd

    masterbias: None, `~numpy.ndarray`, or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.

    bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.


    flip: boolean
        If True, the image will be flipped such that the orders run from the
        bottom of the image to the top and the dispersion runs from the left
        to the right.

    rdnoise: None or `~astropy.Quantity`
        Read noise for the observations.  The read noise should be in
        `~astropy.units.electron`

    oscan_median :  bool, optional
        If true, takes the median of each line.  Otherwise, uses the mean

    oscan_model :  `~astropy.modeling.Model`, optional
        Model to fit to the data.  If None, returns the values calculated
        by the median or the mean.

    Returns
    -------
    ccd: `~ccdproc.CCDData`
        Data processed and, for multi-amp images, recombined


    Notes
    -----

    The format of the `fits_section` string follow the rules for slices that
    are consistent with the FITS standard (v3) and IRAF usage of keywords like
    TRIMSEC and BIASSEC. Its indexes are one-based, instead of the
    python-standard zero-based, and the first index is the one that increases
    most rapidly as you move through the array in memory order, opposite the
    python ordering.

    The 'fits_section' argument is provided as a convenience for those who are
    processing files that contain TRIMSEC and BIASSEC. The preferred, more
    pythonic, way of specifying the overscan is to do it by indexing the data
    array directly with the `overscan` argument.

    """
    # read in the data
    ccd = ccdproc.CCDData.read(image_name, unit=u.adu)

    try:
        namps = ccd.header['CCDAMPS']
    except KeyError:
        namps = ccd.header['CCDNAMPS']

    # throw errors for the wrong number of amps
    if len(ampsec) != namps:
        raise ValueError('Number of ampsec does not equal number of amps')
    if len(oscansec) != namps:
        raise ValueError('Number of oscansec does not equal number of amps')
    if len(trimsec) != namps:
        raise ValueError('Number of trimsec does not equal number of amps')

    if namps == 1:
        gain = float(ccd.header['gain'].split()[0]) * u.electron / u.adu
        nccd = ccd_process(ccd,
                           oscan=oscansec[0],
                           trim=trimsec[0],
                           error=error,
                           masterbias=masterbias,
                           bad_pixel_mask=bad_pixel_mask,
                           gain=gain,
                           rdnoise=rdnoise,
                           oscan_median=oscan_median,
                           oscan_model=oscan_model)
    else:
        ccd_list = []
        xsize = 0
        for i in range(namps):
            cc = ccdproc.trim_image(ccd, fits_section=ampsec[i])

            gain = float(ccd.header['gain'].split()[i]) * u.electron / u.adu
            ncc = ccd_process(cc,
                              oscan=oscansec[i],
                              trim=trimsec[i],
                              error=False,
                              masterbias=None,
                              gain=gain,
                              bad_pixel_mask=None,
                              rdnoise=rdnoise,
                              oscan_median=oscan_median,
                              oscan_model=oscan_model)
            xsize = xsize + ncc.shape[1]
            ysize = ncc.shape[0]
            ccd_list.append(ncc)

        # now recombine the processed data
        ncc = ccd_list[0]
        data = np.zeros((ysize, xsize))
        if ncc.mask is not None:
            mask = np.zeros((ysize, xsize))
        else:
            mask = None
        if ncc.uncertainty is not None:
            raise NotImplementedError(
                'Support for uncertainties not implemented yet')
        else:
            uncertainty = None

        x1 = 0
        for i in range(namps):
            x2 = x1 + ccd_list[i].data.shape[1]
            data[:, x1:x2] = ccd_list[i].data
            if mask is not None:
                mask[:, x1:x2] = ccd_list[i].mask
            x1 = x2

        nccd = ccdproc.CCDData(data,
                               unit=ncc.unit,
                               mask=mask,
                               uncertainty=uncertainty)
        nccd.header = ccd.header
        nccd = ccd_process(nccd,
                           masterbias=masterbias,
                           error=error,
                           gain=None,
                           rdnoise=rdnoise,
                           bad_pixel_mask=bad_pixel_mask)

    if flip:
        nccd.data = nccd.data[::-1, ::-1]
        if (nccd.mask is not None):
            nccd.mask = nccd.mask[::-1, ::-1]
        if (nccd.uncertainty is not None):
            nccd.uncertainty = nccd.uncertainty[::-1, ::-1]

    return nccd
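As a concrete illustration of the one-based fits_section convention described in the Notes (the section values here are invented):

# '[17:2064,1:2048]' keeps columns 17..2064 and all 2048 rows, i.e. the
# NumPy slice ccd.data[0:2048, 16:2064].
trimmed = ccdproc.trim_image(ccd, fits_section='[17:2064,1:2048]')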