Example 1
    def run(cls, config):
        """Execute the sky compress step, opening the input FITS file first.
        Need to override the PixCorrectImStep version since this step has no
        output version of the image.
        """
        in_fname = config.get(cls.step_name, 'in')
        try:
            ccdnum = config.getint(cls.step_name, 'ccdnum')
            image = DESImage.load(in_fname, ccdnum=ccdnum)
        except NoOptionError:
            image = DESImage.load(in_fname)

        ret_code = cls.step_run(image, config)
        return ret_code
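For context, a minimal sketch of the kind of configuration this run() method reads, including the optional 'ccdnum' fallback; the section name 'sky_compress' and the file name are assumptions for illustration, not the package's defaults.

from configparser import ConfigParser, NoOptionError

config = ConfigParser()
config['sky_compress'] = {'in': 'DECam_00123456_01.fits'}  # no 'ccdnum' on purpose

try:
    ccdnum = config.getint('sky_compress', 'ccdnum')
except NoOptionError:
    ccdnum = None  # run() falls back to DESImage.load(in_fname) in this case
print(config.get('sky_compress', 'in'), ccdnum)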
Example 2
    def test_nullop_im(self):
        with temp_pixcorrect_test_dir() as temp_dir:
            config = self.new_config(temp_dir)
            self.add_nullop_im_config(config)
            pix_corrector = PixCorrectIm(config)
            logger.debug('Doing nullop_im')
            pix_corrector()

            test_im = DESImage.load( config.get('pixcorrect_im', 'out') )
            ref_im = DESImage.load( path.join(ref_dir, 'scix.fits') )
            im_cmp = ref_im.compare( test_im )
            logger.info("Nullop_Im results")
            logger.debug(str(im_cmp.header))
            im_cmp.log(logger, ref_im)
            self.assertTrue(im_cmp.match())
Example 3
    def test_bias(self):
        with temp_pixcorrect_test_dir() as temp_dir:
            config = self.new_config(temp_dir)
            self.add_bpm_config(config)
            self.add_bias_config(config)
            pix_corrector = PixCorrectIm(config)
            logger.info('Doing bias correction')
            pix_corrector()

            test_im = DESImage.load( config.get('pixcorrect_im', 'out') )
            ref_im = DESImage.load( path.join(ref_dir, 'post_bias.fits') )
            in_im = DESImage.load( path.join(ref_dir, 'scix.fits') )
            im_cmp = ref_im.compare( test_im )
            logger.debug(str(im_cmp.header))
            im_cmp.log(logger, ref_im)
            self.assertTrue(im_cmp.match())
Example 4
    def __call__(self):
        """
        Run null_weights and row_interp in a single step by calling
        step_run in each class.
        """

        t0 = time.time()
        # Get the science image
        input_image = self.config.get(self.config_section, 'in')
        self.sci = DESImage.load(input_image)

        # Run null_weights
        t1 = time.time()
        logger.info("Running null_weights on: %s", input_image)
        null_weights.step_run(self.sci, self.config)
        logger.info("Time NullWeights : %s", elapsed_time(t1))

        # Run row_interp
        t2 = time.time()
        logger.info("Running row_interp on: %s", input_image)
        row_interp.step_run(self.sci, self.config)
        logger.info("Time RowInterp : %s", elapsed_time(t2))

        # Write out the image
        output_image = self.config.get(self.config_section, 'out')
        self.sci.save(output_image)
        logger.info("Wrote new file: %s", output_image)
        logger.info("Time Total: %s", elapsed_time(t0))

        return 0
Example 5
    def change_head(self, FileN, catalog, image, outname, CCD, **args):

        ccdLen = len(CCD)

        #Getting the data and saving into an array
        o = open(FileN, 'r').read().splitlines()
        info_array = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2','CD1_1','CD1_2','CD2_1','CD2_2',\
                  'PV1_0','PV1_1 ','PV1_2','PV1_4','PV1_5','PV1_6','PV1_7','PV1_8','PV1_9','PV1_10',\
                  'PV2_0','PV2_1 ','PV2_2','PV2_4','PV2_5','PV2_6','PV2_7','PV2_8','PV2_9','PV2_10']

        n = len(info_array)
        matrix = []
        for ii in o:
            for oo in info_array:
                if oo in ii.split('=')[0]:
                    #print ii.split('=')[0], ii.split('=')[1].split('/')[0]
                    matrix.append(ii.split('=')[1].split('/')[0])

        matrix = np.array(matrix)

        #changing the header
        cont = 0
        for i in range(ccdLen):
            ccdstring = "%02d" % int(CCD[i])
            args['ccd'] = ccdstring
            catalog1 = self.template_file.format(
                **args) + '_' + catalog + '.fits'
            image1 = self.template_file.format(**args) + '_' + image + '.fits'

            (fwhm_, ellip, count) = self.fwhm(catalog1, 0)

            h = DESImage.load(image1)
            h.header['FWHM'] = fwhm_
            h.header['ELLIPTIC'] = ellip
            h.header['SCAMPFLG'] = 0

            im = h.data
            iterate1 = stats.sigmaclip(im, 5, 5)[0]
            iterate2 = stats.sigmaclip(iterate1, 5, 5)[0]
            iterate3 = stats.sigmaclip(iterate2, 3, 3)[0]
            skybrite = np.median(iterate3)
            skysigma = np.std(iterate3)

            h.header['SKYBRITE'] = skybrite
            h.header['SKYSIGMA'] = skysigma
            h.header['CAMSYM'] = 'D'
            h.header['SCAMPCHI'] = 0.0
            h.header['SCAMPNUM'] = 0

            for j in range(n):
                h.header[info_array[j]] = float(matrix[cont])
                cont = cont + 1

            # Save once per CCD, after all WCS keywords have been copied in
            h.save(
                self.template_file.format(**args) + '_' + outname +
                '.fits')
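The SKYBRITE/SKYSIGMA values above come from iterative sigma clipping; below is a standalone sketch of that estimate on synthetic data (the image here is invented purely for illustration).

import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
im = rng.normal(loc=500.0, scale=10.0, size=(256, 256))
im[0:10, 0:10] = 60000.0  # a few "bright" pixels that the clipping should reject

# Two 5-sigma clips followed by a 3-sigma clip, then median/std of the survivors
iterate1 = stats.sigmaclip(im, 5, 5)[0]
iterate2 = stats.sigmaclip(iterate1, 5, 5)[0]
iterate3 = stats.sigmaclip(iterate2, 3, 3)[0]
print('SKYBRITE ~', np.median(iterate3))
print('SKYSIGMA ~', np.std(iterate3))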
Example 6
def change_head(File, catalog, image, CCD, **args):

    ccdLen = len(CCD)

    # Getting the data and saving into an array
    o = open(File, 'r').read().splitlines()
    info_array = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2','CD1_1','CD1_2','CD2_1','CD2_2',
                  'PV1_0','PV1_1 ','PV1_2','PV1_4','PV1_5','PV1_6','PV1_7','PV1_8','PV1_9','PV1_10',
                  'PV2_0','PV2_1 ','PV2_2','PV2_4','PV2_5','PV2_6','PV2_7','PV2_8','PV2_9','PV2_10']

    n = len(info_array)
    matrix = []
    for ii in o:
        for oo in info_array:
            if oo in ii.split('=')[0]:
                #print(ii.split('=')[0], ii.split('=')[1].split('/')[0])
                matrix.append(ii.split('=')[1].split('/')[0])

    matrix = np.array(matrix)

    # changing the header
    cont = 0
    for i in range(ccdLen):
        ccdstring = "%02d" % int(CCD[i])
        args['ccd'] = ccdstring
        catalog1 = template_file.format(**args) + '_' + catalog + '.fits'
        image1 = template_file.format(**args) + '_' + image + '.fits'

        fwhm_, ellip, count = fwhm(catalog1)

        h = DESImage.load(image1)
        h.header['FWHM'] = fwhm_
        h.header['ELLIPTIC'] = ellip
        h.header['SCAMPFLG'] = 0

        h1 = pyfits.open(image1)
        im = h1[0].data
        iterate1 = stats.sigmaclip(im, 5, 5)[0]
        iterate2 = stats.sigmaclip(iterate1, 5, 5)[0]
        iterate3 = stats.sigmaclip(iterate2, 3, 3)[0]
        skybrite = np.median(iterate3)
        skysigma = np.std(iterate3)

        h.header['SKYBRITE'] = skybrite
        h.header['SKYSIGMA'] = skysigma
        h.header['CAMSYM'] = 'D'
        h.header['SCAMPCHI'] = 0.0
        h.header['SCAMPNUM'] = 0

        for j in range(n):
            h.header[info_array[j]] = float(matrix[cont])
            cont = cont + 1

        h.save(template_file.format(**args) + '_wcs.fits')
Example 7
    def step_run(cls, image, config):
        """Customized execution for application of the Bias

        :Parameters:
            - `image`: the DESImage on which to operate
            - `flat`: the bias image to apply

        """

        flat_fname = config.get(cls.step_name, 'flat')
        logger.info('Reading flat correction from %s' % flat_fname)
        flat_im = DESImage.load(flat_fname)

        ret_code = cls.__call__(image, flat_im)
        return ret_code
Example 8
    def step_run(cls, image, config):
        """Customized execution for application of the Bias

        :Parameters:
            - `image`: the DESImage on which to operate
            - `bias`: the bias image to apply

        """

        bias_fname = config.get(cls.step_name, 'bias')
        logger.info('reading Bias from %s' % bias_fname)
        bias_im = DESImage.load(bias_fname)

        ret_code = cls.__call__(image, bias_im)
        return ret_code
Example 9
    def step_run(cls, image, config):
        """Customized execution for taking difference between an image and a comparison

        :Parameters:
            - `image`: the DESImage on which to operate
            - `comp`: the comparison image (to be subtracted)

        """

        comp_fname = config.get(cls.step_name, 'comp')
        logger.info('reading Comparison image from %s', comp_fname)
        comp_im = DESImage.load(comp_fname)

        ret_code = cls.__call__(image, comp_im)
        return ret_code
Example 10
    def step_run(cls, image, config):
        """Customized execution for addition of a weight plane.

        :Parameters:
            - `image`: the DESImage on which to operate
            - `config`: the configuration from which to get other parameters

        """
        logger.info('Weight will be added to %s' % image)

        flat_fname = config.get(cls.step_name, 'flat')
        logger.info('Reading flat correction from %s' % flat_fname)
        flat = DESImage.load(flat_fname)
        ret_code = cls.__call__(image, flat)
        return ret_code
Example 11
def run_updateWCS(args):

    # Attempt to populate FWHM, ELLIPTIC, NFWHMCNT keywords
    if args.fwhm:
        new_record = get_fwhm_from_catalog(args.fwhm,
                                           verbose=args.verbose,
                                           debug=args.debug)
    else:
        new_record = {}

    # Populate the new record with the XML and translate it into fitsio records format
    if args.xml:
        new_record = slurp_XML(args.xml,
                               new_record,
                               verbose=args.verbose,
                               debug=args.debug,
                               translate=True)
    else:
        new_record = []

    # Read in the input fits file using despyfits.DESImage
    input_image = DESImage.load(args.input)

    # run the main header updater
    input_image = run_update(input_image,
                             headfile=args.headfile,
                             hdupcfg=args.hdupcfg,
                             verbose=args.verbose,
                             new_record=new_record)

    # If the desepoch option is set, add a DESEPOCH record to the SCI plane only
    if args.desepoch:
        desepoch_rec = {
            'name': 'DESEPOCH',
            'value': args.desepoch,
            'comment': 'DES Observing epoch'
        }
        print(f"(updateWCS): Updating DESEPOCH={args.desepoch} to SCI header")
        input_image.header.add_record(desepoch_rec)

    # Saving the image as args.output, we compute the corners at write time
    print(f"(updateWCS): Closing/Saving image --> {args.output}")
    input_image.save(args.output)
Example 12
    def step_run(cls, image, config):
        """Customized execution for application of the Flat

        :Parameters:
            - `image`: the DESImage on which to operate
            - `flat`: the flat correction image to apply

        """

        flat_fname = config.get(cls.step_name, 'flat')
        logger.info('Reading flat correction from %s' % flat_fname)
        flat_im = DESImage.load(flat_fname)

        #       At present the only way to acquire gains is when function is run through
        #       tandem operation with gain_correct.  In the absence of having relative gains
        #       an empty dictionary is passed here.
        rel_gain_for_flat = {}

        ret_code = cls.__call__(image, flat_im, rel_gain_for_flat)
        return ret_code
Example 13
def sextractorPSF(name, name1, outname, filepsf, CCD, **args):
    args['ccd'] = CCD

    h = DESImage.load(template_file.format(**args) + '_' + str(name) + '.fits')
    fwhm = 0.263 * float(h.header['FWHM'])


    cmd = 'sex ' + template_file.format(**args)+'_'+name+'.fits[0]'+\
        ' -PSF_NAME ' + template_file.format(**args)+'_'+filepsf+\
        ' -c  ' + configFile2 + ' -FILTER_NAME ' + sexconvFile2 + ' -STARNNW_NAME ' +sexnnwFile + ' -CATALOG_NAME  ' + template_file.format(**args)+'_'+outname+'.fits'+\
        ' -FLAG_IMAGE '  + template_file.format(**args)+'_'+name+'.fits[1] -PARAMETERS_NAME ' + sexparamFile_2 +\
        ' -INTERP_TYPE VAR_ONLY  -INTERP_MAXXLAG 4 -INTERP_MAXYLAG 4  -SEEING_FWHM ' + str(fwhm) +\
 ' -DETECT_THRESH 1.5 -SATUR_KEY SATURATE  -CATALOG_TYPE FITS_LDAC -WEIGHT_IMAGE '+\
        template_file.format(**args)+'_'+name1+'.fits[2],'+template_file.format(**args)+'_'+name1+'.fits[2]'+\
        '  -WEIGHT_TYPE MAP_WEIGHT  -CHECKIMAGE_NAME ' + template_file.format(**args)+'_segmap.fits -CHECKIMAGE_TYPE SEGMENTATION'

    print('\n', cmd, '\n')

    retval = subprocess.call(cmd.split(), stderr=subprocess.STDOUT)
    if retval != 0:
        sys.exit(1)
    return
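A hedged variant of the call above that passes SExtractor its arguments as a list instead of splitting a concatenated string, which avoids quoting problems when paths contain spaces. The file names and parameter values here are placeholders; only options already used in the command above appear.

import subprocess
import sys

image = 'D00123456_r_c01_immask.fits'     # placeholder input image
psf = 'D00123456_r_c01.psf'               # placeholder PSF model
cat_out = 'D00123456_r_c01_fullcat.fits'  # placeholder output catalog

cmd = [
    'sex', image + '[0]',
    '-PSF_NAME', psf,
    '-CATALOG_NAME', cat_out,
    '-CATALOG_TYPE', 'FITS_LDAC',
    '-SEEING_FWHM', str(0.9),
    '-DETECT_THRESH', '1.5',
]
print('\n', ' '.join(cmd), '\n')
retval = subprocess.call(cmd, stderr=subprocess.STDOUT)
if retval != 0:
    sys.exit(1)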
Example 14
    def step_run(cls, image, config):
        """Customized execution for sky subtraction

        :Parameters:
            - `image`: the DESImage on which to operate
            - `config`: the configuration from which to get other parameters

        """

        # Passing config to the class
        cls.config = config

        if config.has_option(cls.step_name, 'fitfilename'):
            fit_filename = config.get(cls.step_name, 'fitfilename')
        else:
            fit_filename = None

        if config.has_option(cls.step_name, 'pcfilename'):
            pc_filename = config.get(cls.step_name, 'pcfilename')
        else:
            pc_filename = None

        weight = config.get(cls.step_name, 'weight')

        if config.has_option(cls.step_name, 'domefilename'):
            dome_filename = config.get(cls.step_name, 'domefilename')
            dome = DESImage.load(dome_filename)
        else:
            dome = None

        if config.has_option(cls.step_name, 'skymodel'):
            skymodel_filename = config.get(cls.step_name, 'skymodel')
        else:
            skymodel_filename = None

        logger.info('Sky fitting output to %s', image)

        ret_code = cls.__call__(image, fit_filename, pc_filename, weight, dome,
                                skymodel_filename)
        return ret_code
Example 15
    def __call__(self):
        """
        Run null_weights and row_zipper in a single step by calling
        step_run in each class.
        """
        t0 = time.time()

        # Check if we want special multi-epoch weighting, and which bits we want to 'save'
        me_wgt_keepmask = get_safe_boolean('me_wgt_keepmask', self.config, self.config_section)

        # Get verbose
        try:
            verbose = self.config.get(self.config_section, 'verbose')
        except:
            verbose = False

        # Get the science image
        input_image = self.config.get(self.config_section, 'in')
        self.sci = DESImage.load(input_image)

        # In case a streak table is provided, we proceed with the extra STREAK masking
        streak_file = self.config.get(self.config_section, 'streak_file')
        if os.path.exists(streak_file):
            add_width = self.config.getfloat(self.config_section, 'add_width')
            add_length = self.config.getfloat(self.config_section, 'add_length')
            max_extrapolate = self.config.getfloat(self.config_section, 'max_extrapolate')
            self.streakMask(streak_file,
                            addWidth=add_width,
                            addLength=add_length,
                            maxExtrapolate=max_extrapolate)

        # Add TILENAME and TILEID to sci header (optional) if required
        self.update_sci_header(input_image)

        # Update the header wcs if both headfile and hdupcfg are present (optional)
        self.update_wcs_header(input_image, verbose=verbose)

        # Check if we want to create the custom weight for the SWarp/SExtractor combination
        if me_wgt_keepmask:
            self.custom_weight(input_image)

        # Run null_weights
        t1 = time.time()
        logger.info("Running null_weights on: %s", input_image)
        null_weights.step_run(self.sci, self.config)
        logger.info("Time NullWeights : %s", elapsed_time(t1))

        # Run row_zipper
        t2 = time.time()
        logger.info("Running row_zipper on: %s", input_image)
        row_zipper.step_run(self.sci, self.config)
        logger.info("Time ZipperInterp : %s", elapsed_time(t2))

        # Null the sci image only if null_mask_sci !=0
        self.null_sci(input_image)

        output_image = self.config.get(self.config_section, 'out')
        # Special write out
        if me_wgt_keepmask:
            self.custom_write(output_image)
        else:
            self.sci.save(output_image)

        logger.info("Wrote new file: %s", output_image)
        logger.info("Time Total: %s", elapsed_time(t0))

        return 0
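A sketch of the option names this __call__ pulls from its config section, collected from the get/getfloat calls above; the section name 'coadd_nwgint' and all values below are placeholders, not the pipeline's actual settings.

from configparser import ConfigParser

config = ConfigParser()
config['coadd_nwgint'] = {
    'in': 'input_immask.fits',        # science image to load (placeholder)
    'out': 'output_nwgint.fits',      # output image (placeholder)
    'streak_file': 'streaks.fits',    # extra STREAK masking runs only if this file exists
    'add_width': '0.0',               # getfloat options passed to streakMask
    'add_length': '100.0',
    'max_extrapolate': '0.0',
    'me_wgt_keepmask': 'STAR',        # enables the custom multi-epoch weight (placeholder value)
    'verbose': 'True',
}
print(dict(config['coadd_nwgint']))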
Example 16
    def __call__(self):
        """Do image-by-image pixel level corrections
        """
        # All the code here, aside from one call for each step, should
        # be associated with shoveling data between steps. Everything else
        # should take place inside the code for its respective step.

        # Get the science image
        self.sci = DESImage.load(self.config.get('pixcorrect_cp', 'in'))

        # Bias subtraction
        if self.do_step('bias'):
            self._check_return(bias_correct(self.sci, self.bias))
        self.clean_im('bias')

        # Linearization
        if self.do_step('lincor'):
            lincor_fname = self.config.get('pixcorrect_cp', 'lincor')
            self._check_return(linearity_correct(self.sci, lincor_fname))

        # Make the mask plane and mark saturated pixels.  Note that flags
        # are set to mark saturated pixels and keep any previously existing mask bits.
        if self.do_step('bpm'):
            self._check_return(
                make_mask(self.sci, self.bpm, saturate=True, clear=False))

        flat_gaincorrect = self.config.getboolean('pixcorrect_cp',
                                                  'flat_gaincorrect')
        # get gains ahead of time so that jump can be removed from a flat with no gain correction
        gain_preserve = {}
        if flat_gaincorrect:
            tmp_gains = {}
            avg_gain = 0.0
            for amp in decaminfo.amps:
                tmp_gains[amp] = self.sci['GAIN' + amp]
                avg_gain = avg_gain + tmp_gains[amp]
            for amp in decaminfo.amps:
                gain_preserve[amp] = 2.0 * tmp_gains[amp] / avg_gain


#            print avg_gain
#            print gain_preserve

        if self.do_step('gain'):
            self._check_return(gain_correct(self.sci))

        # B/F correction
        if self.do_step('bf'):
            bf_fname = self.config.get('pixcorrect_cp', 'bf')
            self._check_return(
                bf_correct(self.sci, bf_fname, bfinfo.DEFAULT_BFMASK))

        # If done with the BPM; let python reclaim the memory
        if not self.do_step('fixcols'):
            self.clean_im('bpm')

        # Flat field
        if self.do_step('flat'):
            #            allow_mismatch = self.config.get('pixcorrect_cp','flat_gaincorrect')
            print("flat_gaincorrect: ", flat_gaincorrect)
            #            for amp in decaminfo.amps:
            #                self.flat[gain_preserve[amp]['sec']]*=gain_preserve[amp]['cor']
            self._check_return(
                flat_correct_cp(self.sci, self.flat, gain_preserve))
            if not self.do_step('sky'):
                self.clean_im('flat')

        # Fix columns
        if self.do_step('fixcols'):
            self._check_return(fix_columns(self.sci, self.bpm))
            self.clean_im('bpm')

        # Make mini-sky image
        if self.do_step('mini'):
            mini = self.config.get('pixcorrect_cp', 'mini')
            blocksize = self.config.getint('pixcorrect_cp', 'blocksize')
            self._check_return(
                sky_compress(self.sci, mini, blocksize,
                             skyinfo.DEFAULT_SKYMASK))

        # Subtract sky and make weight plane - forcing option to do "sky-only" weight
        if self.do_step('sky'):
            sky_fname = self.config.get('pixcorrect_cp', 'sky')
            fit_fname = self.config.get('pixcorrect_cp', 'skyfit')
            self._check_return(
                sky_subtract(self.sci, fit_fname, sky_fname, 'sky', self.flat))
            if not self.do_step('addweight'):
                self.clean_im('flat')

        # Star flatten
        if self.do_step('starflat'):
            self._check_return(starflat_correct(self.sci, self.starflat))
        self.clean_im('starflat')

        ### Do add_weight before null_weight step, else it will overwrite the nulls
        if self.do_step('addweight'):
            self._check_return(add_weight(self.sci, self.flat))
        self.clean_im('flat')

        # This new call should take care of both --resaturate and --null_mask
        if self.do_step('null_mask') or self.do_step('resaturate'):
            # We need to fix the step_name if we want to call 'step_run'
            null_weights.__class__.step_name = self.config_section
            logger.info("Running null_weights")
            self._check_return(null_weights.step_run(self.sci, self.config))

        out_fname = self.config.get('pixcorrect_cp', 'out')
        self.sci.save(out_fname)

        return 0
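As a quick check of the gain_preserve factors computed above: DECam CCDs have two amps, so 2.0 * gain / avg_gain is just each amp's gain divided by the mean of the two. The gain values below are invented for the illustration.

tmp_gains = {'A': 4.2, 'B': 4.6}        # hypothetical GAINA / GAINB values
avg_gain = sum(tmp_gains.values())       # sum over the two amps (8.8)
gain_preserve = {amp: 2.0 * g / avg_gain for amp, g in tmp_gains.items()}
print(gain_preserve)                     # {'A': 0.9545..., 'B': 1.0454...}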
Example 17
    def __call__(cls,
                 streak_list,
                 image_list,
                 streak_name_in,
                 streak_name_out,
                 image_name_in,
                 image_name_out,
                 add_width,
                 max_extrapolate,
                 plotfile=None):
        """
        Read input list of streak detections and predict where a streak
        crossed a CCD but was missed.  Then create new copies of images,
        altering masks to set STREAK bit in new streaks.

        :Parameters:
            - `streak_list`: list of input streak file names
            - `image_list`: list of names of image files to be updated
            - `streak_name_in`: string to replace in input streak filenames
            - `streak_name_out`: replacement string for output streak filenames
            - `image_name_in`: string to replace in input image filenames
            - `image_name_out`: replacement string for output image filenames
            - `add_width`:  number of pixels to grow (or shrink) streak width
            - `max_extrapolate`: farthest to start a new streak from endpoint of an existing one (degrees)
            - `plotfile`: if given, a diagram of streaks is drawn into this file
        """

        logger.info('Reading {:d} streak files'.format(len(streak_list)))

        # Read in all the streak RA/Dec, into a dictionary keyed by CCDNUM,
        # which should be in the primary header.  Also save a dictionary of
        # the file names for these
        streak_corners = {}
        streak_names = {}
        for streakfile in streak_list:
            logger.info(f"Reading streak file {streakfile}")
            with fitsio.FITS(streakfile, 'r') as fits:
                ccdnum = fits[0].read_header()['CCDNUM']
                streak_names[ccdnum] = streakfile
                tab = fits[1].read()
                if len(tab) > 0:
                    streak_corners[ccdnum] = fits[1].read()['CORNERS_WCS']

        logger.info('Reading WCS from {:d} CCDs'.format(len(image_list)))

        # Read in the WCS for each CCD for which we have an image,
        # also put into dicts keyed by CCDNUM
        # Will get these directly from FITS instead of using DESImage in order
        # to save reading all of the data.
        wcs = {}
        crval1 = []
        crval2 = []
        for imgfile in image_list:
            try:
                hdr = fitsio.read_header(imgfile, 0)
                ccd = hdr['CCDNUM']
                crval1.append(hdr['CRVAL1'])
                crval2.append(hdr['CRVAL2'])
                # Due to a bug in fitsio 1.0.0rc1+0, we need to clean up the
                # header before feeding it to wcsutil and remove the 'None' and other problematic items
                for k in hdr:
                    # Try to access the item, if failed we have to remove it
                    if not k:
                        hdr.delete(k)
                        continue
                    try:
                        _ = hdr[k]
                    except:
                        logger.info(
                            "Removing keyword: {:s} from header".format(k))
                        hdr.delete(k)
                wcs[ccd] = wcsutil.WCS(hdr)
            except Exception as e:
                print(e)  ###
                logger.error('Failure reading WCS from {:s}'.format(imgfile))
                return 1

        # Determine a center for local gnomonic projection
        ra0 = np.median(crval1)
        dec0 = np.median(crval2)

        # Calculate upper and lower bounds of each CCD in the local
        # gnomonic system.
        ccd_x1 = np.zeros(63, dtype=float)
        ccd_x2 = np.zeros(63, dtype=float)
        ccd_y1 = np.zeros(63, dtype=float)
        ccd_y2 = np.zeros(63, dtype=float)

        ccd_xmin = 1.
        ccd_xmax = 2048.
        ccd_ymin = 1.
        ccd_ymax = 4096.
        ccd_corners_xpix = np.array([ccd_xmin, ccd_xmin, ccd_xmax, ccd_xmax])
        ccd_corners_ypix = np.array([ccd_ymin, ccd_ymax, ccd_ymax, ccd_ymin])
        for ccd, w in wcs.items():
            ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix)
            x_corners, y_corners = gnomonic(ra, dec, ra0, dec0)
            ccd_x1[ccd] = np.min(x_corners)
            ccd_y1[ccd] = np.min(y_corners)
            ccd_x2[ccd] = np.max(x_corners)
            ccd_y2[ccd] = np.max(y_corners)

        # Now collect information on all of the streak segments that we have
        ccdnum = []
        ra_corner = []
        dec_corner = []

        for ccd, streaks in streak_corners.items():
            if ccd not in wcs:
                # Skip segments on CCDs that have no WCS
                logger.warning(
                    'No WCS found for streaks on CCD {:d}'.format(ccd))
                continue
            n1, _, _ = streaks.shape
            for i in range(n1):
                ccdnum.append(ccd)
                ra_corner.append(streaks[i, :, 0])
                dec_corner.append(streaks[i, :, 1])
        # Put streak corners into gnomonic system for this exposure
        x1, y1 = gnomonic(np.array([r[0] for r in ra_corner], dtype=float),
                          np.array([d[0] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x2, y2 = gnomonic(np.array([r[1] for r in ra_corner], dtype=float),
                          np.array([d[1] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x3, y3 = gnomonic(np.array([r[2] for r in ra_corner], dtype=float),
                          np.array([d[2] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x4, y4 = gnomonic(np.array([r[3] for r in ra_corner], dtype=float),
                          np.array([d[3] for d in dec_corner], dtype=float),
                          ra0, dec0)
        ccdnum = np.array(ccdnum, dtype=int)

        # Describe each segment by two endpoints at the midpoints of the short sides
        # Will need to decide which is the short side
        d12 = np.hypot(x2 - x1, y2 - y1)
        d23 = np.hypot(x3 - x2, y3 - y2)
        xleft = np.where(d12 < d23, 0.5 * (x1 + x2), 0.5 * (x2 + x3))
        yleft = np.where(d12 < d23, 0.5 * (y1 + y2), 0.5 * (y2 + y3))
        xright = np.where(d12 < d23, 0.5 * (x3 + x4), 0.5 * (x4 + x1))
        yright = np.where(d12 < d23, 0.5 * (y3 + y4), 0.5 * (y4 + y1))
        dx = xright - xleft
        dy = yright - yleft
        # Calculate a width as 2x the
        # largest perp distance from a vertex to this line
        w1 = np.abs(dx * (y1 - yleft) - dy * (x1 - xleft)) / np.hypot(dx, dy)
        w2 = np.abs(dx * (y2 - yleft) - dy * (x2 - xleft)) / np.hypot(dx, dy)
        w3 = np.abs(dx * (y3 - yleft) - dy * (x3 - xleft)) / np.hypot(dx, dy)
        w4 = np.abs(dx * (y4 - yleft) - dy * (x4 - xleft)) / np.hypot(dx, dy)
        wmax = np.maximum(w1, w2)
        wmax = np.maximum(wmax, w3)
        wmax = np.maximum(wmax, w4)
        wmax = 2 * wmax

        # Rearrange so that xleft <= xright
        swapit = xright < xleft
        tmp = np.where(swapit, xleft, xright)
        xleft = np.where(swapit, xright, xleft)
        xright = np.array(tmp)
        tmp = np.where(swapit, yleft, yright)
        yleft = np.where(swapit, yright, yleft)
        yright = np.array(tmp)

        # Get the crossing points of the lines into CCDs
        xc1, xc2, yc1, yc2 = boxCross(xleft, yleft, dx, dy, ccd_x1[ccdnum],
                                      ccd_x2[ccdnum], ccd_y1[ccdnum],
                                      ccd_y2[ccdnum])

        # Get rid of segments that appear to miss their host CCDs
        miss = xc2 < xc1

        # Take 1st crossing point instead of left point if it has higher x, or vertical
        # with higher y, i.e. truncate the track segment at the edge of the CCD.
        replace = np.where(dx == 0, yc1 > yleft, xc1 > xleft)
        xc1 = np.where(replace, xc1, xleft)
        yc1 = np.where(replace, yc1, yleft)
        # Likewise truncate segment at right-hand crossing
        replace = np.where(dx == 0, yc2 < yright, xc2 < xright)
        xc2 = np.where(replace, xc2, xright)
        yc2 = np.where(replace, yc2, yright)

        # Backfill the non-intersections again - note that above
        # maneuvers will leave xc2<xc1 for streaks that miss their CCDs,
        # unless vertical ???
        xc1[miss] = 0.
        xc2[miss] = -1.

        # Get a final verdict on hit or miss
        miss = np.where(dx == 0, yc2 < yc1, xc2 < xc1)

        # Save information on all valid streaks
        xc1 = xc1[~miss]
        xc2 = xc2[~miss]
        yc1 = yc1[~miss]
        yc2 = yc2[~miss]
        wmax = wmax[~miss]
        ccdnum = ccdnum[~miss]

        # Express segments as slopes and midpoints
        dx = xc2 - xc1
        dy = yc2 - yc1
        mx = dx / np.hypot(dx, dy)
        my = dy / np.hypot(dx, dy)

        # Mark segments that are probably spurious edge detections
        EDGE_SLOPE = 2.  # Degrees from horizontal for edge streaks
        EDGE_DISTANCE = 0.005  # Max degrees from streak center to CCD edge for spurious streaks
        horizontal = np.abs(my) < np.sin(EDGE_SLOPE * np.pi / 180.)
        ymid = 0.5 * (yc1 + yc2)
        nearedge = np.logical_or(ccd_y2[ccdnum] - ymid < EDGE_DISTANCE,
                                 ymid - ccd_y1[ccdnum] < EDGE_DISTANCE)
        nearedge = np.logical_and(nearedge, horizontal)

        # Check short edges too
        vertical = np.abs(mx) < np.sin(EDGE_SLOPE * np.pi / 180.)
        xmid = 0.5 * (xc1 + xc2)
        tmp = np.logical_or(ccd_x2[ccdnum] - xmid < EDGE_DISTANCE,
                            xmid - ccd_x1[ccdnum] < EDGE_DISTANCE)
        nearedge = np.logical_or(nearedge, np.logical_and(tmp, vertical))

        # Decide which segments are "friends" of each other.
        # To be a friend, the center of each must be close
        # to the extension of the line of the other.
        # Accumulate a list of tracks, each track is a list of
        # individual streaks that are friends of friends
        tracks = []

        for i in range(len(xc1)):
            if nearedge[i]:
                continue  # Do not use edge tracks
            itstrack = [i]  # start new track with just this
            for t in tracks:
                # Search other tracks for friends
                for j in t:
                    if friends(xc1, xc2, yc1, yc2, mx, my, ccdnum, i, j):
                        itstrack += t  # Merge track
                        tracks.remove(t)  # Get rid of old one
                        break  # No need to check others
            tracks.append(itstrack)

        # Now iterate through tracks, seeing if they have missing segments
        # Create arrays to hold information on new tracks
        new_ccdnum = []
        new_xc1 = []
        new_xc2 = []
        new_yc1 = []
        new_yc2 = []
        new_ra1 = []
        new_ra2 = []
        new_dec1 = []
        new_dec2 = []
        new_width = []
        new_extrapolated = []
        new_nearest = []

        for t in tracks:
            if len(t) < 2:
                continue  # Do not extrapolate singlet tracks
            ids = np.array(
                t)  # Make an array of indices of segments in this track
            # Fit a quadratic path to the streak endpoints
            xx = np.concatenate((xc1[ids], xc2[ids]))
            yy = np.concatenate((yc1[ids], yc2[ids]))

            # If the track slope is mostly along x, then we'll have the independent
            # variable xx be x and dependent yy will be y.  But if track
            # is more vertical, then we'll look at functions x(y) instead.
            xOrder = np.median(np.abs(mx[ids])) > np.median(np.abs(my[ids]))
            if not xOrder:
                xx, yy = yy, xx

            # Record limits of detected tracks' independent variable
            xxmin = np.min(xx)
            xxmax = np.max(xx)

            # Fit a quadratic to the points, or
            # linear if only one streak
            # Allow up to nclip points to clip
            RESID_TOLERANCE = 6. / 3600.  # Clip >6" deviants
            nclip = 2
            for i in range(nclip + 1):
                if len(xx) > 2:
                    A = np.vstack((np.ones_like(xx), xx, xx * xx))
                else:
                    A = np.vstack((np.ones_like(xx), xx))
                coeffs = np.linalg.lstsq(A.T, yy, rcond=None)[0]
                resid = yy - np.dot(A.T, coeffs)
                j = np.argmax(np.abs(resid))
                if i == nclip or np.abs(resid[j]) < RESID_TOLERANCE:
                    break
                xx = np.delete(xx, j)
                yy = np.delete(yy, j)

            # Calculate the y(x1),y(x2) where tracks
            # cross the left/right of every CCD, then
            # find the ones that will cross CCD's y.

            # These are CCD bounds, with xx being the quadratic's argument
            if xOrder:
                xx1 = ccd_x1
                xx2 = ccd_x2
                yy1 = ccd_y1
                yy2 = ccd_y2
            else:
                xx1 = ccd_y1
                xx2 = ccd_y2
                yy1 = ccd_x1
                yy2 = ccd_x2

            if len(coeffs) == 2:
                A2 = np.vstack((np.ones_like(xx2), xx2)).T
                A1 = np.vstack((np.ones_like(xx1), xx1)).T
            else:
                A2 = np.vstack((np.ones_like(xx2), xx2, xx2 * xx2)).T
                A1 = np.vstack((np.ones_like(xx1), xx1, xx1 * xx1)).T

            # yyc[12] are the dependent coordinate at crossings of xx[12] bounds
            yyc1 = np.dot(A1, coeffs)
            yyc2 = np.dot(A2, coeffs)
            # Now we ask whether the y value of streak at either edge crossing
            # is in the y range of a CCD
            missed = np.logical_or(
                np.maximum(yyc1, yyc2) < yy1,
                np.minimum(yyc1, yyc2) > yy2)
            # Also skip any CCD where we already have a streak
            for iccd in ccdnum[ids]:
                missed[iccd] = True
            missed[0] = True  # There is no CCD0
            missed[61] = True  # Never use this one either, it's always dead

            # Now find intersection of new streaks with edges of their CCDs
            # Define a function for the streak path that we'll use for solving
            def poly(x, coeffs, ysolve):
                y = coeffs[0] + x * coeffs[1]
                if len(coeffs) > 2:
                    y += coeffs[2] * x * x
                return y - ysolve

            EDGE_TOLERANCE = 0.2 / 3600.  # Find x/y of edge to this accuracy (0.2 arcsec)
            for iccd in np.where(~missed)[0]:
                # This is a loop over every CCD that the track crosses but has no detected segment
                # Determine an (xx,yy) pair for its entry and exit from the CCD
                new_yy1 = yyc1[iccd]
                new_yy2 = yyc2[iccd]
                new_xx1 = xx1[iccd]
                new_xx2 = xx2[iccd]
                # left side:
                if new_yy1 < yy1[iccd]:
                    new_xx1 = newton(poly,
                                     new_xx1,
                                     args=(coeffs, yy1[iccd]),
                                     tol=EDGE_TOLERANCE)
                elif new_yy1 > yy2[iccd]:
                    new_xx1 = newton(poly,
                                     new_xx1,
                                     args=(coeffs, yy2[iccd]),
                                     tol=EDGE_TOLERANCE)
                new_yy1 = poly(new_xx1, coeffs, 0.)
                # right side
                if new_yy2 < yy1[iccd]:
                    new_xx2 = newton(poly,
                                     new_xx2,
                                     args=(coeffs, yy1[iccd]),
                                     tol=EDGE_TOLERANCE)
                elif new_yy2 > yy2[iccd]:
                    new_xx2 = newton(poly,
                                     new_xx2,
                                     args=(coeffs, yy2[iccd]),
                                     tol=EDGE_TOLERANCE)
                new_yy2 = poly(new_xx2, coeffs, 0.)
                # Does the solution lie outside the input streaks?
                extrapolated = new_xx1 < xxmin or new_xx2 > xxmax
                width = np.median(wmax[ids])

                # Calculate distance to nearest unclipped streak member
                nearest = min(np.min(np.hypot(xx - new_xx1, yy - new_yy1)),
                              np.min(np.hypot(xx - new_xx2, yy - new_yy2)))

                if not xOrder:
                    # swap xx,yy back if we had y as the independent variable
                    new_xx1, new_yy1 = new_yy1, new_xx1
                    new_xx2, new_yy2 = new_yy2, new_xx2

                # Project the coordinates back to RA, Dec
                ra1, dec1 = gnomonicInverse(new_xx1, new_yy1, ra0, dec0)
                ra2, dec2 = gnomonicInverse(new_xx2, new_yy2, ra0, dec0)

                # Append this streak to list of new ones
                new_ccdnum.append(iccd)
                new_xc1.append(new_xx1)
                new_xc2.append(new_xx2)
                new_yc1.append(new_yy1)
                new_yc2.append(new_yy2)
                new_ra1.append(ra1)
                new_ra2.append(ra2)
                new_dec1.append(dec1)
                new_dec2.append(dec2)
                new_width.append(width)
                new_extrapolated.append(extrapolated)
                new_nearest.append(nearest)

        # Make all lists into arrays
        new_ccdnum = np.array(new_ccdnum, dtype=int)
        new_xc1 = np.array(new_xc1, dtype=float)
        new_xc2 = np.array(new_xc2, dtype=float)
        new_yc1 = np.array(new_yc1, dtype=float)
        new_yc2 = np.array(new_yc2, dtype=float)
        new_ra1 = np.array(new_ra1, dtype=float)
        new_ra2 = np.array(new_ra2, dtype=float)
        new_dec1 = np.array(new_dec1, dtype=float)
        new_dec2 = np.array(new_dec2, dtype=float)
        new_width = np.array(new_width, dtype=float)
        new_extrapolated = np.array(new_extrapolated, dtype=bool)
        new_nearest = np.array(new_nearest, dtype=float)

        # Decide which new segments will be masked
        maskit = np.logical_or(~new_extrapolated,
                               new_nearest <= max_extrapolate)

        logger.info('Identified {:d} missing streak segments for masking'.format(\
                    np.count_nonzero(maskit)))

        # Make the diagnostic plot if desired
        if plotfile is not None:
            pl.figure(figsize=(6, 6))
            pl.xlim(-1.1, 1.1)
            pl.ylim(-1.1, 1.1)
            pl.gca().set_aspect('equal')

            # Draw CCD outlines and numbers
            for ccd, w in wcs.items():
                ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix)
                x_corners, y_corners = gnomonic(ra, dec, ra0, dec0)
                x = x_corners.tolist()
                y = y_corners.tolist()
                x.append(x[0])
                y.append(y[0])
                pl.plot(x, y, 'k-', label=None)
                x = np.mean(x_corners)
                y = np.mean(y_corners)
                pl.text(x,
                        y,
                        str(ccd),
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=14)

            # Draw input streaks marked as edge
            labelled = False
            for i in np.where(nearedge)[0]:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'm-', lw=2, label='edge')
                    labelled = True
                else:
                    pl.plot(x, y, 'm-', lw=2, label=None)

            # Draw linked tracks
            s = set()
            for t in tracks:
                if len(t) > 1:
                    s = s.union(set(t))
            labelled = False
            for i in s:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'b-', lw=2, label='connected')
                    labelled = True
                else:
                    pl.plot(x, y, 'b-', lw=2, label=None)

            # Draw singleton tracks as those that are neither edge nor connected
            s = s.union(set(np.where(nearedge)[0]))
            single = set(range(len(xc1)))
            single = single.difference(s)
            labelled = False
            for i in single:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'c-', lw=2, label='unconnected')
                    labelled = True
                else:
                    pl.plot(x, y, 'c-', lw=2, label=None)

            # Draw missed tracks that will be masked
            labelled = False
            for i in np.where(maskit)[0]:
                x = (new_xc1[i], new_xc2[i])
                y = (new_yc1[i], new_yc2[i])
                if not labelled:
                    pl.plot(x, y, 'r-', lw=2, label='new masked')
                    labelled = True
                else:
                    pl.plot(x, y, 'r-', lw=2, label=None)

            # Draw missed tracks that will not be masked
            labelled = False
            for i in np.where(~maskit)[0]:
                x = (new_xc1[i], new_xc2[i])
                y = (new_yc1[i], new_yc2[i])
                if not labelled:
                    pl.plot(x, y, 'r:', lw=2, label='new skipped')
                    labelled = True
                else:
                    pl.plot(x, y, 'r:', lw=2, label=None)

            # legend
            pl.legend(framealpha=0.3, fontsize='small')
            pl.savefig(plotfile)

        # Now accumulate pixel coordinates of corners of all new streaks to mask
        added_streak_ccds = []
        added_streak_corners = []

        for id, ccd in enumerate(new_ccdnum):
            if not maskit[id]:
                continue  # Only proceed with the ones to be masked
            # Get a pixel scale from the WCS, in arcsec/pix
            xmid = np.mean(ccd_corners_xpix)
            ymid = np.mean(ccd_corners_ypix)
            ra, dec = wcs[ccd].image2sky(xmid, ymid)
            ra2, dec2 = wcs[ccd].image2sky(xmid + 1, ymid)
            pixscale = np.hypot(
                np.cos(dec * np.pi / 180.) * (ra - ra2), dec - dec2)

            # width of streak, in pixels
            w = new_width[id] / pixscale + add_width
            if w <= 0.:
                continue  # Don't mask streaks of zero width
            # Make RA/Dec of track endpoints
            x = np.array([new_xc1[id], new_xc2[id]])
            y = np.array([new_yc1[id], new_yc2[id]])
            ra, dec = gnomonicInverse(x, y, ra0, dec0)
            # Convert to pixel coordinates
            x, y = wcs[ccd].sky2image(ra, dec)
            line = Line(x[0], y[0], x[1], y[1])
            # Create bounding rectangle of track
            corners_pix = boxTrack(line, w, ccd_xmin, ccd_xmax, ccd_ymin,
                                   ccd_ymax)
            added_streak_ccds.append(ccd)
            added_streak_corners.append(np.array(corners_pix))

        added_streak_ccds = np.array(added_streak_ccds)

        # Make new copies of streak files, adding new ones
        logger.debug('Rewriting streak files')

        for ccd, streakfile_in in streak_names.items():
            nmatch = len(re.findall(streak_name_in, streakfile_in))
            if nmatch != 1:
                logger.error('Could not update streak file named <' +
                             streakfile_in + '>')
                return 1
            streakfile_out = re.sub(streak_name_in, streak_name_out,
                                    streakfile_in)
            # Use file system to make fresh copy of table's FITS file
            shutil.copy2(streakfile_in, streakfile_out)

            # Find new streaks for this ccd
            add_ids = np.where(added_streak_ccds == ccd)[0]
            if len(add_ids) > 0:
                # Open the table and add new streaks' info
                try:
                    fits = fitsio.FITS(streakfile_out, 'rw')
                    addit = np.recarray(len(add_ids),
                                        dtype=[('LABEL', '>i4'),
                                               ('CORNERS', '>f4', (4, 2)),
                                               ('CORNERS_WCS', '>f8', (4, 2))])
                    if len(fits[1]['LABEL'][:]) > 0:
                        first_label = np.max(fits[1]['LABEL'][:]) + 1
                    else:
                        first_label = 1
                    addit.LABEL = np.arange(first_label,
                                            first_label + len(addit))

                    for i, id in enumerate(add_ids):
                        corners_pix = added_streak_corners[id]
                        addit.CORNERS[i] = corners_pix
                        ra, dec = wcs[ccd].image2sky(corners_pix[:, 0],
                                                     corners_pix[:, 1])
                        addit.CORNERS_WCS[i] = np.vstack((ra, dec)).T

                    fits[1].append(addit)
                    fits.close()
                except Exception as e:
                    print(e)
                    logger.error('Failure updating streak file <{:s}>'.format(
                        streakfile_out))
                    return 1

        logger.debug('Remasking images')

        for imgfile_in in image_list:
            # Make the name needed for output
            nmatch = len(re.findall(image_name_in, imgfile_in))
            if nmatch != 1:
                logger.error(
                    'Could not create output name for image file named <' +
                    imgfile_in + '>')
                return 1
            imgfile_out = re.sub(image_name_in, image_name_out, imgfile_in)

            logger.info(f"Loading image: {imgfile_in}")
            sci = DESImage.load(imgfile_in)
            ccd = sci.header['CCDNUM']

            # Find added streaks for this ccd
            add_ids = np.where(added_streak_ccds == ccd)[0]
            if len(add_ids) > 0:
                shape = sci.mask.shape
                yy, xx = np.indices(shape)
                points = np.vstack((xx.flatten(), yy.flatten())).T
                inside = None

                for id in add_ids:
                    # From Alex's immask routine: mark interior pixels
                    # for each added streak
                    v = added_streak_corners[id]
                    vertices = [(v[0, 0], v[0, 1]), (v[1, 0], v[1, 1]),
                                (v[2, 0], v[2, 1]), (v[3, 0], v[3, 1]),
                                (v[0, 0], v[0, 1])]
                    path = matplotlib.path.Path(vertices)

                    if inside is None:
                        inside = path.contains_points(points)
                    else:
                        inside = np.logical_or(inside,
                                               path.contains_points(points))

                # Make the list of masked pixels
                if inside is None:
                    # No interior pixels: use empty index arrays so nothing gets masked
                    ymask, xmask = np.array([], dtype=int), np.array([], dtype=int)
                else:
                    ymask, xmask = np.nonzero(inside.reshape(shape))

                sci.mask[ymask, xmask] |= parse_badpix_mask('STREAK')

            # Write something into the image header

            sci['DESCNCTS'] = time.asctime(time.localtime()) + \
                            ' Mask {:d} new streaks'.format(len(add_ids))
            #            sci['HISTORY'] = time.asctime(time.localtime()) + \
            #                             ' Mask {:d} new streaks'.format(len(add_ids))
            logger.info(f"Saving to: {imgfile_out}")
            sci.save(imgfile_out)

        logger.info('Finished connecting streaks')
        ret_code = 0
        return ret_code
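Example 17 leans on gnomonic() and gnomonicInverse() to move between RA/Dec and a local tangent-plane system measured in degrees (the EDGE_DISTANCE and RESID_TOLERANCE constants above are in degrees). Below is a hedged sketch of the standard gnomonic projection and its inverse; the package's own helpers may differ in sign or unit conventions.

import numpy as np

def gnomonic_sketch(ra, dec, ra0, dec0):
    """Project RA/Dec (degrees) onto a plane tangent at (ra0, dec0); returns degrees."""
    ra, dec, ra0, dec0 = map(np.radians, (ra, dec, ra0, dec0))
    cosc = np.sin(dec0) * np.sin(dec) + np.cos(dec0) * np.cos(dec) * np.cos(ra - ra0)
    x = np.cos(dec) * np.sin(ra - ra0) / cosc
    y = (np.cos(dec0) * np.sin(dec) - np.sin(dec0) * np.cos(dec) * np.cos(ra - ra0)) / cosc
    return np.degrees(x), np.degrees(y)

def gnomonic_inverse_sketch(x, y, ra0, dec0):
    """Invert the projection above; all inputs and outputs in degrees.
    Assumes (x, y) is not exactly at the tangent point."""
    x, y, ra0, dec0 = map(np.radians, (x, y, ra0, dec0))
    rho = np.hypot(x, y)
    c = np.arctan(rho)
    dec = np.arcsin(np.cos(c) * np.sin(dec0) + y * np.sin(c) * np.cos(dec0) / rho)
    ra = ra0 + np.arctan2(x * np.sin(c),
                          rho * np.cos(dec0) * np.cos(c) - y * np.sin(dec0) * np.sin(c))
    return np.degrees(ra), np.degrees(dec)

# Round trip should recover the input to numerical precision:
x, y = gnomonic_sketch(10.3, -20.1, 10.0, -20.0)
print(gnomonic_inverse_sketch(x, y, 10.0, -20.0))  # ~ (10.3, -20.1)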
Example 18
    def __call__(self):
        """Do image-by-image pixel level corrections
        """

        # All the code here, aside from one call for each step, should
        # be associated with shoveling data between steps. Everything else
        # should take place inside the code for its respective step.

        # Get the science image
        self.sci = DESImage.load(self.config.get('pixcorrect_im', 'in'))

        # Bias subtraction
        if self.do_step('bias'):
            self._check_return(bias_correct(self.sci, self.bias))
        self.clean_im('bias')

        # Linearization
        if self.do_step('lincor'):
            lincor_fname = self.config.get('pixcorrect_im', 'lincor')
            self._check_return(linearity_correct(self.sci, lincor_fname))

        # Make the mask plane and mark saturated pixels.  Note that flags
        # are set to mark saturated pixels and keep any previously existing mask bits.
        if self.do_step('bpm'):
            self._check_return(
                make_mask(self.sci, self.bpm, saturate=True, clear=False))

        if self.do_step('gain'):
            self._check_return(gain_correct(self.sci))

        # If done with the BPM; let python reclaim the memory
        if not self.do_step('fixcols'):
            self.clean_im('bpm')

        # Fix columns
        if self.do_step('fixcols'):
            self._check_return(fix_columns(self.sci, self.bpm))
            self.clean_im('bpm')

        # B/F correction
        if self.do_step('bf'):
            bf_fname = self.config.get('pixcorrect_im', 'bf')
            self._check_return(
                bf_correct(self.sci, bf_fname, bfinfo.DEFAULT_BFMASK))

        # Flat field
        if self.do_step('flat'):
            self._check_return(flat_correct(self.sci, self.flat))
            if not self.do_step('sky'):
                self.clean_im('flat')

        # LightBulb
        if self.do_step('lightbulb'):
            self._check_return(lightbulb(self.sci))

        # CTI Check
        if self.do_step('cticheck'):
            self._check_return(cticheck(self.sci))

        # Make mini-sky image
        if self.do_step('mini'):
            mini = self.config.get('pixcorrect_im', 'mini')
            blocksize = self.config.getint('pixcorrect_im', 'blocksize')
            self._check_return(
                sky_compress(self.sci, mini, blocksize,
                             skyinfo.DEFAULT_SKYMASK))

        # Subtract sky and make weight plane - forcing option to do "sky-only" weight
        if self.do_step('sky'):
            sky_fname = self.config.get('pixcorrect_im', 'sky')
            fit_fname = self.config.get('pixcorrect_im', 'skyfit')
            self._check_return(
                sky_subtract(self.sci, fit_fname, sky_fname, 'sky', self.flat))
            if not self.do_step('addweight'):
                self.clean_im('flat')

        # Star flatten
        if self.do_step('starflat'):
            self._check_return(starflat_correct(self.sci, self.starflat))
        self.clean_im('starflat')

        ### Do add_weight before null_weight step, else it will overwrite the nulls
        if self.do_step('addweight'):
            self._check_return(add_weight(self.sci, self.flat))
        self.clean_im('flat')

        # This new call should take care of both --resaturate and --null_mask
        if self.do_step('null_mask') or self.do_step('resaturate'):
            # We need to fix the step_name if we want to call 'step_run'
            null_weights.__class__.step_name = self.config_section
            logger.info("Running null_weights")
            self._check_return(null_weights.step_run(self.sci, self.config))

        out_fname = self.config.get('pixcorrect_im', 'out')
        self.sci.save(out_fname)

        return 0
Example 19
    def __call__(cls, inlist, ccdnorm, ampborder):
        """Apply a flat field correction to an image

        :Parameters:
            - `inlist`: list of input and output flat DESImage(s) to normalize
            - `flat_im`:  the flat correction image to apply

        Applies the correction to each input and writes a separate output file.
        """

        logger.info('Initial Read of Flat Field Headers')
        #
        norm_list = []
        scalmean_list = []
        normval = None
        #
        try:
            f1 = open(inlist, 'r')
            for line in f1:
                line = line.strip()
                columns = line.split()
                if os.path.isfile(columns[0]):
                    tmp_dict = {}
                    tmp_dict['fname'] = columns[0]
                    tmp_dict['oname'] = columns[1]
                    if tmp_dict['fname'][-2:] == "fz":
                        sci_hdu = 1  # for .fz
                    else:
                        sci_hdu = 0  # for .fits (or .gz)
                    temp_fits = fitsio.FITS(tmp_dict['fname'], 'r')
                    temp_head = temp_fits[sci_hdu].read_header()
                    #
                    #                   Get the CCD number
                    #
                    try:
                        tmp_dict['ccdnum'] = int(temp_head['CCDNUM'])

                    except:
                        if ccdnorm < 1:
                            tmp_dict['ccdnum'] = -1

                        else:
                            print(
                                "Warning: image {:s} did not have a CCDNUM keyword!"
                                .format(tmp_dict['fname']))

#
#                   Get the SCALMEAN value
#
                    try:
                        tmp_dict['scalmean'] = float(temp_head['SCALMEAN'])
                    except:
                        raise ValueError(
                            "Image %s did not have a SCALMEAN keyword. Aborting!"
                            % tmp_dict['fname'])
#
#                   Finished first header census
#                   Save file info and scalmean's to a list
#
                    norm_list.append(tmp_dict)
                    scalmean_list.append(tmp_dict['scalmean'])
                    temp_fits.close()
            f1.close()
        except:
            #
            #           Input file was not present.
            #
            #            (type, value, trback)=sys.exc_info()
            #            print("{:s} {:s} {:s} \n".format(inlist,type,value))
            raise IOError("File not found.  Missing input list %s " % inlist)
#
#       All information is now present. Determine the value that will be used in normalization.
#
        if ccdnorm > 1:
            for tmp_rec in norm_list:
                if normval is None:
                    if tmp_rec['ccdnum'] == ccdnorm:
                        normval = tmp_rec['scalmean']
                else:
                    if tmp_rec['ccdnum'] == ccdnorm:
                        print(
                            "Warning: More than one image with CCDNUM={:d} identified"
                            .format(ccdnorm))
            if normval is None:
                raise ValueError(
                    "No image with CCDNUM=%d found among input list. Aborting!"
                    % ccdnorm)
            logger.info('Normalization: %.2f set based on value from CCD %d ',
                        normval, ccdnorm)
        else:
            a_scalmean = np.array(scalmean_list)
            normval = np.median(a_scalmean)
            logger.info(
                'Normalization: %.2f set based on median value of the ensemble ',
                normval)


#
#       Go ahead and normalize the set
#
        logger.info('Normalizing list')
        for tmp_record in norm_list:
            logger.info('Working on image: %s ', tmp_record['fname'])
            image = DESImage.load(tmp_record['fname'])
            nfactor = tmp_record['scalmean'] / normval
            nfactor2 = nfactor * nfactor
            logger.info(' CCD: %2d, relative normalization factor: %.5f ',
                        tmp_record['ccdnum'], nfactor)
            image.data *= nfactor
            image.weight *= nfactor2
            #
            #           Create keywords that reflect the median value of the flat on each amp.
            #
            for amp in decaminfo.amps:
                datasecn = scan_fits_section(image, 'DATASEC' + amp)
                datasecn[0] += ampborder
                datasecn[1] -= ampborder
                datasecn[2] += ampborder
                datasecn[3] -= ampborder
                image['FLATMED' + amp] = np.median(
                    image.data[datasecn[2]:datasecn[3] + 1,
                               datasecn[0]:datasecn[1] + 1])

            DESImage.save(image, tmp_record['oname'])

        logger.debug('Finished normalizing flats')
        ret_code = 0

        return ret_code
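A tiny numeric illustration of the default (median) normalization branch above, where each image is scaled by nfactor = scalmean / normval; the SCALMEAN values are invented for the example.

import numpy as np

scalmean_list = [980.0, 1000.0, 1020.0]        # hypothetical SCALMEAN header values
normval = np.median(np.array(scalmean_list))   # 1000.0
nfactors = [s / normval for s in scalmean_list]
print(normval, nfactors)                       # 1000.0 [0.98, 1.0, 1.02]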