Example #1
    def compute_offset_crosscor(self, arrs, region, subpixshape, refine=False):
        offsets_xy = offsets_from_crosscor(arrs, region, refine=refine, order='xy')
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping x and y
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets from cross-corr')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, offsets_xy
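
A minimal, standalone sketch of the axis handling above (the offset values are invented): the cross-correlation routine reports offsets in (x, y) order, while the shape arithmetic works in numpy (row, column) order, so the columns are reversed and the result is rounded to whole pixels.

import numpy

# Hypothetical per-image offsets in (x, y) order, relative to the first image.
offsets_xy = numpy.array([[0.0, 0.0], [3.4, -1.2], [-2.7, 5.9]])

# Reverse the columns to obtain (row, column) order, then round to int,
# mirroring offsets_fc and offsets_fc_t above.
offsets_fc = offsets_xy[:, ::-1]
offsets_fc_t = numpy.round(offsets_fc).astype('int')
# offsets_fc_t -> [[0, 0], [-1, 3], [6, -3]]
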
Example #2
    def compute_offset_wcs_imgs(self, imgs, baseshape, subpixshape):

        refpix = numpy.divide(numpy.array([baseshape], dtype='int'), 2).astype('float')
        offsets_xy = offsets_from_wcs_imgs(imgs, refpix)
        self.logger.debug("offsets_xy %s", offsets_xy)
        # Offsets in numpy order, swapping x and y
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, refpix, offsets_xy
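
For a 2048 x 2048 detector (the shape assumed elsewhere in these examples), the refpix expression above reduces to the frame centre as a 1 x 2 float array; a quick standalone check:

import numpy

baseshape = [2048, 2048]
# Same expression as in compute_offset_wcs_imgs: centre of the frame.
refpix = numpy.divide(numpy.array([baseshape], dtype='int'), 2).astype('float')
# refpix -> array([[1024., 1024.]]), one row per reference position
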
Example #3
    def compute_size(self, images_info, baseshape, user_offsets=None):

        # Reference pixel in the center of the frame
        refpix = numpy.array([[baseshape[0] / 2.0, baseshape[1] / 2.0]])

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]

        if user_offsets is not None:
            self.logger.info('Using offsets from parameters')
            base_ref = numpy.asarray(user_offsets)
            list_of_offsets = -(base_ref - base_ref[0])
        else:
            self.logger.info('Computing offsets from WCS information')
            with nfcom.manage_fits(img.origin
                                   for img in target_info) as images:
                list_of_offsets = offsets_from_wcs_imgs(images, refpix)

        # FIXME: offsets are used in row/column order,
        # but the values are provided in XY, so flip left-right
        list_of_offsets = numpy.fliplr(list_of_offsets)

        # Insert pixel offsets between frames
        for iinfo, off in zip(target_info, list_of_offsets):
            iinfo.pix_offset = off

            self.logger.debug('Frame %s, offset=%s', iinfo.label, off)

        self.logger.info('Computing relative offsets')
        offsets = [iinfo.pix_offset for iinfo in target_info]
        offsets = numpy.round(offsets).astype('int')

        finalshape, offsetsp = narray.combine_shape(baseshape, offsets)
        self.logger.debug("Relative offsetsp:\n%s", offsetsp)
        self.logger.info('Shape of resized array is (NAXIS2, NAXIS1) = %s',
                         finalshape)
        return finalshape, offsetsp, refpix, list_of_offsets
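
A small sketch of the user_offsets branch above, with invented values: absolute (x, y) pointings are turned into offsets relative to the first frame, the sign is flipped, and the columns are then swapped into (row, column) order.

import numpy

# Hypothetical user-supplied offsets, one (x, y) pair per frame.
user_offsets = [[10.0, 20.0], [12.5, 18.0], [7.0, 25.0]]

base_ref = numpy.asarray(user_offsets)
# Offsets relative to the first frame, sign flipped.
list_of_offsets = -(base_ref - base_ref[0])
# Swap from (x, y) to (row, column) order.
list_of_offsets = numpy.fliplr(list_of_offsets)
# list_of_offsets -> [[-0., -0.], [2., -2.5], [-5., 3.]]
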
Example #4
    def compute_size(self, images_info, baseshape, user_offsets=None):

        # Reference pixel in the center of the frame
        refpix = numpy.array([[baseshape[0] / 2.0, baseshape[1] / 2.0]])

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]

        if user_offsets is not None:
            self.logger.info('Using offsets from parameters')
            base_ref = numpy.asarray(user_offsets)
            list_of_offsets = -(base_ref - base_ref[0])
        else:
            self.logger.debug('Computing offsets from WCS information')
            with nfcom.manage_fits(img.origin for img in target_info) as images:
                list_of_offsets = offsets_from_wcs_imgs(images, refpix)

        # FIXME: offsets are used in row/column order,
        # but the values are provided in XY, so flip left-right
        list_of_offsets = numpy.fliplr(list_of_offsets)

        # Insert pixel offsets between frames
        for iinfo, off in zip(target_info, list_of_offsets):
            iinfo.pix_offset = off

            self.logger.debug('Frame %s, offset=%s',
                              iinfo.label, off)

        self.logger.info('Computing relative offsets')
        offsets = [iinfo.pix_offset for iinfo in target_info]
        offsets = numpy.round(offsets).astype('int')

        finalshape, offsetsp = narray.combine_shape(baseshape, offsets)
        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)
        return finalshape, offsetsp, refpix, list_of_offsets
Example #5
    def process(self,
                ri,
                window=None,
                subpix=1,
                store_intermediate=True,
                target_is_sky=True,
                stop_after=PRERED):

        numpy.seterr(divide='raise')

        # FIXME: hardcoded instrument information
        keywords = {
            'airmass': 'AIRMASS',
            'exposure': 'EXPTIME',
            'imagetype': 'IMGTYP',
            'juliandate': 'MJD-OBS',
        }
        baseshape = [2048, 2048]
        channels = FULL

        if window is None:
            window = tuple((0, siz) for siz in baseshape)

        if store_intermediate:
            pass

        # States
        sf_data = None
        state = self.BASIC
        step = 0

        try:
            niteration = ri.iterations
        except KeyError:
            niteration = 1

        while True:
            if state == self.BASIC:
                _logger.info('Basic processing')

                # Basic processing

                # FIXME: add this
                # bpm = fits.getdata(ri.master_bpm)
                # bpm_corrector = BadPixelCorrector(bpm)

                if ri.master_bias:
                    mbias = fits.getdata(ri.master_bias)
                    bias_corrector = BiasCorrector(mbias)
                else:
                    bias_corrector = IdNode()

                mdark = fits.getdata(ri.master_dark.label)
                dark_corrector = DarkCorrector(mdark)

                mflat = fits.getdata(ri.master_flat.label)
                ff_corrector = FlatFieldCorrector(mflat)

                basicflow = SerialFlow([  # bpm_corrector,
                    bias_corrector, dark_corrector, ff_corrector
                ])

                for frame in ri.obresult.frames:
                    with fits.open(frame.label, mode='update') as hdulist:
                        hdulist = basicflow(hdulist)

                if stop_after == state:
                    break
                else:
                    state = self.PRERED
            elif state == self.PRERED:
                # Shape of the window
                windowshape = tuple((i[1] - i[0]) for i in window)
                _logger.debug('Shape of window is %s', windowshape)
                # Shape of the scaled window
                subpixshape = tuple((side * subpix) for side in windowshape)

                # Scaled window region
                scalewindow = tuple(
                    slice(*(subpix * i for i in p)) for p in window)
                # Window region
                window = tuple(slice(*p) for p in window)

                scaled_chan = clip_slices(channels, window, scale=subpix)

                # Reference pixel in the center of the frame
                refpix = numpy.divide(numpy.array([baseshape], dtype='int'),
                                      2).astype('float')

                # lists of targets and sky frames
                targetframes = []
                skyframes = []

                for frame in ri.obresult.frames:

                    # Getting some metadata from FITS header
                    hdr = fits.getheader(frame.label)
                    try:
                        frame.exposure = hdr[str(keywords['exposure'])]
                        # frame.baseshape = get_image_shape(hdr)
                        frame.airmass = hdr[str(keywords['airmass'])]
                        frame.mjd = hdr[str(keywords['juliandate'])]
                    except KeyError as e:
                        raise KeyError("%s in frame %s" %
                                       (str(e), frame.label))

                    frame.baselabel = os.path.splitext(frame.label)[0]
                    frame.mask = ri.master_bpm
                    # Initialize per-frame bookkeeping
                    frame.objmask_data = None
                    frame.valid_target = False
                    frame.valid_sky = False
                    frame.valid_region = scalewindow
                    # FIXME: itype is hardcoded for the moment
                    frame.itype = 'TARGET'
                    if frame.itype == 'TARGET':
                        frame.valid_target = True
                        targetframes.append(frame)
                        if target_is_sky:
                            frame.valid_sky = True
                            skyframes.append(frame)
                    if frame.itype == 'SKY':
                        frame.valid_sky = True
                        skyframes.append(frame)

                labels = [frame.label for frame in targetframes]

                if ri.offsets is None:
                    _logger.info('Computing offsets from WCS information')

                    list_of_offsets = offsets_from_wcs(labels, refpix)
                else:
                    _logger.info('Using offsets from parameters')
                    list_of_offsets = numpy.asarray(ri.offsets)

                # Insert pixel offsets between frames
                for frame, off in zip(targetframes, list_of_offsets):
                    frame.pix_offset = off
                    frame.scaled_pix_offset = subpix * off

                    _logger.debug('Frame %s, offset=%s, scaled=%s',
                                  frame.label, off, subpix * off)

                _logger.info('Computing relative offsets')
                offsets = [frame.scaled_pix_offset for frame in targetframes]
                offsets = numpy.round(offsets).astype('int')
                finalshape, offsetsp = combine_shape(subpixshape, offsets)
                _logger.info('Shape of resized array is %s', finalshape)

                # Resizing target frames
                self.resize(targetframes,
                            subpixshape,
                            offsetsp,
                            finalshape,
                            window=window,
                            scale=subpix)

                if not target_is_sky:
                    for frame in skyframes:
                        frame.resized_base = frame.label
                        frame.resized_mask = frame.mask

                # superflat
                _logger.info('Step %d, superflat correction (SF)', step)
                # Compute scale factors (median)
                self.update_scale_factors(ri.obresult.frames)

                # Create superflat
                superflat = self.compute_superflat(skyframes,
                                                   channels=scaled_chan,
                                                   step=step)

                # Apply superflat
                self.figure_init(subpixshape)
                self.apply_superflat(ri.obresult.frames, superflat)

                _logger.info('Simple sky correction')
                if target_is_sky:
                    # Each frame is the closest sky frame available

                    for frame in ri.obresult.frames:
                        self.compute_simple_sky_for_frame(frame, frame)
                else:
                    self.compute_simple_sky(targetframes, skyframes)

                # Combining the frames
                _logger.info("Step %d, Combining target frames", step)

                sf_data = self.combine_frames(targetframes,
                                              extinction=ri.extinction)

                self.figures_after_combine(sf_data)

                _logger.info('Step %d, finished', step)

                if stop_after == state:
                    break
                else:
                    state = self.CHECKRED
            elif state == self.CHECKRED:

                seeing_fwhm = None

                # self.check_position(images_info, sf_data, seeing_fwhm)
                recompute = False
                if recompute:
                    _logger.info('Recentering is needed')
                    state = self.PRERED
                else:
                    _logger.info('Recentering is not needed')
                    _logger.info('Checking photometry')
                    check_photometry(targetframes,
                                     sf_data,
                                     seeing_fwhm,
                                     figure=self._figure)

                    if stop_after == state:
                        break
                    else:
                        state = self.FULLRED
            elif state == self.FULLRED:

                # Generating segmentation image
                _logger.info('Step %d, generating segmentation image', step)
                objmask, seeing_fwhm = self.create_mask(sf_data,
                                                        seeing_fwhm,
                                                        step=step)
                step += 1
                # Update objects mask
                # For all images
                # FIXME:
                for frame in targetframes:
                    frame.objmask = name_object_mask(frame.baselabel, step)
                    _logger.info('Step %d, create object mask %s', step,
                                 frame.objmask)
                    frame.objmask_data = objmask[frame.valid_region]
                    fits.writeto(frame.objmask,
                                 frame.objmask_data,
                                 overwrite=True)

                if not target_is_sky:
                    # Empty object mask for sky frames
                    bogus_objmask = numpy.zeros(windowshape, dtype='int')

                    for frame in skyframes:
                        frame.objmask_data = bogus_objmask

                _logger.info('Step %d, superflat correction (SF)', step)

                # Compute scale factors (median)
                self.update_scale_factors(ri.obresult.frames, step)

                # Create superflat
                superflat = self.compute_superflat(skyframes,
                                                   scaled_chan,
                                                   segmask=objmask,
                                                   step=step)

                # Apply superflat
                self.figure_init(subpixshape)

                self.apply_superflat(ri.obresult.frames,
                                     superflat,
                                     step=step,
                                     save=True)

                _logger.info('Step %d, advanced sky correction (SC)', step)
                self.compute_advanced_sky(targetframes,
                                          objmask,
                                          skyframes=skyframes,
                                          target_is_sky=target_is_sky,
                                          step=step)

                # Combining the images
                _logger.info("Step %d, Combining the images", step)
                # FIXME: only for science
                sf_data = self.combine_frames(targetframes,
                                              ri.extinction,
                                              step=step)
                self.figures_after_combine(sf_data)

                if step >= niteration:
                    state = self.COMPLETE
            else:
                break

        if sf_data is None:
            raise RecipeError(
                'no combined image has been generated at step %d' % state)

        hdu = fits.PrimaryHDU(sf_data[0])
        hdr = hdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')

        hdr['FILENAME'] = 'result.fits'
        hdr['IMGTYP'] = ('TARGET', 'Image type')
        hdr['NUMTYP'] = ('TARGET', 'Data product type')

        varhdu = fits.ImageHDU(sf_data[1], name='VARIANCE')
        num = fits.ImageHDU(sf_data[2], name='MAP')

        result = fits.HDUList([hdu, varhdu, num])

        _logger.info("Final frame created")

        return DataFrame(result), SourcesCatalog()
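
The window and subpixel bookkeeping at the start of the PRERED state is easier to follow in isolation; a standalone sketch with the default full-frame window and an assumed subpixel factor of 2:

baseshape = [2048, 2048]
subpix = 2
window = tuple((0, siz) for siz in baseshape)        # ((0, 2048), (0, 2048))

# Shape of the window and of the subpixel-scaled window.
windowshape = tuple((i[1] - i[0]) for i in window)           # (2048, 2048)
subpixshape = tuple(side * subpix for side in windowshape)   # (4096, 4096)

# The same regions as slice tuples, usable directly for array indexing.
scalewindow = tuple(slice(*(subpix * i for i in p)) for p in window)
window = tuple(slice(*p) for p in window)
# scalewindow -> (slice(0, 4096, None), slice(0, 4096, None))
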
Example #6
    def run_single(self, rinput):

        # Open all images
        obresult = rinput.obresult

        data_hdul = []
        for f in obresult.frames:
            img = f.open()
            data_hdul.append(img)

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            data_hdul_s = [corrector(m) for m in data_hdul]
            # data_arr_s = [m[0].data - sky_data for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm_%s.fits' % idx)

        compute_cross_offsets = True
        if compute_cross_offsets:
            try:
                self.logger.debug("Compute cross-correlation of images")
                regions = self.compute_regions(finalshape,
                                               box=200,
                                               corners=True)

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%s.fits' % idx)

            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = resize_arrays(masks,
                                      subpixshape,
                                      offsetsp,
                                      finalshape,
                                      fill=1)

        # Position of refpixel in final image
        refpix_final = refpix + offsetsp[0]
        self.logger.info('Position of refpixel in final image %s',
                         refpix_final)

        self.logger.info('Combine target images (final)')
        method = combine.median
        out = method(data_arr_sr, masks=mask_arr_r, dtype='float32')

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)

        hdr['TSUTC2'] = data_hdul[-1][0].header['TSUTC2']
        # Update obsmode in header

        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual images
        ncom = 0
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header.get('NUM-NCOM', 1)
        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += offsetsp[0][0]
        hdr['CRPIX2'] += offsetsp[0][1]

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])

        result = self.create_result(frame=hdulist, sky=sky_result)
        self.logger.info('end of dither recipe')
        return result
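
The two mask conventions handled above are easy to mix up, so here is a toy illustration (2 x 2 arrays, not real data): NUM counts contributing pixels while BPM flags bad ones, and the opposite numpy.where branches make both yield a mask that is 1 for pixels to reject.

import numpy

# NUM extension: number of contributing pixels; NUM == 0 means "bad".
num_data = numpy.array([[3, 0], [2, 1]])
mask_from_num = numpy.where(num_data, 0, 1).astype('int16')
# -> [[0, 1], [0, 0]]

# BPM extension: bad-pixel flags; BPM != 0 means "bad".
bpm_data = numpy.array([[0, 1], [0, 0]])
mask_from_bpm = numpy.where(bpm_data, 1, 0).astype('int16')
# -> [[0, 1], [0, 0]]
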
Example #7
    def run_single(self, rinput):

        # FIXME: remove this, it is deprecated

        obresult = rinput.obresult

        # just in case images are in results instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = narray.resize_arrays(
            [m[0].data for m in data_hdul_s],
            subpixshape,
            offsetsp,
            finalshape,
            fill=1)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = narray.resize_arrays(masks,
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp,
                                use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data,
                                                       finalshape,
                                                       box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = narray.combine_shape(
                    subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = narray.resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = narray.resize_arrays(masks,
                                                     subpixshape,
                                                     offsetsp,
                                                     finalshape,
                                                     fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul,
                                        offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                      border=50)

        data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s)
                      for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions,
                                             channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %s, using %d-%d images', idx,
                                 len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error('No sky image available for frame %d',
                                      idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames',
                                  len(skydata))
                sky, _, num = nacom.median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warn(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    narray.fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(
                        arr_r, 'interm_%d_%03d.fits' % (inum, idx))

            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul,
                                    offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                          border=50)

            data_arr_0 = [
                (d[r] + s)
                for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)
            ]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result
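
The densest part of the iteration loop above is the selection of sky frames by time separation; a reduced, standalone sketch of just that logic, with invented timestamps and parameters:

import numpy

tstamps = numpy.array([0.0, 100.0, 250.0, 400.0, 900.0])  # seconds, assumed
sky_images = 2                 # frames taken from each side
scale = 5 * 60                 # sky_images_sep_time of 5 minutes, in seconds

idx = 2                        # frame being sky-corrected
diff1 = tstamps - tstamps[idx]
idxs1 = (diff1 > 0) & (diff1 < scale)     # later frames within the window
idxs2 = (diff1 < 0) & (diff1 > -scale)    # earlier frames within the window
l1, = numpy.nonzero(idxs1)
l2, = numpy.nonzero(idxs2)
limit1 = l1[-sky_images:]      # last sky_images entries of the later list
limit2 = l2[:sky_images]       # first sky_images entries of the earlier list
# limit1 -> [3], limit2 -> [0, 1]
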
Example #8
    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (2048, 2048)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets)

        self.resize(target_info, baseshape, offsetsp, finalshape)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(result[0].data,
                                                       finalshape,
                                                       box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(
                    baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s",
                                  offsetsp2)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(images_info,
                                           result,
                                           step,
                                           target_is_sky,
                                           maxsep=sky_images_sep_time,
                                           nframes=sky_images,
                                           extinction=extinction)
            step += 1

        return self.create_result(result_image=result)
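
In the refine_offsets branch above, the cross-correlation result is applied as a correction to the initial offset estimate; the arithmetic in isolation, with made-up numbers:

import numpy

# Initial per-frame offsets (x, y) and the residuals measured by
# cross-correlation on the already-resized images (invented values).
offset_xy0 = numpy.array([[0.0, 0.0], [10.2, -4.9]])
offsets_xy_c = numpy.array([[0.0, 0.0], [0.4, -0.3]])

# Total offset = initial estimate minus measured residual,
# then swap to (row, column) order and round to whole pixels.
offsets_xy_t = offset_xy0 - offsets_xy_c
offsets_fc_t = numpy.round(numpy.fliplr(offsets_xy_t)).astype('int')
# offsets_fc_t -> [[0, 0], [-5, 10]]
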
Example #9
    def run_single(self, rinput):

        # FIXME: remove this, it is deprecated

        obresult = rinput.obresult

        # just in case images are in results instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s', self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result)
            )
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s,
            baseshape,
            subpixshape
        )

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = narray.resize_arrays(
            [m[0].data for m in data_hdul_s],
            subpixshape,
            offsetsp,
            finalshape,
            fill=1
        )

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [numpy.where(m['NUM'].data, 0, 1).astype('int16') for m in data_hdul]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [numpy.where(m['BPM'].data, 1, 0).astype('int16') for m in data_hdul]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = narray.resize_arrays(masks, subpixshape, offsetsp, finalshape, fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp, use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data, finalshape, box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1
                )
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = narray.combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s', finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = narray.resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1
                )

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r, 'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = narray.resize_arrays(masks, subpixshape, offsetsp, finalshape, fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s', error)


        catalog, objmask = self.create_object_catalog(hdulist[0].data, border=50)

        data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s) for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions, channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %s, using %d-%d images', idx,
                                 len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error(
                        'No sky image available for frame %d', idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames', len(skydata))
                sky, _, num = nacom.median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warn(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    narray.fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(arr_r, 'interm_%d_%03d.fits' % (inum, idx))


            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul, offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data, border=50)

            data_arr_0 = [(d[r] + s) for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result
Example #10
    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (2048, 2048)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets
        )

        self.resize(target_info, baseshape, offsetsp, finalshape)

        result = self.process_basic(images_info, target_is_sky=target_is_sky,
                                     extinction=extinction)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(result[0].data, finalshape, box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1
                )
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp2)
                self.logger.info('Shape of resized array (crosscorr) is %s', finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s', error)

        result = self.process_basic(images_info, target_is_sky=target_is_sky,
                                     extinction=extinction)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(
                images_info, result, step, target_is_sky,
                maxsep=sky_images_sep_time, nframes=sky_images,
                extinction=extinction
            )
            step += 1

        return self.create_result(result_image=result)
Example #11
    def run_single(self, rinput):

        # Open all images
        obresult = rinput.obresult

        data_hdul = []
        for f in obresult.frames:
            img = f.open()
            data_hdul.append(img)

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine(data_arr_sr, data_hdul, finalshape, offsetsp,
                               refpix, use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions = self.compute_regions_from_objs(hdulist[0].data,
                                                     finalshape,
                                                     box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping x and y
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                hdulist = self.combine(data_arr_sr, data_hdul, finalshape,
                                       offsetsp, refpix, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        result = self.create_result(frame=hdulist, sky=sky_result)
        self.logger.info('end of dither recipe')
        return result
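
The refinement step above combines the initial WCS offsets with the cross-correlation corrections, swaps them to numpy (row, column) order and re-derives the size of the output canvas. Below is a minimal sketch of that bookkeeping in plain numpy; combine_shape_sketch is a hypothetical stand-in for the combine_shape helper used in the recipe (not its real implementation), and the offset values are invented for illustration.

import numpy

def combine_shape_sketch(shape, offsets):
    # Hypothetical helper: the output canvas must cover every frame shifted
    # by its (row, col) offset, and the returned offsets are re-referenced
    # so that the smallest one becomes zero.
    offsets = numpy.asarray(offsets, dtype=int)
    offsetsp = offsets - offsets.min(axis=0)
    finalshape = tuple(numpy.asarray(shape) + offsetsp.max(axis=0))
    return finalshape, offsetsp

# Invented offsets in XY order, one row per image
offset_xy0 = numpy.array([[0.0, 0.0], [10.3, -4.7], [-2.1, 6.9]])   # initial (WCS) guess
offsets_xy_c = numpy.array([[0.0, 0.0], [0.4, -0.2], [-0.3, 0.1]])  # cross-corr correction

offsets_xy_t = offset_xy0 - offsets_xy_c              # combined offsets
offsets_fc = offsets_xy_t[:, ::-1]                    # swap to numpy (row, col) order
offsets_fc_t = numpy.round(offsets_fc).astype('int')

finalshape, offsetsp = combine_shape_sketch((2048, 2048), offsets_fc_t)
print(finalshape, offsetsp.tolist())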
Example #12
0
def basic_processing_with_segmentation(rinput, flow,
                                          method=combine.mean,
                                          errors=True, bpm=None):

    odata = []
    cdata = []
    datamodel = EmirDataModel()
    try:
        _logger.info('processing input images')
        for frame in rinput.obresult.images:
            hdulist = frame.open()
            fname = datamodel.get_imgid(hdulist)
            _logger.info('input is %s', fname)
            final = flow(hdulist)
            _logger.debug('output is input: %s', final is hdulist)

            cdata.append(final)

            # Files to be closed at the end
            odata.append(hdulist)
            if final is not hdulist:
                odata.append(final)

        base_header = cdata[0][0].header.copy()

        baseshape = cdata[0][0].data.shape
        subpixshape = cdata[0][0].data.shape

        _logger.info('Computing offsets from WCS information')
        refpix = numpy.divide(numpy.array([baseshape], dtype='int'), 2).astype('float')
        offsets_xy = offsets_from_wcs(rinput.obresult.frames, refpix)
        _logger.debug("offsets_xy %s", offsets_xy)
        # Offsets in numpy order, swapping
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        _logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
        _logger.debug("offsetsp %s", offsetsp)

        _logger.info('Shape of resized array is %s', finalshape)
        # Resizing target frames
        rhduls, regions = resize_hdulists(cdata, subpixshape, offsetsp, finalshape)

        _logger.info("stacking %d images, with offsets using '%s'", len(cdata), method.__name__)
        data1 = method([d[0].data for d in rhduls], dtype='float32')

        segmap = segmentation_combined(data1[0])
        # submasks
        if bpm is None:
            masks = [(segmap[region] > 0) for region in regions]
        else:
            masks = [((segmap[region] > 0) & bpm) for region in regions]

        _logger.info("stacking %d images, with objects mask using '%s'", len(cdata), method.__name__)
        data2 = method([d[0].data for d in cdata], masks=masks, dtype='float32')
        hdu = fits.PrimaryHDU(data2[0], header=base_header)
        points_no_data = (data2[2] == 0).sum()

        _logger.debug('update result header')
        hdu.header['TSUTC2'] = cdata[-1][0].header['TSUTC2']
        hdu.header['history'] = "Combined %d images using '%s'" % (len(cdata), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(datetime.datetime.utcnow().isoformat())
        hdu.header['UUID'] = str(uuid.uuid1())
        _logger.info("missing points, total: %d, fraction: %3.1f", points_no_data, points_no_data / data2[2].size)

        if errors:
            varhdu = fits.ImageHDU(data2[1], name='VARIANCE')
            num = fits.ImageHDU(data2[2], name='MAP')
            result = fits.HDUList([hdu, varhdu, num])
        else:
            result = fits.HDUList([hdu])
    finally:
        _logger.debug('closing images')
        for hdulist in odata:
            hdulist.close()

    return result
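
Example #12 stacks the frames twice: a first pass builds a segmentation map of bright sources, and a second pass combines the frames again with those object pixels masked out. The following is a minimal sketch of that two-pass idea in plain numpy, with a simple threshold standing in for segmentation_combined and random data standing in for real frames.

import numpy

rng = numpy.random.default_rng(0)
frames = [rng.normal(100.0, 5.0, size=(64, 64)) for _ in range(4)]

# First pass: plain mean stack, then a toy "segmentation map": anything well
# above the background level is flagged as an object.
stack1 = numpy.mean(frames, axis=0)
segmap = (stack1 > stack1.mean() + 3.0 * stack1.std()).astype(int)

# Second pass: combine again with the object pixels masked in every frame.
masks = [segmap > 0 for _ in frames]
marr = numpy.ma.masked_array(frames, mask=masks)
stack2 = marr.mean(axis=0)

print(stack2.shape, int(segmap.sum()), 'object pixels masked')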
Example #13
0
    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (EMIR_NAXIS2, EMIR_NAXIS1)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        # protections
        if rinput.iterations == 0 and sky_images != 0:
            raise ValueError(
                'sky_images: {} not compatible with iterations: {}'.format(
                    sky_images, rinput.iterations))

        if rinput.iterations > 0 and sky_images == 0:
            raise ValueError('iterations != 0 requires sky_images > 0')

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))
        # combination method and arguments
        method = getattr(nacom, rinput.method)
        method_kwargs = rinput.method_kwargs

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets)

        self.resize(target_info, baseshape, offsetsp, finalshape)

        step = 0

        result = self.process_basic(images_info,
                                    step=step,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction,
                                    method=method,
                                    method_kwargs=method_kwargs)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(step,
                                                       result[0].data,
                                                       finalshape,
                                                       box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets:\n%s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(
                    baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr):\n%s",
                                  offsetsp2)
                self.logger.info(
                    'Shape of resized array (crosscorr) is '
                    '(NAXIS2, NAXIS1) = %s', finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
                result = self.process_basic(images_info,
                                            step=step,
                                            target_is_sky=target_is_sky,
                                            extinction=extinction,
                                            method=method,
                                            method_kwargs=method_kwargs)

            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(images_info,
                                           result,
                                           step,
                                           target_is_sky,
                                           maxsep=sky_images_sep_time,
                                           nframes=sky_images,
                                           extinction=extinction,
                                           method=method,
                                           method_kwargs=method_kwargs)
            step += 1

        return self.create_result(result_image=result)
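
Example #13 resolves its combination method by name and only allows extra keyword arguments for 'sigmaclip'. Below is a minimal sketch of that lookup-and-validate pattern, using a hypothetical dictionary of numpy reducers instead of the real combine module, so the names and behaviour here are illustrative assumptions.

import numpy

# Hypothetical registry standing in for the combine module
COMBINE_METHODS = {
    'mean': lambda arrays, **kw: numpy.mean(arrays, axis=0, **kw),
    'median': lambda arrays, **kw: numpy.median(arrays, axis=0, **kw),
}

def resolve_method(name, method_kwargs):
    # Mirror the recipe's protection: extra arguments are only accepted
    # for the 'sigmaclip' method.
    if name != 'sigmaclip' and method_kwargs:
        raise ValueError('Unexpected method_kwargs={}'.format(method_kwargs))
    return COMBINE_METHODS[name], method_kwargs

method, kwargs = resolve_method('mean', {})
stack = method([numpy.ones((4, 4)), 3.0 * numpy.ones((4, 4))], **kwargs)
print(stack[0, 0])   # 2.0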