Example 1
class OffsetSpectraRecipe(EmirRecipe):
    """
    Observing mode:
        Offset spectra beyond the slit
    """

    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_spectral_ff = Requirement(prods.MasterSpectralFlat,
                                     'Master spectral flatfield')
    st_calibration = Requirement(prods.SlitTransmissionCalibration,
                                 'Slit transmission calibration')
    w_calibration = Requirement(prods.WavelengthCalibration,
                                'Wavelength calibration')
    lines = Parameter('lines', None,
                      'List of x-lambda pairs of line coordinates')

    spectra = Product(prods.Spectra)
    catalog = Product(prods.LinesCatalog)

    def run(self, rinput):
        return self.create_result(spectra=prods.Spectra(),
                                  catalog=prods.LinesCatalog())
Example 2
class ImageSkyRecipe(BaseRecipe):

    obresult = Requirement(ObservationResultType, "Observation Result")

    master_bias = Requirement(MasterBias, "Master Bias")
    master_flat = Requirement(MasterFlat, "Master Flat")

    # This field can be disabled via
    # query_options: sky_image: False
    # in drp.yaml
    sky_image = Requirement(
        SkyImage,
        description="Previous Sky Image",
        query_opts=ResultOf('sky.sky_image', node='prev'),
        default=None  # This value is used only if the query is disabled
    )
    final = Product(DataFrameType)

    def run(self, rinput):

        query_sky_image = self.query_options.get('sky_image', True)

        if query_sky_image:
            self.logger.debug('using sky, value=%s', rinput.sky_image)
        else:
            self.logger.debug('not using sky, value=%s', rinput.sky_image)

        # Here the raw images are processed
        # and a final image myframe is created
        myframe = produce_image(rinput.obresult)

        result = self.create_result(final=myframe)
        return result
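
The helper produce_image called in Examples 2 and 4 is never defined in these excerpts. A minimal stand-in consistent with how it is called (it takes the observation result and must return an HDUList) might look like the following sketch; the averaging strategy is an assumption, not the pipeline's actual method.

import numpy
from astropy.io import fits


def produce_image(obresult):
    # Hypothetical stand-in, not part of the source: open every raw frame
    # in the observation result and average them into a single HDUList.
    arrays = [frame.open()[0].data for frame in obresult.frames]
    header = obresult.frames[0].open()[0].header
    combined = numpy.mean(arrays, axis=0, dtype='float32')
    return fits.HDUList([fits.PrimaryHDU(combined, header=header)])
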
Example 3
class Flat(BaseRecipe):

    obresult = Requirement(ObservationResultType, "Observation Result")
    master_bias = Requirement(MasterBias, "Master Bias")
    polynomial_degree = Parameter(5,
                                  'Polynomial degree of arc calibration',
                                  as_list=True,
                                  nelem='+',
                                  validator=range_validator(minval=1))

    @master_bias.validator
    def custom_validator(self, value):
        print('func', self, value)
        return True

    master_flat = Result(MasterFlat)

    def run(self, rinput):

        # Here the raw images are processed
        # and a final image myframe is created
        obresult = rinput.obresult
        fr0 = obresult.frames[0].open()
        data = numpy.ones_like(fr0[0].data)
        hdu = fits.PrimaryHDU(data, header=fr0[0].header)
        myframe = fits.HDUList([hdu])
        #
        result = self.create_result(master_flat=myframe)
        return result
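
The range_validator factory used above appears only by name. A sketch of what such a factory could look like, assuming it returns a callable that rejects out-of-range parameter values:

def range_validator(minval=None, maxval=None):
    # Hypothetical sketch of a validator factory like the one in Example 3;
    # the real numina helper may differ in signature and behaviour.
    def validator(value):
        if minval is not None and value < minval:
            raise ValueError('value {} is below minimum {}'.format(value, minval))
        if maxval is not None and value > maxval:
            raise ValueError('value {} is above maximum {}'.format(value, maxval))
        return value
    return validator
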
Example 4
class ImageRecipe(BaseRecipe):

    obresult = Requirement(ObservationResultType, "Observation Result")
    master_bias = Requirement(MasterBias, "Master Bias")
    master_flat = Requirement(MasterFlat, "Master Flat")
    final = Product(DataFrameType)

    def run(self, rinput):

        # Here the raw images are processed
        # and a final image myframe is created
        myframe = produce_image(rinput.obresult)

        result = self.create_result(final=myframe)
        return result
Example 5
class ArcCalibrationRecipe(EmirRecipe):

    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    lines_catalog = Requirement(LinesCatalog, "Catalog of lines")
    polynomial_degree = Parameter(2,
                                  'Polynomial degree of the arc calibration')

    polynomial_coeffs = Product(ArrayType)

    def run(self, rinput):
        _logger.info('starting arc calibration')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        nslits = len(rinput.lines_catalog)
        coeff_table = numpy.zeros((nslits, rinput.polynomial_degree + 1))

        result = self.create_result(polynomial_coeffs=coeff_table)

        return result
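
The recipe above only allocates the coefficient table. A sketch of how one row could be filled, assuming a plain numpy.polyfit of measured line positions against wavelengths (sample values are invented):

import numpy

degree = 2
x = numpy.array([100.0, 500.0, 900.0])  # line positions in pixels (invented)
w = numpy.array([1.50, 1.60, 1.70])     # corresponding wavelengths (invented)
coeffs = numpy.polyfit(x, w, degree)    # highest-order coefficient first
row = coeffs[::-1]                      # one row of the (nslits, degree + 1) table
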
Example 6
class TestSkyCorrectRecipe(EmirRecipe):

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = Requirement(prods.MasterIntensityFlat,
                             'Master Sky calibration')

    frame = Result(prods.ProcessedImage)

    def run(self, rinput):
        self.logger.info('starting simple sky reduction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput,
                                                    flow,
                                                    method=median)
        hdr = hdulist[0].header
        self.set_base_headers(hdr)
        # Update SEC to 0
        hdr['SEC'] = 0

        result = self.create_result(frame=hdulist)

        return result
Example 7
class CSUSpectraExtractionRecipe(EmirRecipe):
    """Extract spectra in image taken with the CSU configured"""

    # Recipe Requirements
    obresult = ObservationResultRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()
    nrows_side = Parameter(5, 'Number of rows to extract around the center')
    slits_positions = Requirement(ArrayType,
                                  'Positions and widths of the slits')

    # Recipe products
    frame = Product(DataFrameType)
    rss = Product(DataFrameType)

    def run(self, rinput):
        _logger.info('starting extraction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        data1 = hdulist[0].data

        _logger.info('Create output images')
        rssdata = numpy.zeros(
            (rinput.slits_positions.shape[0], data1.shape[1]), dtype='float32')

        nrows = rinput.nrows_side
        # Loop over slits
        for idx, slit_coords in enumerate(rinput.slits_positions):

            x, y, ax, ay = slit_coords  # Coords in FITS coordinates

            ref_col = wc_to_pix(x - 1)
            ref_row = wc_to_pix(y - 1)

            _logger.info('Processing slit in column %i, row=%i', ref_col,
                         ref_row)

            # Simple extraction

            _logger.info('Extract %i rows around center', nrows)
            region = data1[ref_row - nrows:ref_row + nrows + 1, :]

            rssdata[idx, :] = region.mean(axis=0)

        hdurss = fits.PrimaryHDU(rssdata)

        result = self.create_result(frame=hdulist, rss=hdurss)

        return result
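
The extraction loop above takes 2 * nrows_side + 1 rows centred on each slit and collapses them with a mean. A small self-contained check of that indexing:

import numpy

data = numpy.arange(100.0).reshape(10, 10)  # toy frame
ref_row, nrows = 5, 2
region = data[ref_row - nrows:ref_row + nrows + 1, :]
spectrum = region.mean(axis=0)
assert region.shape == (2 * nrows + 1, 10)
assert spectrum.shape == (10,)
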
Example 8
class RecWaveRecipe(EmirRecipe):
    """Builds a MasterRecWave from a seralized version"""
    filename = Requirement(str, 'Full path of MasterRecWave')
    master_rectwv = Result(prods.MasterRectWave)

    def run(self, rinput):
        filename = rinput.filename
        self.logger.debug('filename is %s', filename)
        master_rectwv = load(prods.MasterRectWave, filename)
        return self.create_result(master_rectwv=master_rectwv)
Example 9
class RasterSpectraRecipe(EmirRecipe):
    """
    Observing mode:
        Raster spectra
    """

    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_spectral_ff = Requirement(prods.MasterSpectralFlat,
                                     'Master spectral flatfield')
    st_calibration = Requirement(prods.SlitTransmissionCalibration,
                                 'Slit transmission calibration')
    w_calibration = Requirement(prods.WavelengthCalibration,
                                'Wavelength calibration')

    cube = Product(prods.DataCube)

    def run(self, rinput):
        return self.create_result(cube=prods.DataCube())
Example 10
class BiasRecipe(BaseRecipe):

    obresult = Requirement(ObservationResultType, "Observation Result")
    master_bias = Result(MasterBias)

    def run(self, rinput):

        # Here the raw images are processed
        # and a final image myframe is created
        myframe = rinput.obresult.frames[0].open()
        #

        result = self.create_result(master_bias=myframe)
        return result
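
A hedged usage sketch for a simple recipe like this one, assuming numina's convention that each recipe class carries auto-generated input and result types and a create_input helper mirroring create_result (my_obresult stands for an ObservationResult built elsewhere):

recipe = BiasRecipe()
# my_obresult is a placeholder for an ObservationResult loaded elsewhere
rinput = recipe.create_input(obresult=my_obresult)
result = recipe.run(rinput)
master = result.master_bias
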
Example 11
class MaskImagingRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.CoordinateList2DType,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    average_box_row_size = Parameter(
        7, 'Number of rows to average for fine centering (odd)')
    average_box_col_size = Parameter(
        21, 'Number of columns to extract for fine centering (odd)')
    fit_peak_npoints = Parameter(
        3, 'Number of points to use for fitting the peak (odd)')

    # Recipe Products
    frame = Result(prods.ProcessedImage)
    # derivative = Result(prods.ProcessedImage)
    slits = Result(tarray.ArrayType)
    positions3 = Result(tarray.ArrayType)
    positions5 = Result(tarray.ArrayType)
    positions7 = Result(tarray.ArrayType)
    positions9 = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    TSUTC1 = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)

    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise numina.exceptions.RecipeError(error)

        self.logger.debug('finding bars')
        # Processed array
        arr = hdulist[0].data

        # Median filter of processed array (two times)
        mfilter_size = rinput.median_filter_size

        self.logger.debug('median filtering X, %d columns', mfilter_size)
        arr_median = median_filter(arr, size=(1, mfilter_size))
        self.logger.debug('median filtering X, %d rows', mfilter_size)
        arr_median = median_filter(arr_median, size=(mfilter_size, 1))

        # Median filter of processed array (two times) in the other direction
        # for Y coordinates
        self.logger.debug('median filtering Y, %d rows', mfilter_size)
        arr_median_alt = median_filter(arr, size=(mfilter_size, 1))
        self.logger.debug('median filtering Y, %d columns', mfilter_size)
        arr_median_alt = median_filter(arr_median_alt, size=(1, mfilter_size))

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        self.logger.debug('DTU shift is %s', vec)

        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions
        # Currently, we only use fields 0 and 2
        # of the nominal positions file

        # Number of rows used
        # These other parameters can also be tuned
        bstart = 1
        bend = 2047
        self.logger.debug('ignoring columns outside %d - %d', bstart, bend - 1)

        # extract a region to average
        wy = (rinput.average_box_row_size // 2)
        wx = (rinput.average_box_col_size // 2)
        self.logger.debug('extraction window is %d rows, %d cols', 2 * wy + 1,
                          2 * wx + 1)
        # Fit the peak with these points
        wfit = 2 * (rinput.fit_peak_npoints // 2) + 1
        self.logger.debug('fit with %d points', wfit)

        # Minimum threshold
        threshold = 5 * EMIR_RON
        # Savitzky and Golay (1964) filter to compute the X derivative
        # scipy >= xx has a savgol_filter function
        # for compatibility we do it manually

        allpos = {}
        ypos3_kernel = None
        slits = numpy.zeros((EMIR_NBARS, 8), dtype='float')

        self.logger.info('start finding bars')
        for ks in [3, 5, 7, 9]:
            self.logger.debug('kernel size is %d', ks)
            # S and G kernel for derivative
            kw = ks * (ks * ks - 1) / 12.0
            coeffs_are = -numpy.arange((1 - ks) // 2, (ks - 1) // 2 + 1) / kw
            if ks == 3:
                ypos3_kernel = coeffs_are
            self.logger.debug('kernel weights are %s', coeffs_are)

            self.logger.debug('derive image in X direction')
            arr_deriv = convolve1d(arr_median, coeffs_are, axis=-1)
            # Axis 0 is the Y direction
            self.logger.debug('derive image in Y direction (with kernel=3)')
            arr_deriv_alt = convolve1d(arr_median_alt, ypos3_kernel, axis=0)

            positions = []
            for coords in barstab:
                lbarid = int(coords[0])
                rbarid = lbarid + EMIR_NBARS
                ref_y_coor = coords[1] + vec[1]
                poly_coeffs = coords[2:]
                prow = coor_to_pix_1d(ref_y_coor) - 1
                fits_row = prow + 1  # FITS pixel index

                # A function that returns the center of the bar
                # given its X position
                def center_of_bar(x):
                    # Pixel values are 0-based
                    return polyval(x + 1 - vec[0], poly_coeffs) + vec[1] - 1

                self.logger.debug('looking for bars with ids %d - %d', lbarid,
                                  rbarid)
                self.logger.debug('reference y position is Y %7.2f',
                                  ref_y_coor)

                # if ref_y_coor is outlimits, skip this bar
                # ref_y_coor is in FITS format
                if (ref_y_coor >= 2047) or (ref_y_coor <= 1):
                    self.logger.debug(
                        'reference y position is outlimits, skipping')
                    positions.append([lbarid, fits_row, fits_row, 1, 0, 3])
                    positions.append([rbarid, fits_row, fits_row, 1, 0, 3])
                    continue

                # Left bar
                self.logger.debug('measure left border (%d)', lbarid)

                centery, xpos, fwhm, st = char_bar_peak_l(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                xpos1 = xpos
                positions.append(
                    [lbarid, centery + 1, fits_row, xpos + 1, fwhm, st])

                # Right bar
                self.logger.debug('measure right border (%d)', rbarid)
                centery, xpos, fwhm, st = char_bar_peak_r(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                positions.append(
                    [rbarid, centery + 1, fits_row, xpos + 1, fwhm, st])
                xpos2 = xpos
                #
                if st == 0:
                    self.logger.debug('measure top-bottom borders')
                    try:
                        y1, y2, statusy = char_bar_height(arr_deriv_alt,
                                                          xpos1,
                                                          xpos2,
                                                          centery,
                                                          threshold,
                                                          wh=35,
                                                          wfit=wfit)
                    except Exception as error:
                        self.logger.warning('Error computing height: %s',
                                            error)
                        statusy = 44

                    if statusy in [0, 40]:
                        # Main border is detected
                        positions[-1][1] = y2 + 1
                        positions[-2][1] = y2 + 1
                    else:
                        # Update status
                        positions[-1][-1] = 4
                        positions[-2][-1] = 4
                else:
                    self.logger.debug('slit is not complete')
                    y1, y2 = 0, 0

                # Update positions

                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-2])
                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-1])

                if ks == 5:
                    slits[lbarid - 1] = [xpos1, y2, xpos2, y2,
                                         xpos2, y1, xpos1, y1]
                    # FITS coordinates
                    slits[lbarid - 1] += 1.0
                    self.logger.debug('inserting bars %d-%d into "slits"',
                                      lbarid, rbarid)

            allpos[ks] = numpy.asarray(
                positions, dtype='float')  # GCS doesn't like lists of lists

        self.logger.debug('end finding bars')
        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result
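
For reference, the Savitzky-Golay derivative weights computed inside the loop above reduce, for the smallest kernel, to the familiar central-difference kernel. A quick check that reproduces the recipe's expression for ks = 3:

import numpy

ks = 3
kw = ks * (ks * ks - 1) / 12.0  # 2.0 for ks = 3
coeffs = -numpy.arange((1 - ks) // 2, (ks - 1) // 2 + 1) / kw
assert numpy.allclose(coeffs, [0.5, 0.0, -0.5])
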
Example 12
class FullDitheredImagesRecipe(JoinDitheredImagesRecipe):
    obresult = ObservationResultRequirement(
        query_opts=ResultOf('frame', node='children'))
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    # extinction = Extinction_Requirement()
    # sources = Catalog_Requirement()
    # offsets = Offsets_Requirement()
    offsets = Requirement(prods.CoordinateList2DType,
                          'List of pairs of offsets',
                          optional=True)

    iterations = Parameter(4, 'Iterations of the recipe')
    sky_images = Parameter(
        5, 'Images used to estimate the '
        'background before and after current image')
    sky_images_sep_time = reqs.SkyImageSepTime_Requirement()
    check_photometry_levels = Parameter(
        [0.5, 0.8], 'Levels to check the flux of the objects')
    check_photometry_actions = Parameter(['warn', 'warn', 'default'],
                                         'Actions to take on images')

    frame = Result(prods.ProcessedImage)
    sky = Result(prods.ProcessedImage, optional=True)
    catalog = Result(prods.SourcesCatalog, optional=True)

    def run(self, rinput):
        partial_result = self.run_single(rinput)
        return partial_result

    def run_single(self, rinput):

        obresult = rinput.obresult

        # just in case images are in result, instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = resize_arrays(masks,
                                      subpixshape,
                                      offsetsp,
                                      finalshape,
                                      fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp,
                                use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data,
                                                       finalshape,
                                                       box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = resize_arrays(masks,
                                              subpixshape,
                                              offsetsp,
                                              finalshape,
                                              fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul,
                                        offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                      border=50)

        data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s)
                      for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions,
                                             channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %d, using %d-%d images', idx,
                                 len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error('No sky image available for frame %d',
                                      idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames',
                                  len(skydata))
                sky, _, num = median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warning(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(
                        arr_r, 'interm_%d_%03d.fits' % (inum, idx))

            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul,
                                    offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                          border=50)

            data_arr_0 = [
                (d[r] + s)
                for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)
            ]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result

    def compute_superflat(self, data_arr_r, objmask, regions, channels):
        # superflat

        mask = [objmask[r] for r in regions]
        scales = [numpy.median(d) for d in data_arr_r]
        self.logger.debug('flat scaling %s', scales)
        sf_data, _sf_var, sf_num = flatcombine(data_arr_r,
                                               masks=mask,
                                               scales=scales)

        for channel in channels:
            mask = (sf_num[channel] == 0)
            if numpy.any(mask):
                fixpix2(sf_data[channel], mask, out=sf_data[channel])

        # Normalize, flat has mean = 1
        sf_data /= sf_data.mean()
        return sf_data

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result
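
The advanced-sky step in Example 12 selects, for each frame, up to sky_images neighbours taken within sky_images_sep_time minutes before and after it. A small sketch of that time-window selection with invented timestamps:

import numpy

tstamps = numpy.array([0.0, 60.0, 120.0, 600.0, 660.0])  # seconds (invented)
idx, scale, sky_images = 1, 300.0, 5  # scale: separation limit in seconds
diff = tstamps - tstamps[idx]
after, = numpy.nonzero((diff > 0) & (diff < scale))
before, = numpy.nonzero((diff < 0) & (diff > -scale))
neighbours = numpy.concatenate((before[-sky_images:], after[:sky_images]))
assert list(neighbours) == [0, 2]  # only frames 0 and 2 fall inside the window
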
Example 13
class JoinDitheredImagesRecipe(EmirRecipe):
    """Combine single exposures obtained in dithered mode"""

    obresult = ObservationResultRequirement(query_opts=ResultOf(
        'STARE_IMAGE.frame', node='children', id_field="stareImagesIds"))
    accum_in = Requirement(prods.ProcessedImage,
                           description='Accumulated result',
                           optional=True,
                           destination='accum',
                           query_opts=ResultOf('DITHERED_IMAGE.accum',
                                               node='prev'))
    frame = Result(prods.ProcessedImage)
    sky = Result(prods.ProcessedImage, optional=True)
    #
    # Accumulate Frame results
    accum = Result(prods.ProcessedImage, optional=True)

    #@emirdrp.decorators.aggregate
    @emirdrp.decorators.loginfo
    def run(self, rinput):
        partial_result = self.run_single(rinput)
        new_result = self.aggregate_result(partial_result, rinput)
        return new_result

    def aggregate_result(self, partial_result, rinput):

        obresult = rinput.obresult
        # Check if this is our first run
        naccum = getattr(obresult, 'naccum', 1)
        accum = rinput.accum

        frame = partial_result.frame

        if 0 <= naccum <= 1 or accum is None:
            self.logger.debug('round %d initialize accumulator', naccum)
            newaccum = frame
        else:
            self.logger.debug('round %d of accumulation', naccum)
            newaccum = self.aggregate_frames(accum, frame, naccum)

        # Update partial result
        partial_result.accum = newaccum

        return partial_result

    def aggregate_frames(self, accum, frame, naccum):
        return self.aggregate2(accum, frame, naccum)

    def run_single(self, rinput):

        # Open all images
        obresult = rinput.obresult

        data_hdul = []
        for f in obresult.frames:
            img = f.open()
            data_hdul.append(img)

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine(data_arr_sr, data_hdul, finalshape, offsetsp,
                               refpix, use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions = self.compute_regions_from_objs(hdulist[0].data,
                                                     finalshape,
                                                     box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                hdulist = self.combine(data_arr_sr, data_hdul, finalshape,
                                       offsetsp, refpix, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        result = self.create_result(frame=hdulist, sky=sky_result)
        self.logger.info('end of dither recipe')
        return result

    def combine(self, data_arr_sr, data_hdul, finalshape, offsetsp, refpix,
                use_errors):
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = resize_arrays(masks,
                                      subpixshape,
                                      offsetsp,
                                      finalshape,
                                      fill=1)

        # Position of refpixel in final image
        refpix_final = refpix + offsetsp[0]
        self.logger.info('Position of refpixel in final image %s',
                         refpix_final)

        self.logger.info('Combine target images (final)')
        method = combine.median
        out = method(data_arr_sr, masks=mask_arr_r, dtype='float32')

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)

        hdr['TSUTC2'] = data_hdul[-1][0].header['TSUTC2']
        # Update obsmode in header

        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual images
        ncom = 0
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header.get('NUM-NCOM', 1)
        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += offsetsp[0][0]
        hdr['CRPIX2'] += offsetsp[0][1]

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])
        return hdulist

    def combine2(self, data, masks, data_hdul, offsetsp, use_errors):
        baseimg = data_hdul[0]
        base_header = baseimg[0].header

        self.logger.info('Combine target images (final)')
        method = combine.median
        out = method(data, masks=masks, dtype='float32')

        out = quantileclip(data, masks, dtype='float32', out=out, fclip=0.1)

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)

        hdr['TSUTC2'] = data_hdul[-1][0].header['TSUTC2']
        # Update obsmode in header

        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual images
        ncom = 0
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header.get('NUM-NCOM', 1)
        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += offsetsp[0][0]
        hdr['CRPIX2'] += offsetsp[0][1]

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])
        return hdulist

    def set_base_headers(self, hdr):
        """Set metadata in FITS headers."""
        hdr = super(JoinDitheredImagesRecipe, self).set_base_headers(hdr)
        hdr['IMGOBBL'] = 0
        hdr['OBSMODE'] = 'DITHERED_IMAGE'
        return hdr

    def compute_offset_wcs_imgs(self, imgs, baseshape, subpixshape):

        refpix = numpy.divide(numpy.array([baseshape], dtype='int'),
                              2).astype('float')
        offsets_xy = offsets_from_wcs_imgs(imgs, refpix)
        self.logger.debug("offsets_xy %s", offsets_xy)
        # Offsets in numpy order, swapping
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, refpix, offsets_xy

    def compute_offset_crosscor(self, arrs, region, subpixshape, refine=False):
        offsets_xy = offsets_from_crosscor(arrs,
                                           region,
                                           refine=refine,
                                           order='xy')
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets from cross-corr')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, offsets_xy

    def compute_offset_xy_crosscor_regions(self,
                                           arrs,
                                           regions,
                                           refine=False,
                                           tol=0.5):
        offsets_xy = offsets_from_crosscor_regions(arrs,
                                                   regions,
                                                   refine=refine,
                                                   order='xy',
                                                   tol=tol)
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping
        return offsets_xy

    def compute_offset_crosscor_regions(self,
                                        arrs,
                                        regions,
                                        subpixshape,
                                        refine=False,
                                        tol=0.5):
        offsets_xy = offsets_from_crosscor_regions(arrs,
                                                   regions,
                                                   refine=refine,
                                                   order='xy',
                                                   tol=tol)
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets from cross-corr')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, offsets_xy

    def compute_shapes_wcs(self, imgs):

        # Better near the center...
        shapes = [img[0].shape for img in imgs]
        ref_pix_xy_0 = (shapes[0][1] // 2, shapes[0][0] // 2)
        #
        ref_coor_xy = reference_pix_from_wcs_imgs(imgs, ref_pix_xy_0)
        # offsets_xy = offsets_from_wcs_imgs(imgs, numpy.asarray([ref_pix_xy_0]))
        # ll = [(-a[0]+ref_coor_xy[0][0], -a[1]+ref_coor_xy[0][1]) for a in ref_coor_xy]

        self.logger.debug("ref_coor_xy %s", ref_coor_xy)
        # Transform to pixels, integers
        ref_pix_xy = [coor_to_pix(c, order='xy') for c in ref_coor_xy]

        self.logger.info('Computing relative shapes')
        finalshape, partialshapes, finalpix_xy = combine_shapes(shapes,
                                                                ref_pix_xy,
                                                                order='xy')

        return finalshape, partialshapes, ref_pix_xy_0, finalpix_xy

    def compute_object_masks(self, data_arr_r, mask_arr_r, has_bpm_ext,
                             regions, masks):

        method = combine.mean

        self.logger.info(
            "initial stacking, %d images, with offsets using '%s'",
            len(data_arr_r), method.__name__)
        data1 = method(data_arr_r, masks=mask_arr_r, dtype='float32')

        self.logger.info('obtain segmentation mask')
        segmap = segmentation_combined(data1[0])
        # submasks
        if not has_bpm_ext:
            omasks = [(segmap[region] > 0) for region in regions]
        else:
            omasks = [((segmap[region] > 0) & bpm)
                      for region, bpm in zip(regions, masks)]

        return omasks

    def compute_sky_simple(self, data_hdul, use_errors=False):

        refimg = data_hdul[0]
        base_header = refimg[0].header
        self.logger.info('combine images with median')
        method = combine.median
        for hdul in data_hdul:
            med = numpy.median(hdul[0].data)
            self.logger.debug('median is %f', med)
        sky_data = method([m[0].data for m in data_hdul], dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def aggregate2(self, frame1, frame2, naccum):
        # FIXME, this is almost identical to run_single
        frames = [frame1, frame2]
        use_errors = True
        # Initial checks
        fframe = frames[0]
        img = fframe.open()
        base_header = img[0].header

        imgs = []
        for f in frames:
            img = f.open()
            imgs.append(img)

        self.logger.info('Computing offsets from WCS information')

        finalshape, partial_shapes, refpix_xy_0, refpix_final_xy = self.compute_shapes_wcs(
            imgs)

        self.logger.info('Shape of resized array is %s', finalshape)
        self.logger.debug("partial shapes %s", partial_shapes)

        masks = []
        self.logger.debug('Obtains masks')
        for img in imgs:
            if 'NUM' in img:
                self.logger.debug('Using NUM extension as mask')
                mask = numpy.where(img['NUM'].data, 0, 1).astype('int16')
            elif 'BPM' in img:
                self.logger.debug('Using BPM extension as mask')
                mask = numpy.where(img['BPM'].data, 1, 0).astype('int16')
            else:
                self.logger.warning('BPM missing, use zeros instead')
                mask = numpy.zeros_like(img[0].data)
            masks.append(mask)

        # Resizing target frames
        data_arr_r = resize_arrays_alt([img[0].data for img in imgs],
                                       partial_shapes,
                                       finalshape,
                                       fill=1)

        self.logger.debug('resize bad pixel masks')
        mask_arr_r = resize_arrays_alt(masks,
                                       partial_shapes,
                                       finalshape,
                                       fill=1)

        self.logger.debug("not computing sky")
        data_arr_sr = data_arr_r

        self.logger.info('Combine target images (final, aggregate)')
        self.logger.debug("weights for 'accum' and 'frame'")

        weight_accum = 2 * (1 - 1.0 / naccum)
        weight_frame = 2.0 / naccum
        scales = [1.0 / weight_accum, 1.0 / weight_frame]
        self.logger.debug("weights for 'accum' and 'frame', %s", scales)
        method = combine.mean

        out = method(data_arr_sr,
                     masks=mask_arr_r,
                     scales=scales,
                     dtype='float32')

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)
        hdr['IMGOBBL'] = 0
        hdr['TSUTC2'] = imgs[-1][0].header['TSUTC2']
        # Update obsmode in header
        hdr['OBSMODE'] = 'DITHERED_IMAGE'
        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(imgs), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual frames
        ncom = 0
        for img in imgs:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header['NUM-NCOM']

        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += (refpix_final_xy[0] - refpix_xy_0[0])
        hdr['CRPIX2'] += (refpix_final_xy[1] - refpix_xy_0[1])

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])

        return hdulist

    def compute_regions_from_objs(self, arr, finalshape, box=50, corners=True):
        regions = []
        catalog, mask = self.create_object_catalog(arr, border=300)

        self.save_intermediate_array(mask, 'objmask.fits')
        # From the catalog, keep the NKEEP brightest objects smaller
        # than LIMIT_AREA pixels

        LIMIT_AREA = 5000
        NKEEP = 1
        idx_small = catalog['npix'] < LIMIT_AREA
        objects_small = catalog[idx_small]
        idx_flux = objects_small['flux'].argsort()
        objects_nth = objects_small[idx_flux][-NKEEP:]
        for obj in objects_nth:
            self.logger.debug('selected object at x=%s, y=%s',
                              obj['x'], obj['y'])
            region = image_box2d(obj['x'], obj['y'], finalshape, (box, box))
            regions.append(region)
        return regions

    def compute_regions(self, finalshape, box=200, corners=True):
        regions = []
        # A square region of (2 * box + 1) pixels per side,
        # centered in the image
        xref_cross = finalshape[1] // 2
        yref_cross = finalshape[0] // 2
        #
        self.logger.debug("Reference position is (x,y) %d  %d", xref_cross + 1,
                          yref_cross + 1)
        self.logger.debug("Reference regions size is %d", 2 * box + 1)
        region = image_box2d(xref_cross, yref_cross, finalshape, (box, box))
        regions.append(region)
        # corners
        if corners:
            xref_c = finalshape[1] // 4
            yref_c = finalshape[0] // 4

            for xi in [xref_c, 3 * xref_c]:
                for yi in [yref_c, 3 * yref_c]:
                    self.logger.debug("Reference position is (x,y) %d  %d",
                                      xi + 1, yi + 1)
                    self.logger.debug("Reference regions size is %d",
                                      2 * box + 1)
                    region = image_box2d(xi, yi, finalshape, (box, box))
                    regions.append(region)

        return regions

    def create_object_catalog(self, arr, threshold=1.5, border=0):

        if border > 0:
            wmap = numpy.ones_like(arr)
            wmap[border:-border, border:-border] = 0
        else:
            wmap = None

        bkg = sep.Background(arr)
        data_sub = arr - bkg
        objects, objmask = sep.extract(data_sub,
                                       threshold,
                                       err=bkg.globalrms *
                                       numpy.ones_like(data_sub),
                                       mask=wmap,
                                       segmentation_map=True)
        return objects, objmask
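
The create_object_catalog method above is a thin wrapper around sep: model the
background, subtract it, and extract sources together with a segmentation map.
A minimal, self-contained sketch of that same sequence on synthetic data (the
Gaussian source, its position and the random seed are illustrative assumptions,
not values taken from the recipe):

import numpy
import sep

rng = numpy.random.default_rng(42)
# Flat noisy background plus one bright synthetic source (illustrative only)
arr = rng.normal(loc=100.0, scale=5.0, size=(256, 256)).astype('float32')
yy, xx = numpy.mgrid[0:256, 0:256]
arr += 500.0 * numpy.exp(-0.5 * ((xx - 128.0) ** 2 + (yy - 128.0) ** 2) / 9.0)

bkg = sep.Background(arr)    # smooth 2D background model
data_sub = arr - bkg         # sep.Background supports direct subtraction
objects, segmap = sep.extract(
    data_sub,
    1.5,                     # detection threshold, in units of the error array
    err=bkg.globalrms * numpy.ones_like(data_sub),
    segmentation_map=True)
# 'objects' is a structured array with fields such as 'x', 'y', 'npix', 'flux'
print(len(objects), objects['x'], objects['y'])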
Example n. 14
class JoinDitheredImagesRecipe(EmirRecipe):
    """Combine single exposures obtained in dithered mode"""

    obresult = ObservationResultRequirement()
    accum_in = Requirement(DataFrameType,
                           description='Accumulated result',
                           optional=True,
                           destination='accum',
                           query_opts=Result('accum', node='prev'))
    frame = Product(DataFrameType)
    sky = Product(DataFrameType, optional=True)
    #
    # Accumulate Frame results
    accum = Product(DataFrameType, optional=True)

    def build_recipe_input(self, obsres, dal, pipeline='default'):
        if numina.ext.gtc.check_gtc():
            self.logger.debug(
                'Using GTC version of build_recipe_input in DitheredImages')
            return self.build_recipe_input_gtc(obsres, dal, pipeline=pipeline)
        else:
            return super(JoinDitheredImagesRecipe,
                         self).build_recipe_input(obsres, dal)

    def build_recipe_input_gtc(self, obsres, dal, pipeline='default'):
        newOR = ObservationResult()
        # FIXME: this method will work only in GTC
        # stareImagesIds = obsres['stareImagesIds']._v
        stareImagesIds = obsres.stareImagesIds
        obsres.children = stareImagesIds
        self.logger.info('Submode result IDs: %s', obsres.children)
        stareImages = []
        # Field to query the results
        key_field = 'frame'
        for subresId in obsres.children:
            subres = dal.getRecipeResult(subresId)
            # This 'frame' is the name of the product in RecipeResult
            # there is also a 'sky' field
            elements = subres['elements']
            stareImages.append(elements[key_field])
        newOR.frames = stareImages

        naccum = obsres.naccum
        self.logger.info('naccum: %d', naccum)
        mode_field = "DITHERED_IMAGE"
        key_field = 'accum'
        if naccum != 1:  # if it is not the first dithering loop
            self.logger.info("SEARCHING LATEST RESULT of %s", mode_field)
            latest_result = dal.getLastRecipeResult("EMIR", "EMIR", mode_field)
            elements = latest_result['elements']
            accum_dither = elements[key_field]
            self.logger.info("FOUND")
        else:
            self.logger.info("NO ACCUMULATION")
            accum_dither = stareImages[0]

        newOR.naccum = naccum
        newOR.accum = accum_dither

        # obsres['obresult'] = newOR
        # print('Adding RI parameters ', obsres)
        # newRI = DitheredImageARecipeInput(**obsres)
        newRI = self.create_input(obresult=newOR)
        return newRI

    #@emirdrp.decorators.aggregate
    @emirdrp.decorators.loginfo
    def run(self, rinput):
        partial_result = self.run_single(rinput)
        new_result = self.aggregate_result(partial_result, rinput)
        return new_result

    def aggregate_result(self, partial_result, rinput):

        obresult = rinput.obresult
        # Check if this is our first run
        naccum = getattr(obresult, 'naccum', 0)
        accum = getattr(obresult, 'accum', None)

        frame = partial_result.frame

        if naccum == 0:
            self.logger.debug('naccum is not set, do not accumulate')
            return partial_result
        elif naccum == 1:
            self.logger.debug('round %d initialize accumulator', naccum)
            newaccum = frame
        elif naccum > 1:
            self.logger.debug('round %d of accumulation', naccum)
            newaccum = self.aggregate_frames(accum, frame, naccum)
        else:
            msg = 'naccum set to %d, invalid' % (naccum, )
            self.logger.error(msg)
            raise RecipeError(msg)

        # Update partial result
        partial_result.accum = newaccum

        return partial_result

    def aggregate_frames(self, accum, frame, naccum):
        return self.aggregate2(accum, frame, naccum)

    def run_single(self, rinput):

        # Open all images
        obresult = rinput.obresult

        data_hdul = []
        for f in obresult.frames:
            img = f.open()
            data_hdul.append(img)

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # The corrector subtracts the sky only if
            # the keyword SKYADD is set
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            data_hdul_s = [corrector(m) for m in data_hdul]
            # data_arr_s = [m[0].data - sky_data for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm_%s.fits' % idx)

        compute_cross_offsets = True
        if compute_cross_offsets:
            try:
                self.logger.debug("Compute cross-correlation of images")
                regions = self.compute_regions(finalshape,
                                               box=200,
                                               corners=True)

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping axes
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%s.fits' % idx)

            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, using zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = resize_arrays(masks,
                                      subpixshape,
                                      offsetsp,
                                      finalshape,
                                      fill=1)

        # Position of refpixel in final image
        refpix_final = refpix + offsetsp[0]
        self.logger.info('Position of refpixel in final image %s',
                         refpix_final)

        self.logger.info('Combine target images (final)')
        method = combine.median
        out = method(data_arr_sr, masks=mask_arr_r, dtype='float32')

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)

        hdr['TSUTC2'] = data_hdul[-1][0].header['TSUTC2']
        # Update obsmode in header

        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual images
        ncom = 0
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header.get('NUM-NCOM', 1)
        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += offsetsp[0][0]
        hdr['CRPIX2'] += offsetsp[0][1]

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])

        result = self.create_result(frame=hdulist, sky=sky_result)
        self.logger.info('end of dither recipe')
        return result

    def set_base_headers(self, hdr):
        """Set metadata in FITS headers."""
        hdr = super(JoinDitheredImagesRecipe, self).set_base_headers(hdr)
        hdr['IMGOBBL'] = 0
        hdr['OBSMODE'] = 'DITHERED_IMAGE'
        return hdr

    def compute_offset_wcs_imgs(self, imgs, baseshape, subpixshape):

        refpix = numpy.divide(numpy.array([baseshape], dtype='int'),
                              2).astype('float')
        offsets_xy = offsets_from_wcs_imgs(imgs, refpix)
        self.logger.debug("offsets_xy %s", offsets_xy)
        # Offsets in numpy order, swapping axes
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, refpix, offsets_xy

    def compute_offset_crosscor(self, arrs, region, subpixshape, refine=False):
        offsets_xy = offsets_from_crosscor(arrs,
                                           region,
                                           refine=refine,
                                           order='xy')
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping axes
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets from cross-corr')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, offsets_xy

    def compute_offset_xy_crosscor_regions(self,
                                           arrs,
                                           regions,
                                           refine=False,
                                           tol=0.5):
        offsets_xy = offsets_from_crosscor_regions(arrs,
                                                   regions,
                                                   refine=refine,
                                                   order='xy',
                                                   tol=tol)
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping axes
        return offsets_xy

    def compute_offset_crosscor_regions(self,
                                        arrs,
                                        regions,
                                        subpixshape,
                                        refine=False,
                                        tol=0.5):
        offsets_xy = offsets_from_crosscor_regions(arrs,
                                                   regions,
                                                   refine=refine,
                                                   order='xy',
                                                   tol=tol)
        self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
        # Offsets in numpy order, swapping axes
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        self.logger.info('Computing relative offsets from cross-corr')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)

        return finalshape, offsetsp, offsets_xy

    def compute_shapes_wcs(self, imgs):

        # Better near the center...
        shapes = [img[0].shape for img in imgs]
        ref_pix_xy_0 = (shapes[0][1] // 2, shapes[0][0] // 2)
        #
        ref_coor_xy = reference_pix_from_wcs_imgs(imgs, ref_pix_xy_0)
        # offsets_xy = offsets_from_wcs_imgs(imgs, numpy.asarray([ref_pix_xy_0]))
        # ll = [(-a[0]+ref_coor_xy[0][0], -a[1]+ref_coor_xy[0][1]) for a in ref_coor_xy]

        self.logger.debug("ref_coor_xy %s", ref_coor_xy)
        # Transform to pixels, integers
        ref_pix_xy = [coor_to_pix(c, order='xy') for c in ref_coor_xy]

        self.logger.info('Computing relative shapes')
        finalshape, partialshapes, finalpix_xy = combine_shapes(shapes,
                                                                ref_pix_xy,
                                                                order='xy')

        return finalshape, partialshapes, ref_pix_xy_0, finalpix_xy

    def compute_object_masks(self, data_arr_r, mask_arr_r, has_bpm_ext,
                             regions, masks):

        method = combine.mean

        self.logger.info(
            "initial stacking, %d images, with offsets using '%s'",
            len(data_arr_r), method.__name__)
        data1 = method(data_arr_r, masks=mask_arr_r, dtype='float32')

        self.logger.info('obtain segmentation mask')
        segmap = segmentation_combined(data1[0])
        # submasks
        if not has_bpm_ext:
            omasks = [(segmap[region] > 0) for region in regions]
        else:
            omasks = [((segmap[region] > 0) & bpm)
                      for region, bpm in zip(regions, masks)]

        return omasks

    def compute_sky_simple(self, data_hdul, use_errors=False):
        method = combine.median

        refimg = data_hdul[0]
        base_header = refimg[0].header
        self.logger.info('combine images with median')
        sky_data = method([m[0].data for m in data_hdul], dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def aggregate2(self, frame1, frame2, naccum):
        # FIXME, this is almost identical to run_single
        frames = [frame1, frame2]
        use_errors = True
        # Initial checks
        fframe = frames[0]
        img = fframe.open()
        base_header = img[0].header

        imgs = []
        for f in frames:
            img = f.open()
            imgs.append(img)

        self.logger.info('Computing offsets from WCS information')

        finalshape, partial_shapes, refpix_xy_0, refpix_final_xy = self.compute_shapes_wcs(
            imgs)

        self.logger.info('Shape of resized array is %s', finalshape)
        self.logger.debug("partial shapes %s", partial_shapes)

        masks = []
        self.logger.debug('Obtain masks')
        for img in imgs:
            if 'NUM' in img:
                self.logger.debug('Using NUM extension as mask')
                mask = numpy.where(img['NUM'].data, 0, 1).astype('int16')
            elif 'BPM' in img:
                self.logger.debug('Using BPM extension as mask')
                mask = numpy.where(img['BPM'].data, 1, 0).astype('int16')
            else:
                self.logger.warning('BPM missing, using zeros instead')
                mask = numpy.zeros_like(img[0].data, dtype='int16')
            masks.append(mask)

        # Resizing target frames
        data_arr_r = resize_arrays_alt([img[0].data for img in imgs],
                                       partial_shapes,
                                       finalshape,
                                       fill=1)

        self.logger.debug('resize bad pixel masks')
        mask_arr_r = resize_arrays_alt(masks,
                                       partial_shapes,
                                       finalshape,
                                       fill=1)

        self.logger.debug("not computing sky")
        data_arr_sr = data_arr_r

        self.logger.info('Combine target images (final, aggregate)')
        self.logger.debug("weights for 'accum' and 'frame'")

        weight_accum = 2 * (1 - 1.0 / naccum)
        weight_frame = 2.0 / naccum
        scales = [1.0 / weight_accum, 1.0 / weight_frame]
        self.logger.debug("weights for 'accum' and 'frame', %s", scales)
        method = combine.mean

        out = method(data_arr_sr,
                     masks=mask_arr_r,
                     scales=scales,
                     dtype='float32')

        self.logger.debug('create result image')
        hdu = fits.PrimaryHDU(out[0], header=base_header)
        self.logger.debug('update result header')
        hdr = hdu.header
        self.set_base_headers(hdr)
        hdr['IMGOBBL'] = 0
        hdr['TSUTC2'] = imgs[-1][0].header['TSUTC2']
        # Update obsmode in header
        hdr['OBSMODE'] = 'DITHERED_IMAGE'
        hdu.header['history'] = "Combined %d images using '%s'" % (
            len(imgs), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        # Update NUM-NCOM, sum of individual frames
        ncom = 0
        for img in imgs:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))
            ncom += img[0].header['NUM-NCOM']

        hdr['NUM-NCOM'] = ncom
        # Update WCS, approximate solution
        hdr['CRPIX1'] += (refpix_final_xy[0] - refpix_xy_0[0])
        hdr['CRPIX2'] += (refpix_final_xy[1] - refpix_xy_0[1])

        #
        if use_errors:
            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])

        return hdulist

    def compute_regions(self, finalshape, box=200, corners=True):
        regions = []
        # A square region of (2 * box + 1) pixels per side,
        # centered in the image
        xref_cross = finalshape[1] // 2
        yref_cross = finalshape[0] // 2
        #
        self.logger.debug("Reference position is (x,y) %d  %d", xref_cross + 1,
                          yref_cross + 1)
        self.logger.debug("Reference regions size is %d", 2 * box + 1)
        region = image_box2d(xref_cross, yref_cross, finalshape, (box, box))
        regions.append(region)
        # corners
        if corners:
            xref_c = finalshape[1] // 4
            yref_c = finalshape[0] // 4

            for xi in [xref_c, 3 * xref_c]:
                for yi in [yref_c, 3 * yref_c]:
                    self.logger.debug("Reference position is (x,y) %d  %d",
                                      xi + 1, yi + 1)
                    self.logger.debug("Reference regions size is %d",
                                      2 * box + 1)
                    region = image_box2d(xi, yi, finalshape, (box, box))
                    regions.append(region)

        return regions
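
The scales used in aggregate2 implement a running mean: after naccum rounds the
accumulated image carries weight (naccum - 1)/naccum and the new frame weight
1/naccum. A small numpy check of that algebra, assuming (as the inverse scales
suggest) that the combiner averages the inputs after dividing each one by its
scale:

import numpy

def running_accum(frames):
    """Accumulate frames pairwise with the aggregate2 weighting scheme."""
    accum = frames[0]
    for naccum, frame in enumerate(frames[1:], start=2):
        weight_accum = 2 * (1 - 1.0 / naccum)
        weight_frame = 2.0 / naccum
        scales = [1.0 / weight_accum, 1.0 / weight_frame]
        # Plain mean of the rescaled pair, which reduces to
        # ((naccum - 1) * accum + frame) / naccum
        accum = numpy.mean([accum / scales[0], frame / scales[1]], axis=0)
    return accum

frames = [numpy.full((2, 2), v) for v in (1.0, 2.0, 3.0, 4.0)]
print(running_accum(frames))    # every pixel is 2.5, the mean of 1, 2, 3, 4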
Example n. 15
class ArcCalibrationRecipe(EmirRecipe):
    """Process arc images applying wavelength calibration"""

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    bound_param = reqs.RefinedBoundaryModelParamRequirement()
    lines_catalog = Requirement(LinesCatalog, 'Catalog of lines')

    reduced_image = Result(prods.ProcessedImage)
    rectwv_coeff = Result(prods.RectWaveCoeff)
    reduced_55sp = Result(prods.ProcessedMOS)
    reduced_arc = Result(prods.ProcessedMOS)

    @emirdrp.decorators.loginfo
    def run(self, rinput):
        self.logger.info('starting rect.+wavecal. reduction of arc spectra')

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=median)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # RectWaveCoeff object (with rectification and wavelength calibration
        # coefficients for the particular CSU configuration of the arc image)
        # and HDUList object with the FITS image corresponding to 55 median
        # spectra of each slitlet
        rectwv_coeff, reduced_55sp = rectwv_coeff_from_arc_image(
            reduced_image,
            rinput.bound_param,
            rinput.lines_catalog,
        )

        # generate associated ds9 region files and save them in work directory
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)

        # apply rectification and wavelength calibration
        reduced_arc = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # save results in result directory
        self.logger.info('end rect.+wavecal. reduction of arc spectra')
        result = self.create_result(reduced_image=reduced_image,
                                    rectwv_coeff=rectwv_coeff,
                                    reduced_55sp=reduced_55sp,
                                    reduced_arc=reduced_arc)
        return result

    def set_base_headers(self, hdr):
        newhdr = super(ArcCalibrationRecipe, self).set_base_headers(hdr)
        # Update SEC to 0
        newhdr['SEC'] = 0
        return newhdr
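
The set_base_headers methods in these recipes chain through super() so that
each level of the class hierarchy contributes its own keywords;
ArcCalibrationRecipe only adds SEC = 0 on top of whatever the base class sets.
A stripped-down illustration of the pattern (the classes and the NUMTYPE
keyword are hypothetical, not part of emirdrp):

from astropy.io import fits

class BaseSketch:
    def set_base_headers(self, hdr):
        hdr['NUMTYPE'] = 'sketch'    # hypothetical base-class keyword
        return hdr

class ArcSketch(BaseSketch):
    def set_base_headers(self, hdr):
        newhdr = super().set_base_headers(hdr)
        newhdr['SEC'] = 0            # as in ArcCalibrationRecipe above
        return newhdr

hdr = ArcSketch().set_base_headers(fits.Header())
print(repr(hdr))    # SEC appears alongside the base-class keyword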
Example n. 16
class FullDitheredImagesRecipe(EmirRecipe):
    """Recipe for the reduction of imaging mode observations.

    Recipe to reduce observations obtained in imaging mode, considering
    different possibilities depending on the size of the offsets
    between individual images.
    In particular, the following observing modes are considered: stare imaging,
    nodded beamswitched imaging, and dithered imaging.

    A critical piece of information here is a table that clearly specifies
    which images can be labeled as *science*, and which ones as *sky*.
    Note that some images are used both as *science* and *sky*
    (when the size of the targets is small compared to the offsets).

    **Observing modes:**

     * StareImage
     * Nodded/Beam-switched images
     * Dithered images


    **Inputs:**

     * Science frames + [Sky Frames]
     * Observing mode name: **stare image**, **nodded beamswitched image**,
       or **dithered imaging**
     * A table relating each science image with its sky image(s) (TBD
       whether this lives in the FITS header and/or in another format)
     * Offsets between them (offsets must be integers)
     * Master Dark
     * Bad pixel mask (BPM)
     * Non-linearity correction polynomials
     * Master flat (twilight/dome flats)
     * Master background (thermal background, only in K band)
     * Exposure Time (must be the same in all the frames)
     * Airmass for each frame
     * Detector model (gain, RN, readout mode)
     * Average extinction in the filter
     * Astrometric calibration (TBD)

    **Outputs:**

     * Image with three extensions: final image scaled to the individual
       exposure time, variance, and exposure time map OR number of images
       combined (TBD)

    **Procedure:**

    Images are corrected for dark, non-linearity and flat. Then, an iterative
    process starts:

     * Sky is computed from each frame, using the list of sky images of each
       science frame. The objects are avoided using a mask (from the second
       iteration on).

     * The relative offsets are the nominal ones from the telescope. From the
       second iteration on, we refine them using objects of appropriate
       brightness (not too bright, not too faint).

     * We combine the sky-subtracted images; the output is a new image, a
       variance image and an exposure map/number-of-images-used map.

     * An object mask is generated.

     * We recompute the sky map, using the object mask as an additional input.
       From here we iterate (typically 4 times).

     * Finally, the images are corrected for atmospheric extinction and
       flux calibrated.

     * A preliminary astrometric calibration can always be used (using
       the central coordinates of the pointing and the plate scale
       in the detector).
       A better calibration might be computed using available stars (TBD).

    """
    obresult = ObservationResultRequirement(
        query_opts=ResultOf('result_image', node='children'))

    master_bpm = reqs.MasterBadPixelMaskRequirement()

    offsets = Requirement(prods.CoordinateList2DType,
                          'List of pairs of offsets',
                          optional=True)
    refine_offsets = Parameter(False, 'Refine offsets by cross-correlation')
    iterations = Parameter(2, 'Iterations of the recipe')
    extinction = Parameter(0.0, 'Mean atmospheric extinction')

    sky_images = Parameter(
        5, 'Images used to estimate the '
        'background before and after the current image')

    sky_images_sep_time = Parameter(
        10, 'Maximum time interval between target and sky images [minutes]')

    result_image = Result(prods.ProcessedImage)
    result_sky = Result(prods.ProcessedImage, optional=True)

    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (2048, 2048)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets)

        self.resize(target_info, baseshape, offsetsp, finalshape)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(result[0].data,
                                                       finalshape,
                                                       box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping axes
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(
                    baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s",
                                  offsetsp2)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(images_info,
                                           result,
                                           step,
                                           target_is_sky,
                                           maxsep=sky_images_sep_time,
                                           nframes=sky_images,
                                           extinction=extinction)
            step += 1

        return self.create_result(result_image=result)

    def compute_offset_xy_crosscor_regions(self,
                                           iinfo,
                                           regions,
                                           refine=False,
                                           tol=0.5):

        names = [frame.lastname for frame in iinfo]
        self.logger.debug('frame names: %s', names)
        self.logger.debug('regions: %s', regions)
        with nfcom.manage_fits(names) as imgs:
            arrs = [img[0].data for img in imgs]
            offsets_xy = offsets_from_crosscor_regions(arrs,
                                                       regions,
                                                       refine=refine,
                                                       order='xy',
                                                       tol=tol)
            self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
            # Offsets in numpy order, swapping axes
        return offsets_xy

    def compute_size(self, images_info, baseshape, user_offsets=None):

        # Reference pixel in the center of the frame
        refpix = numpy.array([[baseshape[0] / 2.0, baseshape[1] / 2.0]])

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]

        if user_offsets is not None:
            self.logger.info('Using offsets from parameters')
            base_ref = numpy.asarray(user_offsets)
            list_of_offsets = -(base_ref - base_ref[0])
        else:
            self.logger.debug('Computing offsets from WCS information')
            with nfcom.manage_fits(img.origin
                                   for img in target_info) as images:
                list_of_offsets = offsets_from_wcs_imgs(images, refpix)

        # FIXME: I am using offsets in row/columns
        # the values are provided in XY so flip-lr
        list_of_offsets = numpy.fliplr(list_of_offsets)

        # Insert pixel offsets between frames
        for iinfo, off in zip(target_info, list_of_offsets):
            iinfo.pix_offset = off

            self.logger.debug('Frame %s, offset=%s', iinfo.label, off)

        self.logger.info('Computing relative offsets')
        offsets = [iinfo.pix_offset for iinfo in target_info]
        offsets = numpy.round(offsets).astype('int')

        finalshape, offsetsp = narray.combine_shape(baseshape, offsets)
        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)
        return finalshape, offsetsp, refpix, list_of_offsets

    def process_basic(self, images_info, target_is_sky=True, extinction=0.0):

        step = 0

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(images_info)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Simple sky correction')
        if target_is_sky:
            # Each frame is the closest sky frame available
            for iinfo in images_info:
                self.compute_simple_sky_for_frame(iinfo, iinfo)
        else:
            # Not implemented
            self.compute_simple_sky(target_info, sky_info)

        # Combining the frames
        self.logger.info("Step %d, Combining target frames", step)
        result = self.combine_frames(target_info, extinction=extinction)
        self.logger.info('Step %d, finished', step)

        return result

    def process_advanced(self,
                         images_info,
                         result,
                         step,
                         target_is_sky=True,
                         maxsep=5.0,
                         nframes=6,
                         extinction=0):

        seeing_fwhm = None
        baseshape = (2048, 2048)
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]
        self.logger.info('Step %d, generating segmentation image', step)

        objmask, seeing_fwhm = self.create_mask(result, seeing_fwhm, step=step)

        for frame in target_info:
            frame.objmask = name_object_mask(frame.label, step)
            self.logger.info('Step %d, create object mask %s', step,
                             frame.objmask)
            frame.objmask_data = objmask[frame.valid_region]
            fits.writeto(frame.objmask, frame.objmask_data, overwrite=True)

        if not target_is_sky:
            # Empty object mask for sky frames
            bogus_objmask = numpy.zeros(baseshape, dtype='uint8')

            for frame in sky_info:
                frame.objmask_data = bogus_objmask

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(sky_info, segmask=objmask, step=step)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Step %d, advanced sky correction (SC)', step)
        self.compute_advanced_sky(target_info,
                                  objmask,
                                  skyframes=sky_info,
                                  target_is_sky=target_is_sky,
                                  maxsep=maxsep,
                                  nframes=nframes,
                                  step=step)

        # Combining the images
        self.logger.info("Step %d, Combining the images", step)
        # FIXME: only for science
        result = self.combine_frames(target_info, extinction, step=step)
        return result

    def compute_simple_sky_for_frame(self, frame, skyframe, step=0, save=True):
        self.logger.info('Correcting sky in frame %s', frame.lastname)
        self.logger.info('with sky computed from frame %s', skyframe.lastname)

        if hasattr(skyframe, 'median_sky'):
            sky = skyframe.median_sky
        else:

            with fits.open(skyframe.lastname, mode='readonly') as hdulist:
                data = hdulist['primary'].data
                valid = data[frame.valid_region]

                if skyframe.objmask_data is not None:
                    self.logger.debug('object mask defined')
                    msk = frame.objmask_data
                    sky = numpy.median(valid[msk == 0])
                else:
                    self.logger.debug('object mask empty')
                    sky = numpy.median(valid)

            self.logger.debug('median sky value is %f', sky)
            skyframe.median_sky = sky

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname

        if save:
            shutil.copyfile(prev, dst)
        else:
            os.rename(prev, dst)

        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky

    def compute_simple_sky(self, frame, skyframe, step=0, save=True):
        raise NotImplementedError

    def correct_superflat(self, frame, fitted, step=0, save=True):

        frame.flat_corrected = name_skyflat_proc(frame.label, step)
        if save:
            shutil.copyfile(frame.resized_base, frame.flat_corrected)
        else:
            os.rename(frame.resized_base, frame.flat_corrected)

        self.logger.info("Step %d, SF: apply superflat to frame %s", step,
                         frame.flat_corrected)
        with fits.open(frame.flat_corrected, mode='update') as hdulist:
            data = hdulist['primary'].data
            datar = data[frame.valid_region]
            data[frame.valid_region] = narray.correct_flatfield(datar, fitted)

            frame.lastname = frame.flat_corrected

    def initial_classification(self, obresult, target_is_sky=False):
        """Classify input frames, """
        # lists of targets and sky frames

        with obresult.frames[0].open() as baseimg:
            # Initial checks
            has_bpm_ext = 'BPM' in baseimg
            self.logger.debug('images have BPM extension: %s', has_bpm_ext)

        images_info = []
        for f in obresult.frames:
            with f.open() as img:
                # Getting some metadata from FITS header
                hdr = img[0].header

                iinfo = ImageInfo(f)

                finfo = {}
                iinfo.metadata = finfo

                finfo['uuid'] = hdr['UUID']
                finfo['exposure'] = hdr['EXPTIME']
                # frame.baseshape = get_image_shape(hdr)
                finfo['airmass'] = hdr['airmass']
                finfo['mjd'] = hdr['tstamp']

                iinfo.label = 'result_image_{}'.format(finfo['uuid'])
                iinfo.mask = nfcom.Extension("BPM")
                # Insert pixel offsets between frames
                iinfo.objmask_data = None
                iinfo.valid_target = False
                iinfo.valid_sky = False

                # FIXME: hardcode itype for the moment
                iinfo.itype = 'TARGET'
                if iinfo.itype == 'TARGET':
                    iinfo.valid_target = True
                    #targetframes.append(iinfo)
                    if target_is_sky:
                        iinfo.valid_sky = True
                        #skyframes.append(iinfo)
                if iinfo.itype == 'SKY':
                    iinfo.valid_sky = True
                    #skyframes.append(iinfo)
                images_info.append(iinfo)

        return images_info

    def compute_superflat(self, images_info, segmask=None, step=0):

        self.logger.info("Step %d, SF: combining the frames without offsets",
                         step)

        base_imgs = [img.resized_base for img in images_info]
        with nfcom.manage_fits(base_imgs) as imgs:

            data = []
            masks = []

            for img, img_info in zip(imgs, images_info):
                self.logger.debug('Step %d, opening resized frame %s', step,
                                  img_info.resized_base)
                data.append(img['primary'].data[img_info.valid_region])

            scales = [numpy.median(d) for d in data]

            if segmask is not None:
                masks = [segmask[frame.valid_region] for frame in images_info]
            else:
                for frame in images_info:
                    self.logger.debug('Step %d, opening resized mask %s', step,
                                      frame.resized_mask)
                    hdulist = fits.open(frame.resized_mask,
                                        memmap=True,
                                        mode='readonly')
                    masks.append(hdulist['primary'].data[frame.valid_region])

            self.logger.debug('Step %d, combining %d frames', step, len(data))
            sf_data, _sf_var, sf_num = nacom.median(
                data,
                masks,
                scales=scales,
                dtype='float32',
                #blank=1.0 / scales[0]
            )

        # Normalize, flat has mean = 1
        sf_data[sf_data == 0] = 1e-5
        sf_data /= sf_data.mean()
        #sf_data[sf_data <= 0] = 1.0

        # Auxiliary data
        sfhdu = fits.PrimaryHDU(sf_data)
        self.save_intermediate_img(sfhdu, name_skyflat('comb', step))
        return sf_data

    def run_single(self, rinput):

        # FIXME: remove this, it is deprecated

        obresult = rinput.obresult

        # just in case images are in results, instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # The corrector subtracts the sky only if
            # the keyword SKYADD is set
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = narray.resize_arrays(
            [m[0].data for m in data_hdul_s],
            subpixshape,
            offsetsp,
            finalshape,
            fill=1)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = narray.resize_arrays(masks,
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp,
                                use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data,
                                                       finalshape,
                                                       box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping axes
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = narray.combine_shape(
                    subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = narray.resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = narray.resize_arrays(masks,
                                                     subpixshape,
                                                     offsetsp,
                                                     finalshape,
                                                     fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul,
                                        offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                      border=50)

        data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s)
                      for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions,
                                             channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
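                # select frames inside the time window: idxs1 up to
                # 'scale' after, idxs2 up to 'scale' before this frame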
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %s, using %d-%d images', idx,
                                 len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error('No sky image available for frame %d',
                                      idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames',
                                  len(skydata))
                sky, _, num = nacom.median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warning(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    narray.fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(
                        arr_r, 'interm_%d_%03d.fits' % (inum, idx))

            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul,
                                    offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                          border=50)

            data_arr_0 = [
                (d[r] + s)
                for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)
            ]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = narray.combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def combine_frames(self, frames, extinction, out=None, step=0):
        self.logger.debug('Step %d, opening sky-subtracted frames', step)

        def fits_open(name):
            """Open FITS with memmap in readonly mode"""
            return fits.open(name, mode='readonly', memmap=True)

        frameslll = [
            fits_open(frame.lastname) for frame in frames if frame.valid_target
        ]
        self.logger.debug('Step %d, opening mask frames', step)
        mskslll = [
            fits_open(frame.resized_mask) for frame in frames
            if frame.valid_target
        ]

        self.logger.debug('Step %d, combining %d frames', step, len(frameslll))
        try:
            extinc = [
                pow(10, -0.4 * frame.metadata['airmass'] * extinction)
                for frame in frames if frame.valid_target
            ]
            data = [i['primary'].data for i in frameslll]
            masks = [i['primary'].data for i in mskslll]
            headers = [i['primary'].header for i in frameslll]

            out = nacom.median(data,
                               masks,
                               scales=extinc,
                               dtype='float32',
                               out=out)

            base_header = headers[0]
            hdu = fits.PrimaryHDU(out[0], header=base_header)
            hdu.header['history'] = "Combined %d images using '%s'" % (
                len(frameslll), 'median')
            hdu.header['history'] = 'Combination time {}'.format(
                datetime.datetime.utcnow().isoformat())
            for img in frameslll:
                hdu.header['history'] = "Image {}".format(
                    img[0].header['uuid'])
            prevnum = base_header.get('NUM-NCOM', 1)
            hdu.header['NUM-NCOM'] = prevnum * len(frameslll)
            hdu.header['NUMRNAM'] = 'FullDitheredImagesRecipe'
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = 'FULL_DITHERED_IMAGE'
            # Headers of last image
            hdu.header['TSUTC2'] = headers[-1]['TSUTC2']

            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2].astype('uint8'), name='MAP')

            result = fits.HDUList([hdu, varhdu, num])
            # saving the three extensions
            fits.writeto('result_i%0d.fits' % step, out[0], overwrite=True)
            fits.writeto('result_i%0d_var.fits' % step, out[1], overwrite=True)
            fits.writeto('result_i%0d_npix.fits' % step,
                         out[2],
                         overwrite=True)

            result.writeto('result_i%0d_full.fits' % step, overwrite=True)
            return result

        finally:
            self.logger.debug('Step %d, closing sky-subtracted frames', step)
            for f in frameslll:
                f.close()
            self.logger.debug('Step %d, closing mask frames', step)
            for f in mskslll:
                f.close()

    def resize(self,
               frames,
               shape,
               offsetsp,
               finalshape,
               window=None,
               scale=1,
               step=0):
        self.logger.info('Resizing frames and masks')
        for frame, rel_offset in zip(frames, offsetsp):
            if frame.valid_target:
                region, _ = narray.subarray_match(finalshape, rel_offset,
                                                  shape)
                # Valid region
                frame.valid_region = region
                # Relative offset
                frame.rel_offset = rel_offset
                # names of frame and mask
                framen, maskn = name_redimensioned_frames(frame.label, step)
                frame.resized_base = framen
                frame.resized_mask = maskn
                self.logger.debug(
                    '%s, valid region is %s, relative offset is %s',
                    frame.label, custom_region_to_str(region), rel_offset)
                self.resize_frame_and_mask(frame, finalshape, framen, maskn,
                                           window, scale)

    def resize_frame_and_mask(self, frame, finalshape, framen, maskn, window,
                              scale):
        self.logger.info('Resizing frame %s', frame.label)
        with frame.origin.open() as hdul:
            baseshape = hdul[0].data.shape

            # FIXME: Resize_fits saves the resized image in framen
            resize_fits(hdul,
                        framen,
                        finalshape,
                        frame.valid_region,
                        window=window,
                        scale=scale,
                        dtype='float32')

        self.logger.info('Resizing mask %s', frame.label)
        # We don't conserve the sum of the values of the frame here, just
        # expand the mask

        if frame.mask is None:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            hdum = fits.HDUList(fits.PrimaryHDU(false_mask))
            frame.mask = hdum  #DataFrame(frame=hdum)
        elif isinstance(frame.mask, nfcom.Extension):
            ename = frame.mask.name
            with frame.origin.open() as hdul:
                frame.mask = fits.HDUList(hdul[ename].copy())

        resize_fits(frame.mask,
                    maskn,
                    finalshape,
                    frame.valid_region,
                    fill=1,
                    window=window,
                    scale=scale,
                    conserve=False)

    def create_mask(self, img, seeing_fwhm, step=0):

        #
        remove_border = True

        # sextractor takes care of bad pixels

        # if seeing_fwhm is not None and seeing_fwhm > 0:
        #    sex.config['SEEING_FWHM'] = seeing_fwhm * sex.config['PIXEL_SCALE']

        if remove_border:
            weightmap = 'weights4rms.fits'

            # Create weight map, remove n pixels from either side
            # using a Hanning filter
            # npix = 90
            # w1 = npix
            # w2 = npix
            # wmap = numpy.ones_like(sf_data[0])

            # cos_win1 = numpy.hanning(2 * w1)
            # cos_win2 = numpy.hanning(2 * w2)

            # wmap[:,:w1] *= cos_win1[:w1]
            # wmap[:,-w1:] *= cos_win1[-w1:]
            # wmap[:w2,:] *= cos_win2[:w2, numpy.newaxis]
            # wmap[-w2:,:] *= cos_win2[-w2:, numpy.newaxis]

            # Take the number of combined images from the combined image
            wm = img[2].data.copy()
            # Don't search objects where nimages < lower
            # FIXME: this is a magic number
            # We ignore objects in regions where we have less
            # than 10% of the images
            lower = wm.max() // 10
            border = (wm < lower)
            fits.writeto(weightmap, border.astype('uint8'), overwrite=True)

            # sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
            # FIXME: this is a magic number
            # sex.config['WEIGHT_THRESH'] = 50
            # sex.config['WEIGHT_IMAGE'] = weightmap
        else:
            border = None

        data_res = img[0].data
        bkg = sep.Background(data_res)
        data_sub = data_res - bkg

        self.logger.info('Running source extraction on previous result')
        objects, objmask = sep.extract(data_sub,
                                       1.5,
                                       err=bkg.globalrms,
                                       mask=border,
                                       segmentation_map=True)
        fits.writeto(name_segmask(step), objmask, overwrite=True)

        # # Plot objects
        # # FIXME, plot sextractor objects on top of image
        # patches = []
        # fwhms = []
        # nfirst = 0
        # catalog_f = sopen(sex.config['CATALOG_NAME'])
        # try:
        #     star = catalog_f.readline()
        #     while star:
        #         flags = star['FLAGS']
        #         # ignoring those objects with corrupted apertures
        #         if flags & sexcatalog.CORRUPTED_APER:
        #             star = catalog_f.readline()
        #             continue
        #         center = (star['X_IMAGE'], star['Y_IMAGE'])
        #         wd = 10 * star['A_IMAGE']
        #         hd = 10 * star['B_IMAGE']
        #         color = 'red'
        #         e = Ellipse(center, wd, hd, star['THETA_IMAGE'], color=color)
        #         patches.append(e)
        #         fwhms.append(star['FWHM_IMAGE'])
        #         nfirst += 1
        #         # FIXME Plot a ellipse
        #         star = catalog_f.readline()
        # finally:
        #     catalog_f.close()
        #
        # p = PatchCollection(patches, alpha=0.4)
        # ax = self._figure.gca()
        # ax.add_collection(p)
        # self._figure.canvas.draw()
        # self._figure.savefig('figure-segmentation-overlay_%01d.png' % step)
        #
        # self.figure_fwhm_histogram(fwhms, step=step)
        #
        # # mode with an histogram
        # hist, edges = numpy.histogram(fwhms, 50)
        # idx = hist.argmax()
        #
        # seeing_fwhm = 0.5 * (edges[idx] + edges[idx + 1])
        # if seeing_fwhm <= 0:
        #     _logger.warning(
        #         'Seeing FWHM %f pixels is negative, resetting', seeing_fwhm)
        #     seeing_fwhm = None
        # else:
        #     _logger.info('Seeing FWHM %f pixels (%f arcseconds)',
        #                  seeing_fwhm, seeing_fwhm * sex.config['PIXEL_SCALE'])
        # objmask = fits.getdata(name_segmask(step))

        return objmask, seeing_fwhm

    def compute_advanced_sky(self,
                             targetframes,
                             objmask,
                             skyframes=None,
                             target_is_sky=False,
                             maxsep=5.0,
                             nframes=10,
                             step=0,
                             save=True):

        if target_is_sky:
            skyframes = targetframes
            # Each frame is its closest sky frame
            nframes += 1
        elif skyframes is None:
            raise ValueError('skyframes not defined')

        # build kdtree
        sarray = numpy.array([frame.metadata['mjd'] for frame in skyframes])
        # shape must be (n, 1)
        sarray = numpy.expand_dims(sarray, axis=1)

        # query
        tarray = numpy.array([frame.metadata['mjd'] for frame in targetframes])
        # shape must be (n, 1)
        tarray = numpy.expand_dims(tarray, axis=1)

        kdtree = KDTree(sarray)

        # maxsep is given in minutes; SCALE converts it to timestamp units
        SCALE = 60.0
        # max_time_sep = ri.sky_images_sep_time / 1440.0
        _dis, idxs = kdtree.query(tarray,
                                  k=nframes,
                                  distance_upper_bound=maxsep * SCALE)
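        # KDTree.query marks missing neighbours with index == len(sarray),
        # which is why 'si < nsky' is checked below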

        nsky = len(sarray)

        for tid, idss in enumerate(idxs):
            try:
                tf = targetframes[tid]
                self.logger.info('Step %d, SC: computing advanced sky for %s',
                                 step, tf.label)
                # filter(lambda x: x < nsky, idss)
                locskyframes = []
                for si in idss:
                    if tid == si:
                        # this sky frame is the current frame, reject it
                        continue
                    if si < nsky:
                        self.logger.debug('Step %d, SC: %s is a sky frame',
                                          step, skyframes[si].label)
                        locskyframes.append(skyframes[si])
                self.compute_advanced_sky_for_frame(tf,
                                                    locskyframes,
                                                    step=step,
                                                    save=save)
            except IndexError:
                self.logger.error('No sky image available for frame %s',
                                  tf.lastname)
                raise

    def compute_advanced_sky_for_frame(self,
                                       frame,
                                       skyframes,
                                       step=0,
                                       save=True):
        self.logger.info('Correcting sky in frame %s', frame.lastname)
        self.logger.info('with sky computed from frames')
        for i in skyframes:
            self.logger.info('%s', i.flat_corrected)

        data = []
        scales = []
        masks = []
        # handle the FITS file to close it finally
        desc = []
        try:
            for i in skyframes:
                filename = i.flat_corrected
                hdulist = fits.open(filename, mode='readonly', memmap=True)

                data.append(hdulist['primary'].data[i.valid_region])
                desc.append(hdulist)
                #scales.append(numpy.median(data[-1]))
                if i.objmask_data is not None:
                    masks.append(i.objmask_data)
                    self.logger.debug('object mask is shared')
                elif i.objmask is not None:
                    hdulistmask = fits.open(i.objmask,
                                            mode='readonly',
                                            memmap=True)
                    masks.append(hdulistmask['primary'].data)
                    desc.append(hdulistmask)
                    self.logger.debug('object mask is particular')
                else:
                    self.logger.warning('no object mask for %s', filename)

            self.logger.debug('computing background with %d frames', len(data))
            sky, _, num = nacom.median(data, masks)  #, scales=scales)

        finally:
            # Closing all FITS files
            for hdl in desc:
                hdl.close()

        if numpy.any(num == 0):
            # We have pixels without
            # sky background information
            self.logger.warning(
                'pixels without sky information when correcting %s',
                frame.flat_corrected)
            binmask = num == 0
            # FIXME: during development, this is faster
            # sky[binmask] = sky[num != 0].mean()

            # To continue we interpolate over the patches
            narray.fixpix2(sky, binmask, out=sky, iterations=1)

            name = name_skybackgroundmask(frame.label, step)
            fits.writeto(name, binmask.astype('int16'), overwrite=True)

        name_sky = name_skybackground(frame.label, step)
        fits.writeto(name_sky, sky, overwrite=True)

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname
        shutil.copyfile(prev, dst)
        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky

    def compute_regions_from_objs(self, arr, finalshape, box=50, corners=True):
        regions = []
        catalog, mask = self.create_object_catalog(arr, border=300)

        self.save_intermediate_array(mask, 'objmask.fits')
        # with the catalog, keep the NKEEP brightest small objects

        LIMIT_AREA = 5000
        NKEEP = 1
        idx_small = catalog['npix'] < LIMIT_AREA
        objects_small = catalog[idx_small]
        idx_flux = objects_small['flux'].argsort()
        objects_nth = objects_small[idx_flux][-NKEEP:]
        for obj in objects_nth:
            self.logger.debug('ref is %s %s', obj['x'], obj['y'])
            region = nautils.image_box2d(obj['x'], obj['y'], finalshape,
                                         (box, box))
            self.logger.debug('region is %s', region)
            regions.append(region)
        return regions

    def create_object_catalog(self, arr, threshold=3.0, border=0):

        if border > 0:
            wmap = numpy.ones_like(arr)
            wmap[border:-border, border:-border] = 0
        else:
            wmap = None

        bkg = sep.Background(arr)
        data_sub = arr - bkg
        objects, objmask = sep.extract(data_sub,
                                       threshold,
                                       err=bkg.globalrms *
                                       numpy.ones_like(data_sub),
                                       mask=wmap,
                                       segmentation_map=True)
        return objects, objmask
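
A note on the pattern above: create_object_catalog is plain SEP usage (background model, subtraction, thresholded extraction with an optional border mask). A minimal stand-alone sketch of the same calls follows, on synthetic data; the array size, threshold and border width are placeholders:

import numpy
import sep

# Synthetic frame: noisy flat background plus one bright blob
rng = numpy.random.default_rng(42)
arr = rng.normal(100.0, 5.0, size=(256, 256)).astype('float32')
arr[120:126, 120:126] += 500.0

# Border mask, as in create_object_catalog: nonzero pixels are ignored
border = 16
wmap = numpy.ones_like(arr)
wmap[border:-border, border:-border] = 0

bkg = sep.Background(arr)  # spatially varying background model
data_sub = arr - bkg       # Background supports direct subtraction
objects, objmask = sep.extract(data_sub,
                               3.0,
                               err=bkg.globalrms * numpy.ones_like(data_sub),
                               mask=wmap,
                               segmentation_map=True)
print(len(objects), 'objects found')
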
Example no. 17
0
class CosmeticsRecipe(EmirRecipe):
    """Detector Cosmetics.

    Recipe to find and tag bad pixels in the detector.
    """

    obresult = ObservationResultRequirement()
    insconf = InstrumentConfigurationRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = Requirement(MasterBias, 'Master bias image')
    master_dark = Requirement(MasterDark, 'Master dark image')
    lowercut = Parameter(
        4.0, 'Values below this sigma level are flagged as dead pixels')
    uppercut = Parameter(
        4.0, 'Values above this sigma level are flagged as hot pixels')
    maxiter = Parameter(30, 'Maximum number of iterations')

    ratio = Product(DataFrameType)
    mask = Product(DataFrameType)

    def run(self, rinput):

        # FIXME:
        # We need 2 flats
        # Of different exposure times
        #
        # And their calibrations
        #

        if len(rinput.obresult.frames) < 2:
            raise RecipeError('The recipe requires 2 flat frames')

        iinfo = []
        for frame in rinput.obresult.frames:
            with frame.open() as hdulist:
                iinfo.append(gather_info(hdulist))

        _logger.debug('images info: %s', iinfo)

        # Loading calibrations
        with rinput.master_bias.open() as hdul:
            readmode = hdul[0].header.get('READMODE', 'undefined')
            if readmode.lower() in ['simple', 'bias']:
                _logger.info('loading bias')
                mbias = hdul[0].data
                bias_corrector = BiasCorrector(mbias)
            else:
                _logger.info('ignoring bias')
                bias_corrector = IdNode()

        with rinput.master_dark.open() as mdark_hdul:
            _logger.info('loading dark')
            mdark = mdark_hdul[0].data
            dark_corrector = DarkCorrector(mdark)

        flow = SerialFlow([bias_corrector, dark_corrector])

        _logger.info('processing flat #1')
        with rinput.obresult.frames[0].open() as hdul:
            other = flow(hdul)
            f1 = other[0].data.copy() * iinfo[0]['texp'] * 1e-3

        _logger.info('processing flat #2')
        with rinput.obresult.frames[1].open() as hdul:
            other = flow(hdul)
            f2 = other[0].data.copy() * iinfo[1]['texp'] * 1e-3

        # Preprocess...

        maxiter = rinput.maxiter
        lowercut = rinput.lowercut
        uppercut = rinput.uppercut

        ninvalid = 0
        mask = None

        if mask:
            m = fits.getdata(mask)
            ninvalid = numpy.count_nonzero(m)
        else:
            m = numpy.zeros_like(f1, dtype='int')

        for niter in range(1, maxiter + 1):
            _logger.info('iter %d', niter)
            ratio, m, sigma = cosmetics(f1,
                                        f2,
                                        m,
                                        lowercut=lowercut,
                                        uppercut=uppercut)
            # FIXME
            # These are intermediate results that
            # can be removed later
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                fits.writeto('numina-cosmetics-i%02d.fits' % niter,
                             ratio,
                             overwrite=True)
                fits.writeto('numina-mask-i%02d.fits' % niter, m,
                             overwrite=True)
                fits.writeto('numina-sigma-i%02d.fits' % niter,
                             m * 0.0 + sigma,
                             overwrite=True)
            _logger.info('iter %d, invalid points in input mask: %d', niter,
                         ninvalid)
            _logger.info('iter %d, estimated sigma is %f', niter, sigma)
            n_ninvalid = numpy.count_nonzero(m)

            # Probably something is wrong here:
            # too many defective pixels
            if ninvalid / m.size >= 0.10:
                # This should set a flag in the output
                msg = 'fraction of defective pixels is greater than 10%'
                _logger.warning(msg)

            if n_ninvalid == ninvalid:
                _logger.info('convergence reached after %d iterations', niter)
                break
            _logger.info('new invalid points: %d', n_ninvalid - ninvalid)
            ninvalid = n_ninvalid
        else:
            # This should set a flag in the output
            msg = 'convergence not reached after %d iterations' % maxiter
            _logger.warning(msg)

        _logger.info('number of dead pixels %d',
                     numpy.count_nonzero(m == PIXEL_DEAD))
        _logger.info('number of hot pixels %d',
                     numpy.count_nonzero(m == PIXEL_HOT))

        # FIXME
        # These are intermediate results that
        # can be removed later
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            fits.writeto('numina-cosmetics.fits', ratio, overwrite=True)
            fits.writeto('numina-mask.fits', m, overwrite=True)
            fits.writeto('numina-sigma.fits',
                         sigma * numpy.ones_like(m),
                         overwrite=True)

        hdu = fits.PrimaryHDU(ratio)
        hdr = hdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        ratiohdl = fits.HDUList([hdu])

        maskhdu = fits.PrimaryHDU(m)
        hdr = maskhdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        maskhdl = fits.HDUList([maskhdu])

        res = self.create_result(ratio=ratiohdl, mask=maskhdl)
        return res
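
The cosmetics() function used above comes from numina and is not reproduced here. The idea it implements (flag pixels whose flat ratio is an outlier, iterating until the mask stops growing) can be sketched as a simplified stand-in; the MAD-based sigma and the flag values 1/2 are assumptions of this sketch, not numina's PIXEL_DEAD/PIXEL_HOT implementation:

import numpy

def simple_cosmetics(f1, f2, mask, lowercut=4.0, uppercut=4.0):
    """Toy stand-in for numina's cosmetics(): flag outliers of f1 / f2."""
    safe_f2 = numpy.where(f2 != 0, f2, 1.0)
    ratio = f1 / safe_f2
    valid = ratio[mask == 0]
    center = numpy.median(valid)
    # Robust sigma from the median absolute deviation
    sigma = 1.4826 * numpy.median(numpy.abs(valid - center))
    new_mask = mask.copy()
    new_mask[ratio < center - lowercut * sigma] = 1  # dead-like pixels
    new_mask[ratio > center + uppercut * sigma] = 2  # hot-like pixels
    return ratio, new_mask, sigma

As in the recipe, a caller would repeat simple_cosmetics() and stop once numpy.count_nonzero(new_mask) no longer changes, or after maxiter rounds.
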
Example no. 18
0
class BaseABBARecipe(EmirRecipe):
    """Process images in ABBA mode"""

    obresult = ObservationResultRequirement(
        query_opts=qmod.ResultOf('STARE_SPECTRA.reduced_mos',
                                 node='children',
                                 id_field="stareSpectraIds"))
    accum_in = Requirement(prods.DataFrameType,
                           description='Accumulated result',
                           optional=True,
                           destination='accum',
                           query_opts=qmod.ResultOf('LS_ABBA.accum',
                                                    node='prev'))

    reduced_mos_abba = Result(prods.ProcessedMOS)
    # Accumulate 'reduced_mos_abba' results
    accum = Result(prods.ProcessedMOS, optional=True)

    def build_recipe_input(self, obsres, dal, pipeline='default'):
        if numina.ext.gtc.check_gtc():
            self.logger.debug('running in GTC environment')
            return self.build_recipe_input_gtc(obsres, dal)
        else:
            self.logger.debug('running outside of GTC environment')
            return super(BaseABBARecipe, self).build_recipe_input(obsres, dal)

    def build_recipe_input_gtc(self, obsres, dal):
        self.logger.debug('start recipe input builder')
        stareImagesIds = obsres.stareSpectraIds
        self.logger.debug('Stare Spectra images IDs: %s', stareImagesIds)
        stareImages = []
        for subresId in stareImagesIds:
            subres = dal.getRecipeResult(subresId)
            stareImages.append(subres['elements']['reduced_mos'])

        naccum = obsres.naccum
        self.logger.info('naccum: %d', naccum)
        if naccum != 1:  # if it is not the first dithering loop
            self.logger.info("SEARCHING LATEST RESULT LS_ABBA TO ACCUMULATE")
            latest_result = dal.getLastRecipeResult("EMIR", "EMIR", "LS_ABBA")
            accum_dither = latest_result['elements']['accum']
            self.logger.info("FOUND")
        else:
            self.logger.info("NO ACCUMULATION LS_ABBA")
            accum_dither = stareImages[0]

        newOR = numina.core.ObservationResult()
        newOR.frames = stareImages
        newOR.naccum = naccum
        newOR.accum = accum_dither
        newRI = self.create_input(obresult=newOR)
        self.logger.debug('end recipe input builder')
        return newRI

    def run(self, rinput):
        partial_result = self.run_single(rinput)
        new_result = self.aggregate_result(partial_result, rinput)
        return new_result

    def run_single(self, rinput):
        self.logger.info('starting spectroscopy ABBA reduction')

        flow = self.init_filters(rinput)
        nimages = len(rinput.obresult.frames)
        self.logger.info('we receive %d images', nimages)
        if nimages != 4:
            msg = 'Recipe expects 4 images, received %d' % nimages
            raise numina.exceptions.RecipeError(msg)

        processed_hdulists = basic_processing(rinput, flow)

        # Inputs are in ABBA order
        hdulist = self.process_abba(processed_hdulists)
        grism = hdulist[0].header.get('GRISM', 'unknown')
        if grism.lower() == 'open':
            # perform TEST10 in addition
            import emirdrp.recipes.acquisition.maskcheck as mk
            from numina.core import ObservationResult, DataFrame
            import numpy
            try:
                import StringIO as S
            except ImportError:
                import io as S

            self.logger.info('GRISM is OPEN, doing a RECIPE10')
            sub = mk.MaskCheckRecipe()
            sub.configure(instrument='EMIR', mode='TEST10')
            o = ObservationResult()
            o.__dict__ = rinput.obresult.__dict__
            o.frames = [DataFrame(frame=hdulist)]
            subd = {}
            subd['obresult'] = o
            ss = S.StringIO(_NOMINALPOS)
            subd['bars_nominal_positions'] = numpy.loadtxt(ss)

            subinput = mk.MaskCheckRecipe.RecipeInput(**subd)

            sub.run(subinput)

        result = self.create_result(reduced_mos_abba=hdulist)
        self.logger.info('end spectroscopy ABBA reduction')
        return result

    def process_abba(self, images):
        # Process four images in ABBA mode
        dataA0 = images[0][0].data.astype('float32')
        dataB0 = images[1][0].data.astype('float32')

        dataB1 = images[2][0].data.astype('float32')
        dataA1 = images[3][0].data.astype('float32')

        dataAB0 = dataA0 - dataB0
        dataAB1 = dataA1 - dataB1

        dataABBA = dataAB0 + dataAB1

        hdulist = self.create_proc_hdulist(images, dataABBA)
        self.logger.debug('update result header')
        hdu = hdulist[0]
        hdu.header['history'] = "Processed ABBA"
        hdu.header['NUM-NCOM'] = (2, 'Number of combined frames')
        dm = emirdrp.datamodel.EmirDataModel()
        for img, key in zip(images, ['A', 'B', 'B', 'A']):
            imgid = dm.get_imgid(img)
            hdu.header['history'] = "Image '{}' is '{}'".format(imgid, key)

        return hdulist

    def create_proc_hdulist(self, cdata, data_array):
        import astropy.io.fits as fits
        import uuid
        # Copy header of first image
        base_header = cdata[0][0].header.copy()

        hdu = fits.PrimaryHDU(data_array, header=base_header)
        self.set_base_headers(hdu.header)
        hdu.header['EMIRUUID'] = str(uuid.uuid1())
        # Update obsmode in header
        hdu.header['OBSMODE'] = 'LS_ABBA'
        # Headers of last image
        hdu.header['TSUTC2'] = cdata[-1][0].header['TSUTC2']
        result = fits.HDUList([hdu])
        return result

    def create_accum_hdulist(self,
                             cdata,
                             data_array_n,
                             method_name='unknown',
                             use_errors=False):
        import uuid

        base_header = cdata[0][0].header.copy()
        hdu = fits.PrimaryHDU(data_array_n[0], header=base_header)
        hdr = hdu.header
        self.set_base_headers(hdr)
        hdu.header['EMIRUUID'] = str(uuid.uuid1())
        hdr['IMGOBBL'] = 0
        hdr['TSUTC2'] = cdata[-1][0].header['TSUTC2']

        hdu.header['history'] = "Combined %d images using '%s'" % (len(cdata),
                                                                   method_name)
        #hdu.header['history'] = 'Combination time {}'.format(
        #    datetime.datetime.utcnow().isoformat()
        #)
        # Update NUM-NCOM, sum of individual frames
        ncom = 0
        for hdul in cdata:
            ncom += hdul[0].header['NUM-NCOM']
        hdr['NUM-NCOM'] = ncom

        #
        if use_errors:
            varhdu = fits.ImageHDU(data_array_n[1], name='VARIANCE')
            num = fits.ImageHDU(data_array_n[2], name='MAP')
            hdulist = fits.HDUList([hdu, varhdu, num])
        else:
            hdulist = fits.HDUList([hdu])

        return hdulist

    def aggregate_result(self, partial_result, rinput):
        obresult = rinput.obresult
        # Check if this is our first run
        naccum = getattr(obresult, 'naccum', 0)
        accum = getattr(obresult, 'accum', None)
        # result to accumulate
        result_key = 'reduced_mos_abba'
        field_to_accum = getattr(partial_result, result_key)

        if naccum == 0:
            self.logger.debug('naccum is not set, do not accumulate')
            return partial_result
        elif naccum == 1:
            self.logger.debug('round %d initialize accumulator', naccum)
            newaccum = field_to_accum
        elif naccum > 1:
            self.logger.debug('round %d of accumulation', naccum)
            newaccum = self.aggregate_frames(accum, field_to_accum, naccum)
        else:
            msg = 'naccum set to %d, invalid' % (naccum, )
            self.logger.error(msg)
            raise RecipeError(msg)

        # Update partial result
        partial_result.accum = newaccum

        return partial_result

    def aggregate_frames(self, accum, frame, naccum):
        return self.aggregate2(accum, frame, naccum)

    def aggregate2(self, img1, img2, naccum):

        frames = [img1, img2]
        use_errors = True
        # Initial checks
        fframe = frames[0]
        # Ref image
        img = fframe.open()
        has_num_ext = 'NUM' in img
        has_bpm_ext = 'BPM' in img
        base_header = img[0].header
        baseshape = img[0].shape

        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('uint8')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            masks = [m['BPM'].data for m in data_hdul]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_hdul]

        self.logger.info('Combine target images (final, aggregate)')

        weight_accum = 2 * (1 - 1.0 / naccum)
        weight_frame = 2.0 / naccum
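        # the accumulated image stands for naccum - 1 previous frames and
        # the new frame for one, so the weights keep the ratio (naccum-1):1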
        self.logger.debug("weights for 'accum' and 'frame', %s",
                          [weight_accum, weight_frame])
        scales = [1.0 / weight_accum, 1.0 / weight_frame]
        method = combine.mean
        data_arr = [hdul[0].data for hdul in data_hdul]
        out = method(data_arr, masks=masks, scales=scales, dtype='float32')

        self.logger.debug('create result image')

        return self.create_accum_hdulist(data_hdul,
                                         out,
                                         method_name=method.__name__,
                                         use_errors=False)
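
Stripped of header bookkeeping, the combination performed by process_abba above is one line of numpy arithmetic. A sketch with synthetic frames; the shape and pixel values are placeholders:

import numpy

shape = (2048, 2048)  # placeholder detector shape
a0, b0, b1, a1 = (numpy.full(shape, v, dtype='float32')
                  for v in (10.0, 3.0, 3.5, 10.5))

# Subtract each B exposure from its neighbouring A, then add the pairs;
# the sky signal, present in both A and B, cancels out
abba = (a0 - b0) + (a1 - b1)
print(abba.mean())  # ~14.0 here: the summed A-B signal of both pairs
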
Example no. 19
0
class TestPinholeRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    pinhole_nominal_positions = Requirement(
        prods.CoordinateList2DType, 'Nominal positions of the pinholes')
    shift_coordinates = Parameter(
        True, 'Use header information to'
        ' shift the pinhole positions from (0,0) '
        'to X_DTU, Y_DTU')
    box_half_size = Parameter(4, 'Half of the computation box size in pixels')
    recenter = Parameter(True, 'Recenter the pinhole coordinates')
    max_recenter_radius = Parameter(2.0, 'Maximum distance for recentering')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    positions = Result(tarray.ArrayType)
    positions_alt = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    filter = Result(str)
    readmode = Result(str)
    ROTANG = Result(float)
    DETPA = Result(float)
    DTUPA = Result(float)
    param_recenter = Result(bool)
    param_max_recenter_radius = Result(float)
    param_box_half_size = Result(float)

    def run(self, rinput):
        _logger.info('starting processing for pinhole detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        _logger.debug('finding pinholes')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            _logger.error(error)
            raise numina.exceptions.RecipeError(error)

        if rinput.shift_coordinates:
            xdtur, ydtur, zdtur = dtur
            xfac = xdtur / EMIR_PIXSCALE
            yfac = -ydtur / EMIR_PIXSCALE

            vec = numpy.array([yfac, xfac])
            _logger.info('shift is %s', vec)
            ncenters = rinput.pinhole_nominal_positions + vec
        else:
            _logger.info('using pinhole coordinates as they are')
            ncenters = rinput.pinhole_nominal_positions

        _logger.info('pinhole characterization')
        positions = pinhole_char(hdulist[0].data,
                                 ncenters,
                                 box=rinput.box_half_size,
                                 recenter_pinhole=rinput.recenter,
                                 maxdist=rinput.max_recenter_radius)

        _logger.info('alternate pinhole characterization')
        positions_alt = pinhole_char2(
            hdulist[0].data,
            ncenters,
            recenter_pinhole=rinput.recenter,
            recenter_half_box=rinput.box_half_size,
            recenter_maxdist=rinput.max_recenter_radius)

        result = self.create_result(
            frame=hdulist,
            positions=positions,
            positions_alt=positions_alt,
            filter=filtername,
            DTU=dtub,
            readmode=readmode,
            ROTANG=rotang,
            DETPA=detpa,
            DTUPA=dtupa,
            param_recenter=rinput.recenter,
            param_max_recenter_radius=rinput.max_recenter_radius,
            param_box_half_size=rinput.box_half_size)
        return result
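
The shift_coordinates branch of TestPinholeRecipe reduces to a single vector offset in pixel space, built from the DTU position in the header. A sketch of the same arithmetic; the EMIR_PIXSCALE value and the DTU readings are placeholders, not the instrument constants:

import numpy

EMIR_PIXSCALE = 18.0  # placeholder, plays the role of the recipe constant
xdtur, ydtur = 210.0, -105.0  # placeholder X_DTU, Y_DTU readings

# Same convention as the recipe: the Y factor flips sign and the
# vector is stored in (y, x) order
vec = numpy.array([-ydtur / EMIR_PIXSCALE, xdtur / EMIR_PIXSCALE])
nominal_positions = numpy.array([[1024.0, 1024.0], [900.0, 1100.0]])
ncenters = nominal_positions + vec
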
Example no. 20
0
class FullDitheredImagesRecipe(EmirRecipe):
    """Recipe for the reduction of imaging mode observations.

    Recipe to reduce observations obtained in imaging mode, considering
    different possibilities depending on the size of the offsets
    between individual images.
    In particular, the following observing modes are considered: stare imaging,
    nodded beamswitched imaging, and dithered imaging.

    A critical piece of information here is a table that clearly specifies
    which images can be labeled as *science*, and which ones as *sky*.
    Note that some images are used both as *science* and *sky*
    (when the size of the targets is small compared to the offsets).

    **Observing modes:**

     * StareImage
     * Nodded/Beam-switched images
     * Dithered images


    **Inputs:**

     * Science frames + [Sky Frames]
     * Observing mode name: **stare image**, **nodded beamswitched image**,
       or **dithered imaging**
     * A table relating each science image with its sky image(s) (TBD if
       it's in the FITS header and/or in other format)
     * Offsets between them (Offsets must be integer)
     * Master Dark
     * Bad pixel mask (BPM)
     * Non-linearity correction polynomials
     * Master flat (twilight/dome flats)
     * Master background (thermal background, only in K band)
     * Exposure Time (must be the same in all the frames)
     * Airmass for each frame
     * Detector model (gain, RN, lecture mode)
     * Average extinction in the filter
     * Astrometric calibration (TBD)

    **Outputs:**

     * Image with three extensions: final image scaled to the individual
       exposure time, variance and exposure time map OR number of images
       combined (TBD)

    **Procedure:**

    Images are corrected from dark, non-linearity and flat. Then, an iterative
    process starts:

     * Sky is computed from each frame, using the list of sky images of each
       science frame. The objects are avoided using a mask (from the second
       iteration on).

     * The relative offsets are the nominal ones from the telescope. From
       the second iteration on, we refine them using objects of appropriate
       brightness (not too bright, not too faint).

     * We combine the sky-subtracted images; the output is a new image, a
       variance image and an exposure map/number of images used map.

     * An object mask is generated.

     * We recompute the sky map, using the object mask as an additional input.
       From here we iterate (typically 4 times).

     * Finally, the images are corrected from atmospheric extinction and flux
       calibrated.

     * A preliminary astrometric calibration can always be used (using
       the central coordinates of the pointing and the plate scale
       in the detector).
       A better calibration might be computed using available stars (TBD).

    """

    logger = logging.getLogger(__name__)

    obresult = ObservationResultRequirement(
        query_opts=ResultOf('result_image', node='children'))

    master_bpm = reqs.MasterBadPixelMaskRequirement()

    offsets = Requirement(prods.CoordinateList2DType,
                          'List of pairs of offsets',
                          optional=True)
    refine_offsets = Parameter(False, 'Refine offsets by cross-correlation')
    iterations = Parameter(0, 'Iterations of the recipe')
    extinction = Parameter(0.0, 'Mean atmospheric extinction')

    method = Parameter('sigmaclip',
                       description='Combination method',
                       choices=['mean', 'median', 'sigmaclip'])
    method_kwargs = Parameter(dict(),
                              description='Arguments for combination method',
                              optional=True)

    sky_images = Parameter(
        0, 'Images used to estimate the '
        'background before and after the current image')

    sky_images_sep_time = Parameter(
        10, 'Maximum time interval between target and sky images [minutes]')

    result_image = Result(prods.ProcessedImage)
    result_sky = Result(prods.ProcessedImage, optional=True)

    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (EMIR_NAXIS2, EMIR_NAXIS1)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        # protections
        if rinput.iterations == 0 and sky_images != 0:
            raise ValueError(
                'sky_images: {} not compatible with iterations: {}'.format(
                    sky_images, rinput.iterations))

        if rinput.iterations > 0 and sky_images == 0:
            raise ValueError('iterations != 0 requires sky_images > 0')

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))
        # combination method and arguments
        method = getattr(nacom, rinput.method)
        method_kwargs = rinput.method_kwargs

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets)

        self.resize(target_info, baseshape, offsetsp, finalshape)

        step = 0

        result = self.process_basic(images_info,
                                    step=step,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction,
                                    method=method,
                                    method_kwargs=method_kwargs)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(step,
                                                       result[0].data,
                                                       finalshape,
                                                       box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets:\n%s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(
                    baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr):\n%s",
                                  offsetsp2)
                self.logger.info(
                    'Shape of resized array (crosscorr) is '
                    '(NAXIS2, NAXIS1) = %s', finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
                result = self.process_basic(images_info,
                                            step=step,
                                            target_is_sky=target_is_sky,
                                            extinction=extinction,
                                            method=method,
                                            method_kwargs=method_kwargs)

            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(images_info,
                                           result,
                                           step,
                                           target_is_sky,
                                           maxsep=sky_images_sep_time,
                                           nframes=sky_images,
                                           extinction=extinction,
                                           method=method,
                                           method_kwargs=method_kwargs)
            step += 1

        return self.create_result(result_image=result)

    def compute_offset_xy_crosscor_regions(self,
                                           iinfo,
                                           regions,
                                           refine=False,
                                           tol=0.5):

        names = [frame.lastname for frame in iinfo]
        with nfcom.manage_fits(names) as imgs:
            arrs = [img[0].data for img in imgs]
            offsets_xy = offsets_from_crosscor_regions(arrs,
                                                       regions,
                                                       refine=refine,
                                                       order='xy',
                                                       tol=tol)
            self.logger.debug("offsets_xy cross-corr:\n%s", offsets_xy)
        return offsets_xy

    def compute_size(self, images_info, baseshape, user_offsets=None):

        # Reference pixel in the center of the frame
        refpix = numpy.array([[baseshape[0] / 2.0, baseshape[1] / 2.0]])

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]

        if user_offsets is not None:
            self.logger.info('Using offsets from parameters')
            base_ref = numpy.asarray(user_offsets)
            list_of_offsets = -(base_ref - base_ref[0])
        else:
            self.logger.info('Computing offsets from WCS information')
            with nfcom.manage_fits(img.origin
                                   for img in target_info) as images:
                list_of_offsets = offsets_from_wcs_imgs(images, refpix)

        # FIXME: I am using offsets in row/columns
        # the values are provided in XY so flip-lr
        list_of_offsets = numpy.fliplr(list_of_offsets)

        # Insert pixel offsets between frames
        for iinfo, off in zip(target_info, list_of_offsets):
            iinfo.pix_offset = off

            self.logger.debug('Frame %s, offset=%s', iinfo.label, off)

        self.logger.info('Computing relative offsets')
        offsets = [iinfo.pix_offset for iinfo in target_info]
        offsets = numpy.round(offsets).astype('int')

        finalshape, offsetsp = narray.combine_shape(baseshape, offsets)
        self.logger.debug("Relative offsetsp:\n%s", offsetsp)
        self.logger.info('Shape of resized array is (NAXIS2, NAXIS1) = %s',
                         finalshape)
        return finalshape, offsetsp, refpix, list_of_offsets

    def process_basic(self,
                      images_info,
                      step=None,
                      target_is_sky=True,
                      extinction=0.0,
                      method=None,
                      method_kwargs=None):

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(images_info,
                                        method=method,
                                        method_kwargs=method_kwargs)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Simple sky correction')
        if target_is_sky:
            # Each frame acts as its own sky frame
            for iinfo in images_info:
                self.compute_simple_sky_for_frame(iinfo, iinfo)
        else:
            # Not implemented
            self.compute_simple_sky(target_info, sky_info)

        # Combining the frames
        self.logger.info("Step %d, Combining target frames", step)
        result = self.combine_frames(target_info,
                                     extinction=extinction,
                                     method=method,
                                     method_kwargs=method_kwargs)
        self.logger.info('Step %d, finished', step)

        return result

    def process_advanced(self,
                         images_info,
                         result,
                         step,
                         target_is_sky=True,
                         maxsep=5.0,
                         nframes=6,
                         extinction=0,
                         method=None,
                         method_kwargs=None):

        seeing_fwhm = None
        baseshape = (EMIR_NAXIS2, EMIR_NAXIS1)
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]
        self.logger.info('Step %d, generating segmentation image', step)

        objmask, seeing_fwhm = self.create_mask(result, seeing_fwhm, step=step)

        for frame in target_info:
            frame.objmask = name_object_mask(frame.label, step)
            self.logger.info('Step %d, create object mask %s', step,
                             frame.objmask)
            frame.objmask_data = objmask[frame.valid_region]
            fits.writeto(frame.objmask, frame.objmask_data, overwrite=True)

        if not target_is_sky:
            # Empty object mask for sky frames
            bogus_objmask = numpy.zeros(baseshape, dtype='uint8')

            for frame in sky_info:
                frame.objmask_data = bogus_objmask

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(sky_info,
                                        segmask=objmask,
                                        step=step,
                                        method=method,
                                        method_kwargs=method_kwargs)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Step %d, advanced sky correction (SC)', step)
        self.compute_advanced_sky(target_info,
                                  objmask,
                                  skyframes=sky_info,
                                  target_is_sky=target_is_sky,
                                  maxsep=maxsep,
                                  nframes=nframes,
                                  step=step,
                                  method=method,
                                  method_kwargs=method_kwargs)

        # Combining the images
        self.logger.info("Step %d, Combining the images", step)
        # FIXME: only for science
        result = self.combine_frames(target_info,
                                     extinction,
                                     step=step,
                                     method=method,
                                     method_kwargs=method_kwargs)
        return result

    def compute_simple_sky_for_frame(self, frame, skyframe, step=0, save=True):
        self.logger.info('Correcting sky in frame.....: %s', frame.lastname)
        self.logger.info('with sky computed from frame: %s', skyframe.lastname)

        if hasattr(skyframe, 'median_sky'):
            sky = skyframe.median_sky
        else:

            with fits.open(skyframe.lastname, mode='readonly') as hdulist:
                data = hdulist['primary'].data
                valid = data[frame.valid_region]

                if skyframe.objmask_data is not None:
                    self.logger.debug('object mask defined')
                    msk = skyframe.objmask_data
                    sky = numpy.median(valid[msk == 0])
                else:
                    self.logger.debug('object mask empty')
                    sky = numpy.median(valid)

            self.logger.debug('median sky value is %f', sky)
            skyframe.median_sky = sky

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname

        if save:
            shutil.copyfile(prev, dst)
        else:
            os.rename(prev, dst)

        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky
            self.logger.info('Sky-subtracted image in frame: %s',
                             frame.lastname)
            self.logger.info('---')
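
    # A minimal sketch of the correction above, assuming `data` is a 2-D
    # numpy array and `objmask` (optional) flags object pixels with
    # nonzero values. Illustrative only, not part of the recipe API.
    @staticmethod
    def _simple_sky_sketch(data, objmask=None):
        """Return `data` minus its median sky level (illustrative)."""
        if objmask is not None:
            sky = numpy.median(data[objmask == 0])
        else:
            sky = numpy.median(data)
        return data - sky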

    def compute_simple_sky(self, frame, skyframe, step=0, save=True):
        raise NotImplementedError

    def correct_superflat(self, frame, fitted, step=0, save=True):

        frame.flat_corrected = name_skyflat_proc(frame.label, step)
        if save:
            shutil.copyfile(frame.resized_base, frame.flat_corrected)
        else:
            os.rename(frame.resized_base, frame.flat_corrected)

        self.logger.info("Step %d, SF: apply superflat to frame %s", step,
                         frame.flat_corrected)
        with fits.open(frame.flat_corrected, mode='update') as hdulist:
            data = hdulist['primary'].data
            datar = data[frame.valid_region]
            data[frame.valid_region] = narray.correct_flatfield(datar, fitted)

            frame.lastname = frame.flat_corrected

    def initial_classification(self, obresult, target_is_sky=False):
        """Classify input frames, """
        # lists of targets and sky frames

        with obresult.frames[0].open() as baseimg:
            # Initial checks
            has_bpm_ext = 'BPM' in baseimg
            self.logger.info('images have BPM extension: %s', has_bpm_ext)

        images_info = []
        for f in obresult.frames:
            with f.open() as img:
                # Getting some metadata from FITS header
                hdr = img[0].header

                iinfo = ImageInfo(f)

                finfo = {}
                iinfo.metadata = finfo

                finfo['uuid'] = hdr['UUID']
                finfo['exposure'] = hdr['EXPTIME']
                # frame.baseshape = get_image_shape(hdr)
                finfo['airmass'] = hdr['airmass']
                finfo['mjd'] = hdr['tstamp']

                iinfo.label = 'result_image_{}'.format(finfo['uuid'])
                iinfo.mask = nfcom.Extension("BPM")
                # Insert pixel offsets between frames
                iinfo.objmask_data = None
                iinfo.valid_target = False
                iinfo.valid_sky = False

                # FIXME: itype is hardcoded for the moment
                iinfo.itype = 'TARGET'
                if iinfo.itype == 'TARGET':
                    iinfo.valid_target = True
                    # targetframes.append(iinfo)
                    if target_is_sky:
                        iinfo.valid_sky = True
                        # skyframes.append(iinfo)
                if iinfo.itype == 'SKY':
                    iinfo.valid_sky = True
                    # skyframes.append(iinfo)
                images_info.append(iinfo)

        return images_info

    def compute_superflat(self,
                          images_info,
                          segmask=None,
                          step=0,
                          method=None,
                          method_kwargs=None):

        self.logger.info("Step %d, SF: combining the frames without offsets",
                         step)

        base_imgs = [img.resized_base for img in images_info]
        with nfcom.manage_fits(base_imgs) as imgs:

            data = []
            masks = []

            for img, img_info in zip(imgs, images_info):
                self.logger.debug('Step %d, opening resized frame %s', step,
                                  img_info.resized_base)
                data.append(img['primary'].data[img_info.valid_region])

            scales = [numpy.median(d) for d in data]

            if segmask is not None:
                masks = [segmask[frame.valid_region] for frame in images_info]
            else:
                for frame in images_info:
                    self.logger.debug('Step %d, opening resized mask  %s',
                                      step, frame.resized_mask)
                    hdulist = fits.open(frame.resized_mask,
                                        memmap=True,
                                        mode='readonly')
                    # keep the HDUList open so the memmapped mask data
                    # remains valid during the combination
                    masks.append(hdulist['primary'].data[frame.valid_region])

            self.logger.debug("Step %d, combining %d frames using '%s'", step,
                              len(data), method.__name__)
            sf_data, _sf_var, sf_num = method(data,
                                              masks,
                                              scales=scales,
                                              dtype='float32',
                                              **method_kwargs)

        # Normalize, flat has mean = 1
        sf_data[sf_data == 0] = 1e-5
        sf_data /= sf_data.mean()
        # sf_data[sf_data <= 0] = 1.0

        # Auxiliary data
        sfhdu = fits.PrimaryHDU(sf_data)
        self.save_intermediate_img(sfhdu, name_skyflat('comb', step))
        return sf_data
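
    # Sketch of the normalization performed in compute_superflat, in
    # isolation: median-scale each plane, combine, then force mean = 1.
    # `stack` is a hypothetical (nimages, ny, nx) array; illustrative only.
    @staticmethod
    def _superflat_sketch(stack):
        scales = numpy.median(stack, axis=(1, 2))
        sf = numpy.median(stack / scales[:, None, None], axis=0)
        sf[sf == 0] = 1e-5
        return sf / sf.mean()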

    '''
    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = narray.combine.mean

        self.logger.info("recombine images with segmentation mask using '%s'", method.__name__)
        sky_data = method([m[0].data for m in data_hdul], masks=omasks, dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul),
            method.__name__
        )
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat()
        )
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data,
            points_no_data / sky_data[2].size
        )
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result
    '''

    def combine_frames(self,
                       frames,
                       extinction,
                       out=None,
                       step=0,
                       method=None,
                       method_kwargs=None):
        self.logger.debug('Step %d, opening sky-subtracted frames', step)

        def fits_open(name):
            """Open FITS with memmap in readonly mode"""
            return fits.open(name, mode='readonly', memmap=True)

        frameslll = [
            fits_open(frame.lastname) for frame in frames if frame.valid_target
        ]
        self.logger.debug('Step %d, opening mask frames', step)
        mskslll = [
            fits_open(frame.resized_mask) for frame in frames
            if frame.valid_target
        ]

        self.logger.debug("Step %d, combining %d frames using '%s'", step,
                          len(frameslll), method.__name__)
        try:
            extinc = [
                pow(10, -0.4 * frame.metadata['airmass'] * extinction)
                for frame in frames if frame.valid_target
            ]
            data = [i['primary'].data for i in frameslll]
            masks = [i['primary'].data for i in mskslll]
            headers = [i['primary'].header for i in frameslll]

            out = method(data,
                         masks,
                         scales=extinc,
                         dtype='float32',
                         out=out,
                         **method_kwargs)

            base_header = headers[0]
            hdu = fits.PrimaryHDU(out[0], header=base_header)
            hdu.header['history'] = "Combined %d images using '%s'" % (
                len(frameslll), method.__name__)
            hdu.header['history'] = 'Combination time {}'.format(
                datetime.datetime.utcnow().isoformat())
            for img in frameslll:
                hdu.header['history'] = "Image {}".format(
                    img[0].header['uuid'])
            prevnum = base_header.get('NUM-NCOM', 1)
            hdu.header['NUM-NCOM'] = prevnum * len(frameslll)
            hdu.header['NUMRNAM'] = 'FullDitheredImagesRecipe'
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = 'FULL_DITHERED_IMAGE'
            # Headers of last image
            hdu.header['TSUTC2'] = headers[-1]['TSUTC2']

            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2].astype('uint8'), name='MAP')

            result = fits.HDUList([hdu, varhdu, num])
            # saving the three extensions
            fits.writeto('result_i%d.fits' % step, out[0], overwrite=True)
            fits.writeto('result_i%d_var.fits' % step, out[1], overwrite=True)
            fits.writeto('result_i%d_npix.fits' % step,
                         out[2],
                         overwrite=True)

            result.writeto('result_i%d_full.fits' % step, overwrite=True)
            return result
            return result

        finally:
            self.logger.debug('Step %d, closing sky-subtracted frames', step)
            for f in frameslll:
                f.close()
            self.logger.debug('Step %d, closing mask frames', step)
            for f in mskslll:
                f.close()
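
    # The extinction weighting used by combine_frames, shown alone: each
    # frame is scaled by 10**(-0.4 * airmass * extinction) before the
    # combination. `airmasses` is an illustrative list of header values.
    @staticmethod
    def _extinction_scales_sketch(airmasses, extinction):
        return [pow(10, -0.4 * am * extinction) for am in airmasses]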

    def resize(self,
               frames,
               shape,
               offsetsp,
               finalshape,
               window=None,
               scale=1,
               step=0):
        self.logger.info('Resizing frames and masks')
        self.logger.debug('shape, finalshape (NAXIS2, NAXIS1) = %s --> %s',
                          shape, finalshape)
        for frame, rel_offset in zip(frames, offsetsp):
            if frame.valid_target:
                region, _ = narray.subarray_match(finalshape, rel_offset,
                                                  shape)
                # Valid region
                frame.valid_region = region
                # Relative offset
                frame.rel_offset = rel_offset
                # names of frame and mask
                framen, maskn = name_redimensioned_frames(frame.label, step)
                frame.resized_base = framen
                frame.resized_mask = maskn
                self.logger.debug('%s', frame.label)
                self.logger.debug('valid region is %s, relative offset is %s',
                                  custom_region_to_str(region), rel_offset)
                self.resize_frame_and_mask(frame, finalshape, framen, maskn,
                                           window, scale)
                self.logger.debug('---')

    def resize_frame_and_mask(self, frame, finalshape, framen, maskn, window,
                              scale):
        self.logger.info('Resizing frame %s', frame.label)
        with frame.origin.open() as hdul:
            baseshape = hdul[0].data.shape

            # FIXME: Resize_fits saves the resized image in framen
            resize_fits(hdul,
                        framen,
                        finalshape,
                        frame.valid_region,
                        window=window,
                        scale=scale,
                        dtype='float32')

        self.logger.info('Resizing mask  %s', frame.label)
        # We don't conserve the sum of the values of the frame here, just
        # expand the mask

        if frame.mask is None:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            hdum = fits.HDUList(fits.PrimaryHDU(false_mask))
            frame.mask = hdum  # DataFrame(frame=hdum)
        elif isinstance(frame.mask, nfcom.Extension):
            ename = frame.mask.name
            with frame.origin.open() as hdul:
                frame.mask = fits.HDUList(hdul[ename].copy())

        resize_fits(frame.mask,
                    maskn,
                    finalshape,
                    frame.valid_region,
                    fill=1,
                    window=window,
                    scale=scale,
                    conserve=False)
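
    # What the resize step achieves, in miniature: the frame is pasted
    # into a larger zero-filled canvas at its relative offset. A hedged
    # sketch assuming non-negative offsets that fit inside `finalshape`;
    # the real work is done by subarray_match and resize_fits above.
    @staticmethod
    def _paste_sketch(data, finalshape, off_y, off_x):
        canvas = numpy.zeros(finalshape, dtype='float32')
        ny, nx = data.shape
        canvas[off_y:off_y + ny, off_x:off_x + nx] = data
        return canvas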

    def create_mask(self, img, seeing_fwhm, step=0):

        remove_border = True

        # sextractor takes care of bad pixels

        # if seeing_fwhm is not None and seeing_fwhm > 0:
        #    sex.config['SEEING_FWHM'] = seeing_fwhm * sex.config['PIXEL_SCALE']

        if remove_border:
            weightmap = 'weights4rms.fits'

            # Create weight map, remove n pixs from either side
            # using a Hanning filter
            # npix = 90
            # w1 = npix
            # w2 = npix
            # wmap = numpy.ones_like(sf_data[0])

            # cos_win1 = numpy.hanning(2 * w1)
            # cos_win2 = numpy.hanning(2 * w2)

            # wmap[:,:w1] *= cos_win1[:w1]
            # wmap[:,-w1:] *= cos_win1[-w1:]
            # wmap[:w2,:] *= cos_win2[:w2, numpy.newaxis]
            # wmap[-w2:,:] *= cos_win2[-w2:, numpy.newaxis]

            # Take the number of combined images from the combined image
            wm = img[2].data.copy()
            # Don't search for objects where nimages < lower
            # FIXME: this is a magic number
            # We ignore objects in regions where we have less
            # than 10% of the images
            lower = wm.max() // 10
            border = (wm < lower)
            fits.writeto(weightmap, border.astype('uint8'), overwrite=True)

            # sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
            # FIXME: this is a magic number
            # sex.config['WEIGHT_THRESH'] = 50
            # sex.config['WEIGHT_IMAGE'] = weightmap
        else:
            border = None

        data_res = img[0].data
        bkg = sep.Background(data_res)
        data_sub = data_res - bkg

        self.logger.info('Running source extraction on previous result')
        objects, objmask = sep.extract(data_sub,
                                       1.5,
                                       err=bkg.globalrms,
                                       mask=border,
                                       segmentation_map=True)
        fits.writeto(name_segmask(step), objmask, overwrite=True)

        # # Plot objects
        # # FIXME, plot sextractor objects on top of image
        # patches = []
        # fwhms = []
        # nfirst = 0
        # catalog_f = sopen(sex.config['CATALOG_NAME'])
        # try:
        #     star = catalog_f.readline()
        #     while star:
        #         flags = star['FLAGS']
        #         # ignoring those objects with corrupted apertures
        #         if flags & sexcatalog.CORRUPTED_APER:
        #             star = catalog_f.readline()
        #             continue
        #         center = (star['X_IMAGE'], star['Y_IMAGE'])
        #         wd = 10 * star['A_IMAGE']
        #         hd = 10 * star['B_IMAGE']
        #         color = 'red'
        #         e = Ellipse(center, wd, hd, star['THETA_IMAGE'], color=color)
        #         patches.append(e)
        #         fwhms.append(star['FWHM_IMAGE'])
        #         nfirst += 1
        #         # FIXME Plot a ellipse
        #         star = catalog_f.readline()
        # finally:
        #     catalog_f.close()
        #
        # p = PatchCollection(patches, alpha=0.4)
        # ax = self._figure.gca()
        # ax.add_collection(p)
        # self._figure.canvas.draw()
        # self._figure.savefig('figure-segmentation-overlay_%01d.png' % step)
        #
        # self.figure_fwhm_histogram(fwhms, step=step)
        #
        # # mode with an histogram
        # hist, edges = numpy.histogram(fwhms, 50)
        # idx = hist.argmax()
        #
        # seeing_fwhm = 0.5 * (edges[idx] + edges[idx + 1])
        # if seeing_fwhm <= 0:
        #     _logger.warning(
        #         'Seeing FWHM %f pixels is not positive, resetting', seeing_fwhm)
        #     seeing_fwhm = None
        # else:
        #     _logger.info('Seeing FWHM %f pixels (%f arcseconds)',
        #                  seeing_fwhm, seeing_fwhm * sex.config['PIXEL_SCALE'])
        # objmask = fits.getdata(name_segmask(step))

        return objmask, seeing_fwhm

    def compute_advanced_sky(self,
                             targetframes,
                             objmask,
                             skyframes=None,
                             target_is_sky=False,
                             maxsep=5.0,
                             nframes=10,
                             step=0,
                             save=True,
                             method=None,
                             method_kwargs=None):

        if target_is_sky:
            skyframes = targetframes
            # Each frame is its closest sky frame
            nframes += 1
        elif skyframes is None:
            raise ValueError('skyframes not defined')

        # build kdtree
        sarray = numpy.array([frame.metadata['mjd'] for frame in skyframes])
        # shape must be (n, 1)
        sarray = numpy.expand_dims(sarray, axis=1)

        # query
        tarray = numpy.array([frame.metadata['mjd'] for frame in targetframes])
        # shape must be (n, 1)
        tarray = numpy.expand_dims(tarray, axis=1)

        kdtree = KDTree(sarray)

        # maxsep is given in minutes; convert to days for the MJD-based
        # tree: 1 / minutes in a Julian day
        SCALE = 1.0 / 1440.0
        # max_time_sep = ri.sky_images_sep_time / 1440.0
        _dis, idxs = kdtree.query(tarray,
                                  k=nframes,
                                  distance_upper_bound=maxsep * SCALE)

        nsky = len(sarray)

        for tid, idss in enumerate(idxs):
            try:
                tf = targetframes[tid]
                self.logger.info(
                    "Step %d, SC: computing advanced sky for %s using '%s'",
                    step, tf.label, method.__name__)
                # filter(lambda x: x < nsky, idss)
                locskyframes = []
                for si in idss:
                    if tid == si:
                        # this sky frame is the current frame, skip it
                        continue
                    if si < nsky:
                        self.logger.debug('Step %d, SC: %s is a sky frame',
                                          step, skyframes[si].label)
                        locskyframes.append(skyframes[si])
                self.compute_advanced_sky_for_frame(
                    tf,
                    locskyframes,
                    step=step,
                    save=save,
                    method=method,
                    method_kwargs=method_kwargs)
            except IndexError:
                self.logger.error('No sky image available for frame %s',
                                  tf.lastname)
                raise
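
    # The KDTree query above in miniature: for each target time, find the
    # indices of the k closest sky times. A hedged sketch assuming the
    # times are MJD (days) and `maxsep_min` is a separation in minutes.
    @staticmethod
    def _closest_sky_sketch(target_mjd, sky_mjd, k=3, maxsep_min=5.0):
        tree = KDTree(numpy.asarray(sky_mjd)[:, None])
        _dis, idxs = tree.query(numpy.asarray(target_mjd)[:, None],
                                k=k,
                                distance_upper_bound=maxsep_min / 1440.0)
        return idxs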

    def compute_advanced_sky_for_frame(self,
                                       frame,
                                       skyframes,
                                       step=0,
                                       save=True,
                                       method=None,
                                       method_kwargs=None):
        self.logger.info('Correcting sky in frame %s', frame.lastname)
        self.logger.info('with sky computed from frames')
        for i in skyframes:
            self.logger.info('%s', i.flat_corrected)

        data = []
        scales = []
        masks = []
        # handle the FITS file to close it finally
        desc = []
        try:
            for i in skyframes:
                filename = i.flat_corrected
                hdulist = fits.open(filename, mode='readonly', memmap=True)

                data.append(hdulist['primary'].data[i.valid_region])
                desc.append(hdulist)
                scales.append(numpy.median(data[-1]))
                if i.objmask_data is not None:
                    masks.append(i.objmask_data)
                    self.logger.debug('object mask is shared')
                elif i.objmask is not None:
                    hdulistmask = fits.open(i.objmask,
                                            mode='readonly',
                                            memmap=True)
                    masks.append(hdulistmask['primary'].data)
                    desc.append(hdulistmask)
                    self.logger.debug('object mask is frame-specific')
                else:
                    self.logger.warning('no object mask for %s', filename)

            self.logger.debug("computing background with %d frames using '%s'",
                              len(data), method.__name__)
            sky, _, num = method(data, masks, scales=scales, **method_kwargs)

            with fits.open(frame.lastname) as hdulist:
                data = hdulist['primary'].data
                valid = data[frame.valid_region]

                if frame.objmask_data is not None:
                    self.logger.debug('object mask defined')
                    msk = frame.objmask_data
                    skymedian = numpy.median(valid[msk == 0])
                else:
                    self.logger.debug('object mask empty')
                    skymedian = numpy.median(valid)
                self.logger.debug('scaling with skymedian %s', skymedian)
            sky *= skymedian

        finally:
            # Closing all FITS files
            for hdl in desc:
                hdl.close()

        if numpy.any(num == 0):
            # We have pixels without
            # sky background information
            self.logger.warning(
                'pixels without sky information when correcting %s',
                frame.flat_corrected)
            binmask = num == 0
            # FIXME: during development, this is faster
            # sky[binmask] = sky[num != 0].mean()

            # To continue we interpolate over the patches
            narray.fixpix2(sky, binmask, out=sky, iterations=1)

            name = name_skybackgroundmask(frame.label, step)
            fits.writeto(name, binmask.astype('int16'), overwrite=True)

        name_sky = name_skybackground(frame.label, step)
        fits.writeto(name_sky, sky, overwrite=True)

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname
        shutil.copyfile(prev, dst)
        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky
            # ToDo: ad hoc sky correction
            adhoc_correction = False
            if adhoc_correction:
                self.logger.debug('applying ad hoc sky correction to %s',
                                  frame.lastname)
                skycorr = self.sky_adhoc_correction(valid, frame.objmask_data)
                valid -= skycorr

    def compute_regions_from_objs(self,
                                  step,
                                  arr,
                                  finalshape,
                                  box=50,
                                  corners=True):
        regions = []
        # create catalog of objects skipping a border around the image
        catalog, mask = self.create_object_catalog(arr, border=300)

        self.save_intermediate_array(mask, 'objmask_i{}.fits'.format(step))
        # with the catalog, compute the brightest NKEEP objects

        LIMIT_AREA = 5000
        NKEEP = 3
        idx_small = catalog['npix'] < LIMIT_AREA
        objects_small = catalog[idx_small]
        idx_flux = objects_small['flux'].argsort()
        objects_nth = objects_small[idx_flux][-NKEEP:]
        for obj in objects_nth:
            self.logger.debug('ref is (x,y) = (%s, %s)', obj['x'], obj['y'])
            region = nautils.image_box2d(obj['x'], obj['y'], finalshape,
                                         (box, box))
            regions.append(region)
        return regions
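
    # The selection logic of compute_regions_from_objs in isolation: keep
    # sources below an area limit, then take the nkeep brightest by flux.
    # `catalog` mimics the structured array returned by sep.extract;
    # illustrative only.
    @staticmethod
    def _brightest_small_sketch(catalog, limit_area=5000, nkeep=3):
        small = catalog[catalog['npix'] < limit_area]
        return small[small['flux'].argsort()][-nkeep:]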

    def create_object_catalog(self, arr, threshold=3.0, border=0):

        if border > 0:
            wmap = numpy.ones_like(arr)
            wmap[border:-border, border:-border] = 0
        else:
            wmap = None

        bkg = sep.Background(arr)
        data_sub = arr - bkg
        objects, objmask = sep.extract(data_sub,
                                       threshold,
                                       err=bkg.globalrms *
                                       numpy.ones_like(data_sub),
                                       mask=wmap,
                                       segmentation_map=True)
        return objects, objmask

    def sky_adhoc_correction(self, arr, objmask, nsample=10000):
        self.logger.info('computing ad hoc sky correction')
        skyfit = arr.copy()
        # ToDo: remove next line
        numpy.random.seed(2019)
        # fit each quadrant
        lim = [0, 1024, 2048]
        for i in range(2):
            i1 = lim[i]
            i2 = lim[i + 1]
            for j in range(2):
                j1 = lim[j]
                j2 = lim[j + 1]
                self.logger.debug('quadrant rows %d:%d, cols %d:%d',
                                  i1, i2, j1, j2)
                data_masked = numpy.where(objmask[i1:i2, j1:j2], numpy.nan,
                                          skyfit[i1:i2, j1:j2])
                self.logger.debug('first nanmedian filter, size (1, 51)')
                skyfit[i1:i2, j1:j2] = ndimage.generic_filter(data_masked,
                                                              numpy.nanmedian,
                                                              size=(1, 51),
                                                              mode='nearest')
                data_masked = numpy.where(objmask[i1:i2, j1:j2], numpy.nan,
                                          skyfit[i1:i2, j1:j2])
                self.logger.debug('second nanmedian filter, size (51, 1)')
                skyfit[i1:i2, j1:j2] = ndimage.generic_filter(data_masked,
                                                              numpy.nanmedian,
                                                              size=(51, 1),
                                                              mode='nearest')
                if False:
                    # define arrays for fitting
                    xarray = numpy.tile(numpy.arange(j1, j2), i2 - i1)
                    yarray = numpy.repeat(numpy.arange(i1, i2), j2 - j1)
                    farray = arr[i1:i2, j1:j2].flatten()
                    print('initial number of pixels:', xarray.size)
                    # remove pixels affected by objects
                    usefulpix = (objmask[i1:i2, j1:j2].flatten() == 0)
                    xarray = xarray[usefulpix]
                    yarray = yarray[usefulpix]
                    farray = farray[usefulpix]
                    print('number of useful pixels.:', xarray.size)
                    # random choice of nsample pixels
                    ntotpix = xarray.size
                    if (ntotpix != yarray.size) or (ntotpix != farray.size):
                        raise ValueError('Unexpected sizes: {} {} {}'.format(
                            ntotpix, yarray.size, farray.size))
                    if ntotpix > nsample:
                        pixsample = numpy.random.choice(ntotpix, nsample)
                    else:
                        pixsample = numpy.arange(ntotpix)
                    xarray = xarray[pixsample]
                    yarray = yarray[pixsample]
                    farray = farray[pixsample]
                    print('number of sampled pixels:', xarray.size)

                    import itertools

                    def polyfit2d(x, y, z, order=2):
                        ncols = (order + 1)**2
                        G = numpy.zeros((x.size, ncols))
                        ij = itertools.product(range(order + 1),
                                               range(order + 1))
                        for k, (i, j) in enumerate(ij):
                            G[:, k] = x**i * y**j
                        m, _, _, _ = numpy.linalg.lstsq(G, z, rcond=None)
                        return m

                    def polyval2d(x, y, m):
                        order = int(numpy.sqrt(len(m))) - 1
                        ij = itertools.product(range(order + 1),
                                               range(order + 1))
                        z = numpy.zeros_like(x)
                        for a, (i, j) in zip(m, ij):
                            z += a * x**i * y**j
                        return z

                    m = polyfit2d(xarray, yarray, farray)
                    print('m:', m)
                    xx, yy = numpy.meshgrid(numpy.arange(j1, j2, dtype=float),
                                            numpy.arange(i1, i2, dtype=float))
                    zz = polyval2d(xx, yy, m)
                    print(skyfit[i1:i2, j1:j2].shape, zz.shape)
                    skyfit[i1:i2, j1:j2] = zz

        hdu = fits.PrimaryHDU(arr.astype('float32'))
        hdul = fits.HDUList([hdu])
        hdul.writeto('xxx1.fits', overwrite=True)
        hdu = fits.PrimaryHDU(skyfit.astype('float32'))
        hdul = fits.HDUList([hdu])
        hdul.writeto('xxx2.fits', overwrite=True)
        return skyfit
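

# A standalone, runnable version of the 2-D polynomial fit sketched in the
# disabled branch of sky_adhoc_correction above: fit z ~ sum a_ij x^i y^j
# by least squares. Hedged example; not part of the recipe API.
import itertools

import numpy


def polyfit2d_example(x, y, z, order=2):
    ij = list(itertools.product(range(order + 1), repeat=2))
    G = numpy.stack([x**i * y**j for i, j in ij], axis=1)
    m, _, _, _ = numpy.linalg.lstsq(G, z, rcond=None)
    return m


def polyval2d_example(x, y, m, order=2):
    ij = itertools.product(range(order + 1), repeat=2)
    return sum(a * x**i * y**j for a, (i, j) in zip(m, ij))
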
Esempio n. 21
0
class BarDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.CoordinateList2DType,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the canny algorithm')
    canny_high_threshold = Parameter(0.04,
                                     'High threshold for the canny algorithm')
    canny_low_threshold = Parameter(0.01,
                                    'Low threshold for the canny algorithm')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    positions = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)
    param_median_filter_size = Result(float)
    param_canny_high_threshold = Result(float)
    param_canny_low_threshold = Result(float)

    def run(self, rinput):

        logger = logging.getLogger('numina.recipes.emir')

        logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            logger.error(error)
            raise numina.exceptions.RecipeError(error)

        logger.debug('finding bars')

        arr = hdulist[0].data

        # Median filter
        logger.debug('median filtering')
        mfilter_size = rinput.median_filter_size

        arr_median = median_filter(arr, size=mfilter_size)

        # Image is mapped between 0 and 1
        # for the full range [0: 2**16]
        logger.debug('image scaling to 0-1')
        arr_grey = normalize_raw(arr_median)

        # Find borders
        logger.debug('find borders')
        canny_sigma = rinput.canny_sigma
        # These thresholds correspond roughly to
        # value x (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold

        edges = canny(arr_grey,
                      sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)

        # Number of rows used
        # These other parameters can also be tuned
        total = 5
        maxdist = 1.0
        bstart = 100
        bend = 1900

        positions = []
        nt = total // 2

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        logger.debug('DTU shift is %s', vec)

        # Based on the 'edges image'
        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions

        # Currently, we only use fields 0 and 2
        # of the nominal positions file

        for coords in barstab:
            lbarid = int(coords[0])
            # right-bar ids are offset by the number of bars per side (55)
            rbarid = lbarid + 55
            ref_y_coor = coords[2] + vec[1]
            prow = coor_to_pix_1d(ref_y_coor) - 1
            fits_row = prow + 1  # FITS pixel index

            logger.debug('looking for bars with ids %d - %d', lbarid, rbarid)
            logger.debug('reference y position is Y %7.2f', ref_y_coor)
            # Find the position of each bar

            bpos = find_position(edges, prow, bstart, bend, total)

            nbars_found = len(bpos)

            # If no bar is found, append an empty token
            if nbars_found == 0:
                logger.debug('bars %d, %d not found at row %d', lbarid, rbarid,
                             fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            elif nbars_found == 2:

                # Order values by increasing X
                centl, centr = sorted(bpos, key=lambda cen: cen[0])
                c1 = centl[0]
                c2 = centr[0]

                logger.debug('bars found at row %d between %7.2f - %7.2f',
                             fits_row, c1, c2)
                # Compute FWHM of the collapsed profile

                cslit = arr_grey[prow - nt:prow + nt + 1, :]
                pslit = cslit.mean(axis=0)

                # Add 1 to return FITS coordinates
                epos, epos_f, error = locate_bar_l(pslit, c1)
                thisres1 = lbarid, fits_row, epos + 1, epos_f + 1, error

                epos, epos_f, error = locate_bar_r(pslit, c2)
                thisres2 = rbarid, fits_row, epos + 1, epos_f + 1, error

            elif nbars_found == 1:
                logger.warning(
                    'only 1 edge found at row %d, not yet implemented',
                    fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            else:
                logger.warning(
                    '3 or more edges found at row %d, not yet implemented',
                    fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            positions.append(thisres1)
            positions.append(thisres2)

        logger.debug('end finding bars')
        result = self.create_result(
            frame=hdulist,
            positions=positions,
            DTU=dtub,
            ROTANG=rotang,
            csupos=csupos,
            csusens=csusens,
            param_median_filter_size=rinput.median_filter_size,
            param_canny_high_threshold=rinput.canny_high_threshold,
            param_canny_low_threshold=rinput.canny_low_threshold)
        return result
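
# The canny thresholds above live in the normalized 0-1 range. A hedged
# sketch of the scaling that normalize_raw is assumed to perform on the
# 16-bit raw data before edge detection (illustrative, not the real code):
import numpy


def normalize_raw_example(arr):
    # map raw counts in [0, 2**16 - 1] to [0, 1], clipping outliers
    return numpy.clip(arr / (2.0**16 - 1), 0.0, 1.0)
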
Esempio n. 22
0
class MaskSpectraExtractionRecipe(EmirRecipe):
    '''Extract spectra through the slits of a multi-slit mask.'''

    # Recipe Requirements
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()

    median_filter_size = Parameter(5, 'Size of the median box')
    slits_positions = Requirement(ArrayType,
                                  'Positions and widths of the slits')

    frame = Product(DataFrameType)
    rss = Product(DataFrameType)
    regions = Product(ArrayType)

    #slitstable = Product(ArrayType)
    #DTU = Product(ArrayType)
    #ROTANG = Product(float)
    #DETPA = Product(float)
    #DTUPA = Product(float)

    def run(self, rinput):
        _logger.info('starting extraction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size

        data1 = hdulist[0].data
        _logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Normalize input between -1 and +1
        data3 = img_norm(data2)

        # Tracing parameters
        ws = 10
        step = 15
        hs = 15
        tol = 2
        doplot = False
        npol = 5

        _logger.info('Create output images')
        rssdata = numpy.zeros(
            (rinput.slits_positions.shape[0], data3.shape[1]), dtype='float32')

        # FIXME, number of columns depends on polynomial degree
        regiontable = numpy.zeros(
            (rinput.slits_positions.shape[0], 4 + 2 * (npol + 1)),
            dtype='float32')

        count = 0
        # Loop over slits
        for slit_coords in rinput.slits_positions:
            col, y1, y2 = convert_to_(*slit_coords)
            _logger.info('Processing slit in column %i, row1=%i, row2=%i', col,
                         y1, y2)
            xmin, xmax, ymin, ymax, pfit1, pfit2 = ex_region(data3,
                                                             col,
                                                             y1,
                                                             y2,
                                                             step,
                                                             hs,
                                                             ws,
                                                             tol=tol,
                                                             doplot=doplot)

            _logger.info('Spectrum region is %i, %i, %i, %i', xmin, xmax, ymin,
                         ymax)
            try:
                region = data1[ymin:ymax + 1, xmin:xmax + 1]
                rssdata[count, xmin:xmax + 1] = region.mean(axis=0)
            except ValueError as err:
                _logger.error("Error collapsing spectrum: %s", err)
            # Store the limits in FITS convention (1-based)
            _logger.info('Create regions table')
            regiontable[count, :4] = xmin + 1, xmax + 1, ymin + 1, ymax + 1
            #regiontable[count, 4:4 + npol + 1] = pfit1
            #regiontable[count, 4 + npol + 1:] = pfit2
            count += 1

        hdurss = fits.PrimaryHDU(rssdata)

        result = self.create_result(frame=hdulist,
                                    rss=hdurss,
                                    regions=regiontable)

        return result
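
# The per-slit collapse used above, in isolation: average the traced 2-D
# region along the spatial axis to produce one row of the RSS image.
# All names are illustrative.
import numpy


def collapse_region_example(data, xmin, xmax, ymin, ymax):
    # limits are inclusive and 0-based, as in the recipe loop
    region = data[ymin:ymax + 1, xmin:xmax + 1]
    return region.mean(axis=0)
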
Esempio n. 23
0
class MaskCheckRecipe(EmirRecipe):

    """
    Acquire a target.

    Recipe for the processing of multi-slit/long-slit check images.

    **Observing modes:**

        * MSM and LSM check

    """

    # Recipe Requirements
    #
    obresult = ObservationResultRequirement(
        query_opts=qmod.ResultOf(
            'STARE_IMAGE.frame',
            node='children',
            id_field="resultsIds"
        )
    )
    master_bpm = reqs.MasterBadPixelMaskRequirement()

    bars_nominal_positions = Requirement(
        prods.NominalPositions,
        'Nominal positions of the bars'
    )

    # Recipe Products
    slit_image = Result(prods.ProcessedImage)
    object_image = Result(prods.ProcessedImage)
    offset = Result(tarray.ArrayType)
    angle = Result(float)

    def run(self, rinput):
        self.logger.info('starting processing for image acquisition')
        # Combine and masking
        flow = self.init_filters(rinput)

        # count frames
        frames = rinput.obresult.frames

        nframes = len(frames)
        if nframes not in [1, 2, 4]:
            raise ValueError("expected 1, 2 or 4 frames, got {}".format(nframes))

        interm = basic_processing_(frames, flow, self.datamodel)

        if nframes == 1:
            hdulist_slit = combine_images(interm[:], self.datamodel)
            hdulist_object = hdulist_slit
            # background_subs = False
        elif nframes == 2:
            hdulist_slit = combine_images(interm[0:], self.datamodel)
            hdulist_object = process_ab(interm, self.datamodel)
            # background_subs = True
        elif nframes == 4:
            hdulist_slit = combine_images(interm[0::3], self.datamodel)
            hdulist_object = process_abba(interm, self.datamodel)
            # background_subs = True
        else:
            raise ValueError("expected 1, 2 or 4 frames, got {}".format(nframes))

        self.set_base_headers(hdulist_slit[0].header)
        self.set_base_headers(hdulist_object[0].header)

        self.save_intermediate_img(hdulist_slit, 'slit_image.fits')

        self.save_intermediate_img(hdulist_object, 'object_image.fits')

        # Get slits
        # Rotation around (0,0)
        # For other axis, offset is changed
        # (Off - raxis) = Rot * (Offnew - raxis)
        crpix1 = hdulist_slit[0].header['CRPIX1']
        crpix2 = hdulist_slit[0].header['CRPIX2']

        rotaxis = np.array((crpix1 - 1, crpix2 - 1))

        self.logger.debug('center of rotation (from CRPIX) is %s', rotaxis)

        csu_conf = self.load_csu_conf(hdulist_slit, rinput.bars_nominal_positions)

        # If the CSU is completely open or there are no references,
        # this is not needed
        if not csu_conf.is_open():
            self.logger.info('CSU is configured, detecting slits')
            slits_bb = self.compute_slits(hdulist_slit, csu_conf)

            image_sep = hdulist_object[0].data.astype('float32')

            self.logger.debug('center of rotation (from CRPIX) is %s', rotaxis)

            offset, angle, qc = compute_off_rotation(
                image_sep, csu_conf, slits_bb,
                rotaxis=rotaxis, logger=self.logger,
                debug_plot=False, intermediate_results=self.intermediate_results
            )
        else:
            self.logger.info('CSU is open, not detecting slits')
            offset = [0.0, 0.0]
            angle = 0.0
            qc = QC.GOOD

        # Convert mm to m
        offset_out = np.array(offset) / 1000.0
        # Convert DEG to RAD
        angle_out = np.deg2rad(angle)
        result = self.create_result(
            slit_image=hdulist_slit,
            object_image=hdulist_object,
            offset=offset_out,
            angle=angle_out,
            qc=qc
        )
        self.logger.info('end processing for image acquisition')
        return result

    def load_csu_conf(self, hdulist, bars_nominal_positions):
        # Get slits
        hdr = hdulist[0].header
        # Extract DTU and CSU information from headers

        dtuconf = self.datamodel.get_dtur_from_header(hdr)

        # coordinates transformation from DTU coordinates
        # to image coordinates
        # Y inverted
        # XY switched
        # trans1 = [[1, 0, 0], [0,-1, 0], [0,0,1]]
        # trans2 = [[0,1,0], [1,0,0], [0,0,1]]
        trans3 = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]  # T3 = T2 * T1

        vec = np.dot(trans3, dtuconf.coor_r) / EMIR_PIXSCALE
        self.logger.debug('DTU shift is %s', vec)

        self.logger.debug('create bar model')
        barmodel = csuconf.create_bar_models(bars_nominal_positions)
        csu_conf = csuconf.read_csu_2(hdr, barmodel)

        if self.intermediate_results:
            # FIXME: coordinates are in VIRT pixels
            self.logger.debug('create bar mask from predictions')
            mask = np.ones_like(hdulist[0].data)
            for i in itertools.chain(csu_conf.lbars, csu_conf.rbars):
                bar = csu_conf.bars[i]
                mask[bar.bbox().slice] = 0
            self.save_intermediate_array(mask, 'mask_bars.fits')

            self.logger.debug('create slit mask from predictions')
            mask = np.zeros_like(hdulist[0].data)
            for slit in csu_conf.slits.values():
                mask[slit.bbox().slice] = slit.idx
            self.save_intermediate_array(mask, 'mask_slit.fits')

            self.logger.debug('create slit reference mask from predictions')
            mask1 = np.zeros_like(hdulist[0].data)
            for slit in csu_conf.slits.values():
                if slit.target_type == TargetType.REFERENCE:
                    mask1[slit.bbox().slice] = slit.idx
            self.save_intermediate_array(mask1, 'mask_slit_ref.fits')

        return csu_conf

    def compute_slits(self, hdulist, csu_conf):

        self.logger.debug('finding borders of slits')
        self.logger.debug('not strictly necessary...')
        data = hdulist[0].data
        self.logger.debug('dtype of data %s', data.dtype)

        self.logger.debug('median filter (3x3)')
        image_base = ndi.median_filter(data, size=3)

        # Cast as original type for skimage
        self.logger.debug('casting image to uint16 (for skimage)')
        iuint16 = np.iinfo(np.uint16)
        image = np.clip(image_base, iuint16.min, iuint16.max).astype(np.uint16)

        self.logger.debug('compute Sobel filter')
        # FIXME: computing both sob and sob_v is redundant
        sob = filt.sobel(image)
        self.save_intermediate_array(sob, 'sobel_image.fits')
        sob_v = filt.sobel_v(image)
        self.save_intermediate_array(sob_v, 'sobel_v_image.fits')

        # Compute detector coordinates of bars
        # (110 rows = 2 * EMIR_NBARS, left and right bars)
        all_coords_virt = np.empty((110, 2))
        all_coords_real = np.empty((110, 2))

        # Origin of coordinates is 1
        for bar in csu_conf.bars.values():
            all_coords_virt[bar.idx - 1] = bar.xpos, bar.y0

        # Origin of coordinates is 1 for this function
        _x, _y = dist.exvp(all_coords_virt[:, 0], all_coords_virt[:, 1])
        all_coords_real[:, 0] = _x
        all_coords_real[:, 1] = _y

        # FIXME: hardcoded value
        h = 16
        slit_h_virt = 16.242
        slit_h_tol = 3
        slits_bb = {}

        mask1 = np.zeros_like(hdulist[0].data)

        for idx in range(EMIR_NBARS):
            lbarid = idx + 1
            rbarid = lbarid + EMIR_NBARS
            ref_x_l_v, ref_y_l_v = all_coords_virt[lbarid - 1]
            ref_x_r_v, ref_y_r_v = all_coords_virt[rbarid - 1]

            ref_x_l_d, ref_y_l_d = all_coords_real[lbarid - 1]
            ref_x_r_d, ref_y_r_d = all_coords_real[rbarid - 1]

            width_v = ref_x_r_v - ref_x_l_v
            # width_d = ref_x_r_d - ref_x_l_d

            if (ref_y_l_d >= 2047 + h) or (ref_y_l_d <= 1 - h):
                # print('reference y position is outlimits, skipping')
                continue

            if width_v < 5:
                # print('width is less than 5 pixels, skipping')
                continue

            plot = False
            regionw = 12
            px1 = coor_to_pix_1d(ref_x_l_d) - 1
            px2 = coor_to_pix_1d(ref_x_r_d) - 1
            prow = coor_to_pix_1d(ref_y_l_d) - 1

            comp_l, comp_r = calc0(image, sob_v, prow, px1, px2, regionw, h=h,
                                   plot=plot, lbarid=lbarid, rbarid=rbarid,
                                   plot2=False)
            if np.any(np.isnan([comp_l, comp_r])):
                self.logger.warning("NaN in computed borders of bar=%d",
                                    idx + 1)
                self.logger.warning("skipping bar=%d", idx + 1)
                continue
            elif comp_l > comp_r:
                # Not refining
                self.logger.warning("computed left border of bar=%d greater "
                                    "than right border", idx + 1)
                comp2_l, comp2_r = px1, px2
            else:
                region2 = 5
                px21 = coor_to_pix_1d(comp_l)
                px22 = coor_to_pix_1d(comp_r)

                comp2_l, comp2_r = calc0(image, sob_v, prow, px21, px22, region2,
                                         refine=True,
                                         plot=plot, lbarid=lbarid, rbarid=rbarid,
                                         plot2=False)

                if np.any(np.isnan([comp2_l, comp2_r])):
                    self.logger.warning("NaN in refined borders of bar=%d",
                                        idx + 1)
                    comp2_l, comp2_r = comp_l, comp_r
                elif comp2_l > comp2_r:
                    # Not refining further
                    self.logger.warning("refined left border of bar=%d "
                                        "greater than right border", idx + 1)
                    comp2_l, comp2_r = comp_l, comp_r

            # print('slit', lbarid, '-', rbarid, comp_l, comp_r)
            # print('pos1', comp_l, comp_r)
            # print('pos2', comp2_l, comp2_r)

            xpos1_virt, _ = dist.pvex(comp2_l + 1, ref_y_l_d)
            xpos2_virt, _ = dist.pvex(comp2_r + 1, ref_y_r_d)

            y1_virt = ref_y_l_v - slit_h_virt - slit_h_tol
            y2_virt = ref_y_r_v + slit_h_virt + slit_h_tol
            _, y1 = dist.exvp(xpos1_virt + 1, y1_virt)
            _, y2 = dist.exvp(xpos2_virt + 1, y2_virt)
            # print(comp2_l, comp2_r, y1 - 1, y2 - 1)
            cbb = BoundingBox.from_coordinates(comp2_l, comp2_r, y1 - 1, y2 - 1)
            slits_bb[lbarid] = cbb
            mask1[cbb.slice] = lbarid

        self.save_intermediate_array(mask1, 'mask_slit_computed.fits')
        return slits_bb
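
# The rotation-around-an-axis relation noted in MaskCheckRecipe.run, as
# code: (off - raxis) = R(angle) @ (off_new - raxis), hence
# off_new = raxis + R(-angle) @ (off - raxis). A hedged sketch with
# illustrative names.
import numpy as np


def change_rotation_axis_example(off, angle_rad, raxis):
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    rot_inv = np.array([[c, s], [-s, c]])  # R(-angle) for R = [[c, -s], [s, c]]
    return np.asarray(raxis) + rot_inv @ (np.asarray(off) - np.asarray(raxis))
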
Esempio n. 24
0
class BarDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.NominalPositions,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    average_box_row_size = Parameter(
        7, 'Number of rows to average for fine centering (odd)')
    average_box_col_size = Parameter(
        21, 'Number of columns to extract for fine centering (odd)')
    fit_peak_npoints = Parameter(
        3, 'Number of points to use for fitting the peak (odd)')

    # Recipe Products
    frame = Result(prods.ProcessedImage)
    # derivative = Result(prods.ProcessedImage)
    slits = Result(tarray.ArrayType)
    positions3 = Result(tarray.ArrayType)
    positions5 = Result(tarray.ArrayType)
    positions7 = Result(tarray.ArrayType)
    positions9 = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    TSUTC1 = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)

    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.save_intermediate_img(hdulist, 'reduced_image.fits')

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            if len(csupos) != 2 * EMIR_NBARS:
                raise RecipeError('Number of CSUPOS != 2 * NBARS')
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('start finding bars')
        allpos, slits = find_bars(
            hdulist,
            rinput.bars_nominal_positions,
            csupos,
            dtur,
            average_box_row_size=rinput.average_box_row_size,
            average_box_col_size=rinput.average_box_col_size,
            fit_peak_npoints=rinput.fit_peak_npoints,
            median_filter_size=rinput.median_filter_size,
            logger=self.logger)

        self.logger.debug('end finding bars')

        if self.intermediate_results:
            with open('ds9.reg', 'w') as ds9reg:
                slits_to_ds9_reg(ds9reg, slits)

        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result
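
# find_bars is assumed to return position tables keyed by the (odd)
# averaging-box size, hence allpos[3] ... allpos[9] above. A hedged sketch
# of consuming one table, assuming rows shaped like Esempio 21's
# (barid, row, xpos, xpos_f, error), where error != 0 marks a failure:
def count_failed_bars_example(positions):
    return sum(1 for row in positions if row[4] != 0)
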
Esempio n. 25
0
class TestMaskRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()

    pinhole_nominal_positions = Requirement(CoordinateList2DType,
                                            'Nominal positions of the pinholes'
                                            )
    shift_coordinates = Parameter(True, 'Use header information to'
                                  ' shift the pinhole positions from (0,0) '
                                  'to X_DTU, Y_DTU')
    box_half_size = Parameter(4, 'Half of the computation box size in pixels')
    recenter = Parameter(True, 'Recenter the pinhole coordinates')
    max_recenter_radius = Parameter(2.0, 'Maximum distance for recentering')

    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the canny algorithm')
    obj_min_size = Parameter(200, 'Minimum size of the slit')
    obj_max_size = Parameter(3000, 'Maximum size of the slit')
    slit_size_ratio = Parameter(4.0, 'Minimum ratio between height and width for slits')

    # Recipe Products
    frame = Product(DataFrameType)
    positions = Product(ArrayType)
    positions_alt = Product(ArrayType)
    slitstable = Product(ArrayType)
    DTU = Product(ArrayType)
    filter = Product(str)
    readmode = Product(str)
    ROTANG = Product(float)
    DETPA = Product(float)
    DTUPA = Product(float)
    param_recenter = Product(bool)
    param_max_recenter_radius = Product(float)
    param_box_half_size = Product(float)

    def run(self, rinput):
        _logger.info('starting processing for slit detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        _logger.debug('finding pinholes')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            _logger.error(error)
            raise RecipeError(error)

        if rinput.shift_coordinates:
            xdtur, ydtur, zdtur = dtur
            xfac = xdtur / EMIR_PIXSCALE
            yfac = -ydtur / EMIR_PIXSCALE

            vec = numpy.array([yfac, xfac])
            _logger.info('shift is %s', vec)
            ncenters = rinput.pinhole_nominal_positions + vec
        else:
            _logger.info('using pinhole coordinates as they are')
            ncenters = rinput.pinhole_nominal_positions

        _logger.info('pinhole characterization')
        positions = pinhole_char(
            hdulist[0].data,
            ncenters,
            box=rinput.box_half_size,
            recenter_pinhole=rinput.recenter,
            maxdist=rinput.max_recenter_radius
        )

        _logger.info('alternate pinhole characterization')
        positions_alt = pinhole_char2(
            hdulist[0].data, ncenters,
            recenter_pinhole=rinput.recenter,
            recenter_half_box=rinput.box_half_size,
            recenter_maxdist=rinput.max_recenter_radius
        )

        _logger.debug('finding slits')

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma
        obj_min_size = rinput.obj_min_size
        obj_max_size = rinput.obj_max_size

        data1 = hdulist[0].data
        _logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize(data2)

        # Find edges with canny
        _logger.debug('Find edges with Canny, sigma %.1f', canny_sigma)
        edges = canny(img_grey, sigma=canny_sigma)

        # Fill edges
        _logger.debug('Fill holes')
        fill_slits = ndimage.binary_fill_holes(edges)

        _logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        _logger.debug('%d objects found', nb_labels)
        # Filter on the area of the labeled region
        # Perhaps we could ignore this filtering and
        # do it later?
        _logger.debug('Filter objects by size')
        # Sizes of regions
        sizes = numpy.bincount(label_objects.ravel())

        _logger.debug('Min size is %d', obj_min_size)
        _logger.debug('Max size is %d', obj_max_size)

        mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)

        # Filter out regions
        nids, = numpy.where(mask_sizes)

        mm = numpy.in1d(label_objects, nids)
        mm.shape = label_objects.shape

        fill_slits_clean = numpy.where(mm, 1, 0)

        # and relabel
        _logger.debug('Label filtered objects')
        relabel_objects, nb_labels = ndimage.label(fill_slits_clean)
        _logger.debug('%d objects found after filtering', nb_labels)
        ids = list(six.moves.range(1, nb_labels + 1))

        _logger.debug('Find regions and centers')
        regions = ndimage.find_objects(relabel_objects)
        centers = ndimage.center_of_mass(data2, labels=relabel_objects,
                                         index=ids
                                         )

        table = char_slit(data2, regions,
                          slit_size_ratio=rinput.slit_size_ratio
                          )

        result = self.create_result(frame=hdulist,
                                    positions=positions,
                                    positions_alt=positions_alt,
                                    slitstable=table,
                                    filter=filtername,
                                    DTU=dtub,
                                    readmode=readmode,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa,
                                    param_recenter=rinput.recenter,
                                    param_max_recenter_radius=rinput.max_recenter_radius,
                                    param_box_half_size=rinput.box_half_size
                                    )
        return result
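The slit-segmentation chain of the recipe above (median prefilter, Canny edges, hole filling, labelling, area cut) can be exercised in isolation. A self-contained sketch, assuming scikit-image provides canny and using a plain min-max rescaling in place of the recipe's normalize():

import numpy
from scipy import ndimage
from scipy.ndimage import median_filter
from skimage.feature import canny

def segment_slits(data, median_size=5, sigma=3.0, min_size=200, max_size=3000):
    # Prefilter and rescale to [0, 1] so the canny thresholds are comparable
    smoothed = median_filter(data, size=median_size)
    span = smoothed.max() - smoothed.min()
    grey = (smoothed - smoothed.min()) / (span if span > 0 else 1.0)
    # Edges, hole filling and labelling, as in the recipe
    edges = canny(grey, sigma=sigma)
    filled = ndimage.binary_fill_holes(edges)
    labels, _ = ndimage.label(filled)
    # Keep only regions whose pixel count lies inside (min_size, max_size)
    sizes = numpy.bincount(labels.ravel())
    nids, = numpy.where((sizes > min_size) & (sizes < max_size))
    mask = numpy.in1d(labels, nids).reshape(labels.shape)
    return ndimage.label(numpy.where(mask, 1, 0))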
Example n. 26
0
class BarDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()

    bars_nominal_positions = Requirement(NominalPositions,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    average_box_row_size = Parameter(
        7, 'Number of rows to average for fine centering (odd)')
    average_box_col_size = Parameter(
        21, 'Number of columns to extract for fine centering (odd)')
    fit_peak_npoints = Parameter(
        3, 'Number of points to use for fitting the peak (odd)')

    # Recipe Products
    frame = Product(DataFrameType)
    # derivative = Product(DataFrameType)
    slits = Product(ArrayType)
    positions3 = Product(ArrayType)
    positions5 = Product(ArrayType)
    positions7 = Product(ArrayType)
    positions9 = Product(ArrayType)
    DTU = Product(ArrayType)
    ROTANG = Product(float)
    TSUTC1 = Product(float)
    csupos = Product(ArrayType)
    csusens = Product(ArrayType)

    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.save_intermediate_img(hdulist, 'reduced_image.fits')

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            if len(csupos) != 2 * EMIR_NBARS:
                raise RecipeError('Number of CSUPOS != 2 * NBARS')
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('start finding bars')
        allpos, slits = self.find_bars(hdulist, rinput, csupos, dtur)
        self.logger.debug('end finding bars')

        if self.intermediate_results:
            with open('ds9.reg', 'w') as ds9reg:
                self.to_ds9_reg(ds9reg, slits)

        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result

    def median_filtering(self, hdulist, rinput):

        # Processed array
        arr = hdulist[0].data

        # Median filter of processed array (two times)
        mfilter_size = rinput.median_filter_size

        self.logger.debug('median filtering 1')
        self.logger.debug('median filtering X, %d columns', mfilter_size)
        arr_median = median_filter(arr, size=(1, mfilter_size))
        self.logger.debug('median filtering X, %d rows', mfilter_size)
        arr_median = median_filter(arr_median, size=(mfilter_size, 1))
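        # Note: chaining two 1-D medians is cheaper than a single 2-D
        # median of the same size, but not identical to it; the block
        # below produces the Y-then-X variant for the Y direction.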
        self.save_intermediate_array(arr_median, 'median_image.fits')

        # Median filter of processed array (two times) in the other direction
        # for Y coordinates
        self.logger.debug('median filtering 2')
        self.logger.debug('median filtering Y, %d rows', mfilter_size)
        arr_median_alt = median_filter(arr, size=(mfilter_size, 1))
        self.logger.debug('median filtering Y, %d columns', mfilter_size)
        arr_median_alt = median_filter(arr_median_alt, size=(1, mfilter_size))
        self.save_intermediate_array(arr_median_alt, 'median_image_alt.fits')

        return arr_median, arr_median_alt

    def find_bars(self, hdulist, rinput, csupos, dtur):

        self.logger.debug('filtering image')
        # Processed array
        arr_median, arr_median_alt = self.median_filtering(hdulist, rinput)

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        self.logger.debug('DTU shift is %s', vec)

        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions
        # Fields 0 to 3 of the nominal positions
        # table are used below

        # Range of usable columns
        # These other parameters can also be tuned
        bstart = 1
        bend = 2047
        self.logger.debug('ignoring columns outside %d - %d', bstart, bend - 1)

        # extract a region to average
        wy = (rinput.average_box_row_size // 2)
        wx = (rinput.average_box_col_size // 2)
        self.logger.debug('extraction window is %d rows, %d cols', 2 * wy + 1,
                          2 * wx + 1)
        # Fit the peak with these points
        wfit = 2 * (rinput.fit_peak_npoints // 2) + 1
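        # The integer arithmetic forces an odd window: npoints=3 gives
        # wfit=3 and npoints=4 gives wfit=5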
        self.logger.debug('fit with %d points', wfit)

        # Minimum threshold
        threshold = 5 * EMIR_RON
        # Savitzky-Golay (1964) filter to compute the X derivative
        # newer scipy versions provide scipy.signal.savgol_filter;
        # for compatibility we build the kernel manually

        allpos = {}
        ypos3_kernel = None
        slits = numpy.zeros((EMIR_NBARS, 8), dtype='float')

        self.logger.info('find peaks in derivative image')
        for ks in [3, 5, 7, 9]:
            self.logger.debug('kernel size is %d', ks)
            # S and G kernel for derivative
            kw = ks * (ks * ks - 1) / 12.0
            coeffs_are = -numpy.arange((1 - ks) // 2, (ks - 1) // 2 + 1) / kw
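            # e.g. for ks=5: kw = 10 and the weights are
            # [0.2, 0.1, 0.0, -0.1, -0.2], a linear-fit derivative kernel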
            if ks == 3:
                ypos3_kernel = coeffs_are
            self.logger.debug('kernel weights are %s', coeffs_are)

            self.logger.debug('derive image in X direction')
            arr_deriv = convolve1d(arr_median, coeffs_are, axis=-1)
            self.save_intermediate_array(arr_deriv,
                                         'deriv_image_k%d.fits' % ks)
            # The Y-direction derivative (axis 0) is not used at present:
            # self.logger.debug('derive image in Y direction (with kernel=3)')
            # arr_deriv_alt = convolve1d(arr_median_alt, ypos3_kernel, axis=0)

            positions = []
            self.logger.info('using bar parameters')
            for idx in range(EMIR_NBARS):
                params_l = barstab[idx]
                params_r = barstab[idx + EMIR_NBARS]
                lbarid = int(params_l[0])

                # CSUPOS for this bar
                rbarid = lbarid + EMIR_NBARS
                current_csupos_l = csupos[lbarid - 1]
                current_csupos_r = csupos[rbarid - 1]
                self.logger.debug('CSUPOS for bar %d is %f', lbarid,
                                  current_csupos_l)
                self.logger.debug('CSUPOS for bar %d is %f', rbarid,
                                  current_csupos_r)

                ref_y_coor_virt = params_l[1]  # Do I need to add vec[1]?
                ref_x_l_coor_virt = params_l[3] + current_csupos_l * params_l[2]
                ref_x_r_coor_virt = params_r[3] + current_csupos_r * params_r[2]
                # Transform to REAL..
                ref_x_l_coor, ref_y_l_coor = dist.exvp(ref_x_l_coor_virt,
                                                       ref_y_coor_virt)
                ref_x_r_coor, ref_y_r_coor = dist.exvp(ref_x_r_coor_virt,
                                                       ref_y_coor_virt)
                # FIXME: check if DTU has to be applied
                # ref_y_coor = ref_y_coor + vec[1]
                prow = coor_to_pix_1d(ref_y_l_coor) - 1
                fits_row = prow + 1  # FITS pixel index

                # A function that returns the center of the bar
                # given its X position
                def center_of_bar_l(x):
                    # Pixel values are 0-based
                    # return ref_y_coor + vec[1] - 1
                    # FIXME: check if DTU has to be applied
                    return ref_y_l_coor - 1

                def center_of_bar_r(x):
                    # Pixel values are 0-based
                    # return ref_y_coor + vec[1] - 1
                    # FIXME: check if DTU has to be applied
                    return ref_y_r_coor - 1

                self.logger.debug('looking for bars with ids %d - %d', lbarid,
                                  rbarid)
                self.logger.debug('ref Y virtual position is %7.2f',
                                  ref_y_coor_virt)
                self.logger.debug('ref X virtual positions are %7.2f %7.2f',
                                  ref_x_l_coor_virt, ref_x_r_coor_virt)
                self.logger.debug('ref X positions are %7.2f %7.2f',
                                  ref_x_l_coor, ref_x_r_coor)
                self.logger.debug('ref Y positions are %7.2f %7.2f',
                                  ref_y_l_coor, ref_y_r_coor)
                # if ref_y_l_coor is out of limits, skip this bar
                # (ref_y_l_coor is in the FITS convention)
                if (ref_y_l_coor >= 2047) or (ref_y_l_coor <= 1):
                    self.logger.debug(
                        'reference y position is out of limits, skipping')
                    positions.append(
                        [lbarid, fits_row, fits_row, fits_row, 1, 1, 0, 3])
                    positions.append(
                        [rbarid, fits_row, fits_row, fits_row, 1, 1, 0, 3])
                    continue

                # minimal width of the slit
                minwidth = 0.9
                if abs(ref_x_l_coor_virt - ref_x_r_coor_virt) < minwidth:
                    self.logger.debug(
                        'slit is less than %.1f virt pixels, skipping', minwidth)
                    positions.append(
                        [lbarid, fits_row, fits_row, fits_row, 1, 1, 0, 3])
                    positions.append(
                        [rbarid, fits_row, fits_row, fits_row, 1, 1, 0, 3])
                    continue

                # Left bar
                # Don't add +1 to virtual pixels
                self.logger.debug('measure left border (%d)', lbarid)
                regionw = 10
                bstart1 = coor_to_pix_1d(ref_x_l_coor - regionw)
                bend1 = coor_to_pix_1d(ref_x_l_coor + regionw) + 1
                centery, centery_virt, xpos1, xpos1_virt, fwhm, st = char_bar_peak_l(
                    arr_deriv,
                    prow,
                    bstart1,
                    bend1,
                    threshold,
                    center_of_bar_l,
                    wx=wx,
                    wy=wy,
                    wfit=wfit)

                insert1 = [
                    lbarid, centery + 1, centery_virt, fits_row, xpos1 + 1,
                    xpos1_virt, fwhm, st
                ]
                positions.append(insert1)

                # Right bar
                # Don't add +1 to virtual pixels
                self.logger.debug('measure right border (%d)', rbarid)
                bstart2 = coor_to_pix_1d(ref_x_r_coor - regionw)
                bend2 = coor_to_pix_1d(ref_x_r_coor + regionw) + 1
                centery, centery_virt, xpos2, xpos2_virt, fwhm, st = char_bar_peak_r(
                    arr_deriv,
                    prow,
                    bstart2,
                    bend2,
                    threshold,
                    center_of_bar_r,
                    wx=wx,
                    wy=wy,
                    wfit=wfit)
                # This centery/centery_virt should be equal to ref_y_coor_virt
                insert2 = [
                    rbarid, centery + 1, centery_virt, fits_row, xpos2 + 1,
                    xpos2_virt, fwhm, st
                ]
                positions.append(insert2)

                # FIXME: hardcoded value
                y1_virt = ref_y_coor_virt - 16.242
                y2_virt = ref_y_coor_virt + 16.242
                _, y1 = dist.exvp(xpos1_virt + 1, y1_virt + 1)
                _, y2 = dist.exvp(xpos2_virt + 1, y2_virt + 1)

                # Update positions

                msg = 'bar %d, centroid-y %9.4f centroid-y virt %9.4f, ' \
                      'row %d, x-pos %9.4f x-pos virt %9.4f, FWHM %6.3f, status %d'
                self.logger.debug(msg, *positions[-2])
                self.logger.debug(msg, *positions[-1])

                if ks == 5:
                    slits[lbarid - 1] = numpy.array(
                        [xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1])
                    # FITS coordinates
                    slits[lbarid - 1] += 1.0
                    self.logger.debug('inserting bars %d-%d into "slits"',
                                      lbarid, rbarid)

            allpos[ks] = numpy.asarray(
                positions, dtype='float')  # GCS doesn't like lists of lists

        return allpos, slits

    def to_ds9_reg(self, ds9reg, slits):
        """Transform fiber traces to ds9-region format.

        Parameters
        ----------
        ds9reg : BinaryIO
            Handle to output file name in ds9-region format.
        """

        # open output file and insert header

        ds9reg.write('# Region file format: DS9 version 4.1\n')
        ds9reg.write(
            'global color=green dashlist=8 3 width=1 font="helvetica 10 '
            'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 '
            'move=1 delete=1 include=1 source=1\n')
        ds9reg.write('physical\n')

        for idx, slit in enumerate(slits, 1):
            xpos1, y2, xpos2, _, _, y1, _, _ = slit  # repeated corner values
            xc = 0.5 * (xpos1 + xpos2) + 1
            yc = 0.5 * (y1 + y2) + 1
            xd = (xpos2 - xpos1)
            yd = (y2 - y1)
            ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xd, yd))
            ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(
                xpos1 - 5, yc, idx))
            ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(
                xpos2 + 5, yc, idx + EMIR_NBARS))