Example #1
class DTUFocusRecipe(EmirRecipe):
    """
    Recipe to compute the DTU focus.

    **Observing modes:**

        * EMIR focus control

    **Inputs:**

     * A list of images
     * A list of sky images
     * Bias, dark, flat
     * A model of the detector
     * List of foci

    **Outputs:**

     * Best focus

    """

    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    objects = Parameter([], 'List of x-y pairs of object coordinates')
    msm_pattern = Parameter([], 'List of x-y pairs of slit coordinates')
    dtu_focus_range = Parameter(
        [], 'Focus range of the DTU: begin, end and step')

    focus = Product(DTUFocus)


    def run(self, rinput):
        return self.create_result(focus=DTUFocus())
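
The recipe above is purely declarative: numina collects the Requirement/Parameter class attributes into the recipe input and the Product attributes into the recipe result. A minimal driving sketch, assuming numina's standard create_input classmethod; every name except DTUFocusRecipe is an assumption:

# Hypothetical usage sketch; all names except DTUFocusRecipe are assumptions.
recipe = DTUFocusRecipe()
rinput = DTUFocusRecipe.create_input(
    master_bpm=my_bpm, master_bias=my_bias,
    master_dark=my_dark, master_flat=my_flat,
    objects=[[1024.0, 1024.0]],        # x-y pairs of object coordinates
    msm_pattern=[],
    dtu_focus_range=[-3.0, 3.0, 0.5],  # begin, end, step
)
result = recipe.run(rinput)
print(result.focus)                    # the DTUFocus product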
Example #2
class TelescopeRoughFocusRecipe(EmirRecipe):
    """Recipe to compute the telescope focus.

    **Observing modes:**

     * Telescope rough focus
     * EMIR focus control

    **Inputs:**

     * A list of images
     * A list of sky images
     * Bias, dark, flat
     * A model of the detector
     * List of foci

    **Outputs:**

     * Best focus
    """

    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    objects = Parameter([], 'List of x-y pairs of object coordinates')
    focus_range = Parameter([], 'Focus range: begin, end and step')

    focus = Product(TelescopeFocus)

    def run(self, rinput):
        return self.create_result(focus=TelescopeFocus())
Example #3
class MosaicRecipe(EmirRecipe):
    """
    Record a series of stare images with the same acquisition
    parameters, pointing at a number of sky positions whose
    separations are of the order of the EMIR FOV. This mode is
    designed to fully cover a given area on the sky, but it can
    also be used to point at a number of sky positions on which
    acquisition is only performed at the beginning. Supersky
    frame(s) can be built from the image series.

    **Observing modes:**

        * Mosaic images

    """

    obresult = ObservationResultRequirement()
    sources = Parameter(
        [], 'List of x, y coordinates to measure FWHM', optional=True)

    frame = Product(DataFrameType)
    catalog = Product(SourcesCatalog)

    def run(self, ri):
        return self.create_result(frame=DataFrame(None),
                                  catalog=SourcesCatalog())
Example #4
class TelescopeFineFocusRecipe(EmirRecipe):
    """
    Recipe to compute the telescope focus.

    **Observing modes:**

        * Telescope fine focus

    **Inputs:**

     * A list of images
     * A list of sky images
     * Bias, dark, flat
     * A model of the detector
     * List of foci

    **Outputs:**

     * Best focus

    """

    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    objects = Parameter([], 'List of x-y pairs of object coordinates')

    focus = Result(prods.TelescopeFocus)

    def run(self, rinput):
        return self.create_result(focus=prods.TelescopeFocus())
Example #5
class Flat(BaseRecipe):

    obresult = Requirement(ObservationResultType, "Observation Result")
    master_bias = Requirement(MasterBias, "Master Bias")
    polynomial_degree = Parameter(5,
                                  'Polynomial degree of arc calibration',
                                  as_list=True,
                                  nelem='+',
                                  validator=range_validator(minval=1))

    @master_bias.validator
    def custom_validator(self, value):
        print('func', self, value)
        return True

    master_flat = Result(MasterFlat)

    def run(self, rinput):

        # Here the raw images are processed
        # and a final image myframe is created
        obresult = rinput.obresult
        fr0 = obresult.frames[0].open()
        data = numpy.ones_like(fr0[0].data)
        hdu = fits.PrimaryHDU(data, header=fr0[0].header)
        myframe = fits.HDUList([hdu])
        #
        result = self.create_result(master_flat=myframe)
        return result
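
Two validation hooks appear above: the @master_bias.validator decorator attached to a single requirement, and range_validator(minval=1) applied to the list-valued polynomial_degree. A rough sketch of what a range validator of this kind enforces; this is an illustrative stand-in, not numina's actual implementation, which may raise its own exception type:

def my_range_validator(minval=None, maxval=None):
    # Illustrative stand-in for numina's range_validator.
    def validate(value):
        values = value if isinstance(value, (list, tuple)) else [value]
        for v in values:
            if minval is not None and v < minval:
                raise ValueError('{0} is below minval {1}'.format(v, minval))
            if maxval is not None and v > maxval:
                raise ValueError('{0} is above maxval {1}'.format(v, maxval))
        return True
    return validate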
Example #6
class ArcCalibrationRecipe(EmirRecipe):

    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    lines_catalog = Requirement(LinesCatalog, "Catalog of lines")
    polynomial_degree = Parameter(2,
                                  'Polynomial degree of the arc calibration')
    # run() below reads rinput.slits_catalog, which this snippet never
    # declares; a plausible declaration (the type is an assumption):
    slits_catalog = Requirement(ArrayType, 'Catalog of slits')

    polynomial_coeffs = Product(ArrayType)

    def run(self, rinput):
        _logger.info('starting arc calibration')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        nslits = len(rinput.slits_catalog)
        coeff_table = numpy.zeros((nslits, rinput.polynomial_degree + 1))

        result = self.create_result(polynomial_coeffs=coeff_table)

        return result
Example #7
class OffsetSpectraRecipe(EmirRecipe):
    """
    Observing mode:
        Offset spectra beyond the slit
    """

    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_spectral_ff = Requirement(prods.MasterSpectralFlat,
                                     'Master spectral flatfield')
    st_calibration = Requirement(prods.SlitTransmissionCalibration,
                                 'Slit transmission calibration')
    w_calibration = Requirement(prods.WavelengthCalibration,
                                'Wavelength calibration')
    lines = Parameter(None,
                      'List of x-lambda pairs of line coordinates')

    spectra = Product(prods.Spectra)
    catalog = Product(prods.LinesCatalog)

    def run(self, rinput):
        return self.create_result(spectra=prods.Spectra(),
                                  catalog=prods.LinesCatalog())
Example #8
class NBImageRecipeInput(RecipeInput):
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    extinction = Parameter(0.0, 'Mean atmospheric extinction')
    sources = Parameter(
        [], 'List of x, y coordinates to measure FWHM', optional=True)
    offsets = Offsets_Requirement()
    sky_images = Parameter(5, 'Images used to estimate the background'
                           ' before and after current image')
    sky_images_sep_time = SkyImageSepTime_Requirement()
    check_photometry_levels = Parameter(
        [0.5, 0.8], 'Levels to check the flux of the objects')
    check_photometry_actions = Parameter(
        ['warn', 'warn', 'default'], 'Actions to take on images')
Example #9
class DTU_XY_CalibrationRecipe(EmirRecipe):

    """

    **Observing modes:**

        * DTU X_Y calibration
    """
    
    obresult = ObservationResultRequirement()
    slit_pattern = Parameter([], 'Slit pattern')
    dtu_range = Parameter([], 'DTU range: begin, end and step')

    calibration = Product(DTU_XY_Calibration)


    def run(self, rinput):
        return self.create_result(calibration=DTU_XY_Calibration())
Example #10
class CSUSpectraExtractionRecipe(EmirRecipe):
    """Extract spectra in image taken with the CSU configured"""

    # Recipe Requirements
    obresult = ObservationResultRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()
    nrows_side = Parameter(5, 'Number of rows to extract around the center')
    slits_positions = Requirement(ArrayType,
                                  'Positions and widths of the slits')

    # Recipe products
    frame = Product(DataFrameType)
    rss = Product(DataFrameType)

    def run(self, rinput):
        _logger.info('starting extraction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        data1 = hdulist[0].data

        _logger.info('Create output images')
        rssdata = numpy.zeros(
            (rinput.slits_positions.shape[0], data1.shape[1]), dtype='float32')

        nrows = rinput.nrows_side
        # Loop over slits
        for idx, slit_coords in enumerate(rinput.slits_positions):

            x, y, ax, ay = slit_coords  # Coords in FITS coordinates

            ref_col = wc_to_pix(x - 1)
            ref_row = wc_to_pix(y - 1)

            _logger.info('Processing slit in column %i, row=%i', ref_col,
                         ref_row)

            # Simple extraction

            _logger.info('Extract %i rows around center', nrows)
            region = data1[ref_row - nrows:ref_row + nrows + 1, :]

            rssdata[idx, :] = region.mean(axis=0)

        hdurss = fits.PrimaryHDU(rssdata)

        result = self.create_result(frame=hdulist, rss=hdurss)

        return result
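
wc_to_pix is not defined in this snippet. Since the call sites already subtract 1 to go from FITS (1-based) to array (0-based) coordinates, a plausible helper just rounds to the nearest integer pixel; this is an assumption, not the original implementation:

import math

def wc_to_pix(w):
    # Nearest integer pixel index for a 0-based coordinate (assumed behavior).
    return int(math.floor(w + 0.5))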
Example #11
class SpectroPhotometricCalibrationRecipe(EmirRecipe):

    """

    **Observing modes:**

        * Spectrophotometric calibration
    """

    obresult = ObservationResultRequirement()
    sphot = Parameter([], 'Information about standard stars')

    calibration = Product(SpectroPhotometricCalibration)

    def run(self, rinput):
        return self.create_result(calibration=SpectroPhotometricCalibration())
Example #12
class CSU2DetectorRecipe(EmirRecipe):

    """

    **Observing modes:**

        * CSU2Detector calibration
    """

    obresult = ObservationResultRequirement()
    dtu_range = Parameter([], 'DTU range: begin, end and step')

    calibration = Product(DTU_XY_Calibration)

    def run(self, rinput):
        return self.create_result(calibration=DTU_XY_Calibration())
Example #13
class MicroditheredImageRecipeInput(RecipeInput):
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    extinction = Parameter(0.0, 'Mean atmospheric extinction')
    sources = Parameter([],
                        'List of x, y coordinates to measure FWHM',
                        optional=True)
    offsets = Offsets_Requirement()
    iterations = Parameter(4, 'Iterations of the recipe')
    sky_images = Parameter(
        5, 'Images used to estimate the background before '
        'and after current image')
    sky_images_sep_time = SkyImageSepTime_Requirement()
    check_photometry_levels = Parameter(
        [0.5, 0.8], 'Levels to check the flux of the objects')
    check_photometry_actions = Parameter(['warn', 'warn', 'default'],
                                         'Actions to take on images')
    subpixelization = Parameter(4, 'Number of subdivisions in each pixel side')
    window = Parameter([], 'Region of interesting data', optional=True)
Example #14
class TestSlitMaskDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the Canny algorithm')
    canny_high_threshold = Parameter(0.04, 'High threshold for the Canny algorithm')
    canny_low_threshold = Parameter(0.01, 'Low threshold for the Canny algorithm')
    obj_min_size = Parameter(200, 'Minimum size of the slit')
    obj_max_size = Parameter(3000, 'Maximum size of the slit')
    slit_size_ratio = Parameter(4.0, 'Minimum ratio between height and width for slits')

    # Recipe Results
    frame = Result(prods.DataFrameType)
    slitstable = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    DETPA = Result(float)
    DTUPA = Result(float)

    def run(self, rinput):
        self.logger.info('starting slit processing')

        self.logger.info('basic image reduction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)
        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('finding slits')

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma
        obj_min_size = rinput.obj_min_size
        obj_max_size = rinput.obj_max_size

        data1 = hdulist[0].data
        self.logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize_raw(data2)

        # Find edges with Canny
        self.logger.debug('Find edges with Canny, sigma %f', canny_sigma)
        # These thresholds correspond roughly to
        # value * (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold
        self.logger.debug('Find edges, Canny high threshold %f', high_threshold)
        self.logger.debug('Find edges, Canny low threshold %f', low_threshold)
        edges = canny(img_grey, sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)
        # Fill edges
        self.logger.debug('Fill holes')
        fill_slits = ndimage.binary_fill_holes(edges)

        self.logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        self.logger.debug('%d objects found', nb_labels)
        # Filter on the area of the labeled region
        # Perhaps we could ignore this filtering and
        # do it later?
        self.logger.debug('Filter objects by size')
        # Sizes of regions
        sizes = numpy.bincount(label_objects.ravel())

        self.logger.debug('Min size is %d', obj_min_size)
        self.logger.debug('Max size is %d', obj_max_size)

        mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)

        # Filter out regions
        nids, = numpy.where(mask_sizes)

        mm = numpy.in1d(label_objects, nids)
        mm.shape = label_objects.shape

        fill_slits_clean = numpy.where(mm, 1, 0)
        #plt.imshow(fill_slits_clean)

        # and relabel
        self.logger.debug('Label filtered objects')
        relabel_objects, nb_labels = ndimage.label(fill_slits_clean)
        self.logger.debug('%d objects found after filtering', nb_labels)
        ids = list(six.moves.range(1, nb_labels + 1))

        self.logger.debug('Find regions and centers')
        regions = ndimage.find_objects(relabel_objects)
        centers = ndimage.center_of_mass(data2, labels=relabel_objects,
                                         index=ids
                                         )

        table = char_slit(data2, regions,
                          slit_size_ratio=rinput.slit_size_ratio
                          )

        result = self.create_result(frame=hdulist, slitstable=table,
                                    DTU=dtub,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa
                                    )

        return result
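
The size filtering in the middle of run() is a compact numpy idiom: numpy.bincount over the label image gives the pixel area of every labeled region (index 0 is the background), and in1d/where keep only the labels whose area falls inside the allowed range. A self-contained toy run of the same idiom:

import numpy
from scipy import ndimage

toy = numpy.zeros((8, 8), dtype=int)
toy[1:3, 1:3] = 1                      # small blob, area 4
toy[4:8, 2:7] = 1                      # large blob, area 20
label_objects, nb_labels = ndimage.label(toy)
sizes = numpy.bincount(label_objects.ravel())  # [40, 4, 20]; index 0 is background
mask_sizes = (sizes > 5) & (sizes < 25)        # keeps only the 20-pixel blob
nids, = numpy.where(mask_sizes)
mm = numpy.in1d(label_objects, nids).reshape(label_objects.shape)
clean = numpy.where(mm, 1, 0)                  # only the large blob survives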
Example #15
class TestPinholeRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    pinhole_nominal_positions = Requirement(
        prods.CoordinateList2DType, 'Nominal positions of the pinholes')
    shift_coordinates = Parameter(
        True, 'Use header information to'
        ' shift the pinhole positions from (0,0) '
        'to X_DTU, Y_DTU')
    box_half_size = Parameter(4, 'Half of the computation box size in pixels')
    recenter = Parameter(True, 'Recenter the pinhole coordinates')
    max_recenter_radius = Parameter(2.0, 'Maximum distance for recentering')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    positions = Result(tarray.ArrayType)
    positions_alt = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    filter = Result(str)
    readmode = Result(str)
    ROTANG = Result(float)
    DETPA = Result(float)
    DTUPA = Result(float)
    param_recenter = Result(bool)
    param_max_recenter_radius = Result(float)
    param_box_half_size = Result(float)

    def run(self, rinput):
        _logger.info('starting processing for pinhole detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        _logger.debug('finding pinholes')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            _logger.error(error)
            raise numina.exceptions.RecipeError(error)

        if rinput.shift_coordinates:
            xdtur, ydtur, zdtur = dtur
            xfac = xdtur / EMIR_PIXSCALE
            yfac = -ydtur / EMIR_PIXSCALE

            vec = numpy.array([yfac, xfac])
            _logger.info('shift is %s', vec)
            ncenters = rinput.pinhole_nominal_positions + vec
        else:
            _logger.info('using pinhole coordinates as they are')
            ncenters = rinput.pinhole_nominal_positions

        _logger.info('pinhole characterization')
        positions = pinhole_char(hdulist[0].data,
                                 ncenters,
                                 box=rinput.box_half_size,
                                 recenter_pinhole=rinput.recenter,
                                 maxdist=rinput.max_recenter_radius)

        _logger.info('alternate pinhole characterization')
        positions_alt = pinhole_char2(
            hdulist[0].data,
            ncenters,
            recenter_pinhole=rinput.recenter,
            recenter_half_box=rinput.box_half_size,
            recenter_maxdist=rinput.max_recenter_radius)

        result = self.create_result(
            frame=hdulist,
            positions=positions,
            positions_alt=positions_alt,
            filter=filtername,
            DTU=dtub,
            readmode=readmode,
            ROTANG=rotang,
            DETPA=detpa,
            DTUPA=dtupa,
            param_recenter=rinput.recenter,
            param_max_recenter_radius=rinput.max_recenter_radius,
            param_box_half_size=rinput.box_half_size)
        return result
Example #16
class GenerateRectwvCoeff(EmirRecipe):
    """Process images in Stare spectra.

    This recipe generates a rectified and wavelength calibrated
    image after applying a model (master_rectwv). This calibration
    can be refined (using refine_wavecalib_mode != 0).

    """

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterSpectralFlatFieldRequirement()
    master_rectwv = reqs.MasterRectWaveRequirement()
    refine_wavecalib_mode = Parameter(
        0,
        description='Apply wavelength calibration refinement',
        choices=[0, 1, 2, 11, 12])
    minimum_slitlet_width_mm = Parameter(
        float(EMIR_MINIMUM_SLITLET_WIDTH_MM),
        description='Minimum width (mm) for a valid slitlet',
    )
    maximum_slitlet_width_mm = Parameter(
        float(EMIR_MAXIMUM_SLITLET_WIDTH_MM),
        description='Maximum width (mm) for a valid slitlet',
    )
    global_integer_offset_x_pix = Parameter(
        0,
        description='Global offset (pixels) in wavelength direction (integer)',
    )
    global_integer_offset_y_pix = Parameter(
        0,
        description='Global offset (pixels) in spatial direction (integer)',
    )

    reduced_mos = Result(prods.ProcessedMOS)
    rectwv_coeff = Result(RectWaveCoeff)

    def run(self, rinput):
        self.logger.info('starting rect.+wavecal. reduction of stare spectra')

        self.logger.info(rinput.master_rectwv)
        self.logger.info('Wavelength calibration refinement mode: {}'.format(
            rinput.refine_wavecalib_mode))
        self.logger.info('Minimum slitlet width (mm)............: {}'.format(
            rinput.minimum_slitlet_width_mm))
        self.logger.info('Maximum slitlet width (mm)............: {}'.format(
            rinput.maximum_slitlet_width_mm))
        self.logger.info('Global offset X direction (pixels)....: {}'.format(
            rinput.global_integer_offset_x_pix))
        self.logger.info('Global offset Y direction (pixels)....: {}'.format(
            rinput.global_integer_offset_y_pix))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=median)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # RectWaveCoeff object with rectification and wavelength
        # calibration coefficients for the particular CSU configuration
        rectwv_coeff = rectwv_coeff_from_mos_library(reduced_image,
                                                     rinput.master_rectwv)

        # set global offsets
        rectwv_coeff.global_integer_offset_x_pix = \
            rinput.global_integer_offset_x_pix
        rectwv_coeff.global_integer_offset_y_pix = \
            rinput.global_integer_offset_y_pix

        # apply rectification and wavelength calibration
        reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # wavelength calibration refinement
        # 0 -> no refinement
        # 1 -> apply global offset to all the slitlets (using ARC lines)
        # 2 -> apply individual offset to each slitlet (using ARC lines)
        # 11 -> apply global offset to all the slitlets (using OH lines)
        # 12 -> apply individual offset to each slitlet (using OH lines)
        if rinput.refine_wavecalib_mode != 0:
            self.logger.info(
                'Refining wavelength calibration (mode={})'.format(
                    rinput.refine_wavecalib_mode))
            # refine RectWaveCoeff object
            rectwv_coeff, expected_catalog_lines = refine_rectwv_coeff(
                reduced_mos,
                rectwv_coeff,
                rinput.refine_wavecalib_mode,
                rinput.minimum_slitlet_width_mm,
                rinput.maximum_slitlet_width_mm,
                save_intermediate_results=self.intermediate_results)
            self.save_intermediate_img(expected_catalog_lines,
                                       'expected_catalog_lines.fits')
            # re-apply rectification and wavelength calibration
            reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of stare spectra')
        result = self.create_result(reduced_mos=reduced_mos,
                                    rectwv_coeff=rectwv_coeff)
        return result

    def set_base_headers(self, hdr):
        newhdr = super(GenerateRectwvCoeff, self).set_base_headers(hdr)
        # Update EXP to 0
        newhdr['EXP'] = 0
        return newhdr
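
The accepted values of refine_wavecalib_mode are documented only in a comment inside run(); the same information as a lookup table, restated here for reference (not part of the original recipe):

# Restatement of the mode comments in run(); illustrative only.
REFINE_WAVECALIB_MODES = {
    0: 'no refinement',
    1: 'global offset for all slitlets (using ARC lines)',
    2: 'individual offset per slitlet (using ARC lines)',
    11: 'global offset for all slitlets (using OH lines)',
    12: 'individual offset per slitlet (using OH lines)',
}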
Example #17
class FullDitheredImagesRecipe(EmirRecipe):
    """Recipe for the reduction of imaging mode observations.

    Recipe to reduce observations obtained in imaging mode, considering
    different possibilities depending on the size of the offsets
    between individual images.
    In particular, the following observing modes are considered: stare imaging,
    nodded beamswitched imaging, and dithered imaging.

    A critical piece of information here is a table that clearly specifies
    which images can be labeled as *science*, and which ones as *sky*.
    Note that some images are used both as *science* and *sky*
    (when the size of the targets is small compared to the offsets).

    **Observing modes:**

     * StareImage
     * Nodded/Beam-switched images
     * Dithered images


    **Inputs:**

     * Science frames + [Sky Frames]
     * Observing mode name: **stare image**, **nodded beamswitched image**,
       or **dithered imaging**
     * A table relating each science image with its sky image(s) (TBD if
       it's in the FITS header and/or in other format)
     * Offsets between them (offsets must be integer)
     * Master Dark
     * Bad pixel mask (BPM)
     * Non-linearity correction polynomials
     * Master flat (twilight/dome flats)
     * Master background (thermal background, only in K band)
     * Exposure Time (must be the same in all the frames)
     * Airmass for each frame
     * Detector model (gain, RN, readout mode)
     * Average extinction in the filter
     * Astrometric calibration (TBD)

    **Outputs:**

     * Image with three extensions: final image scaled to the individual
       exposure time, variance and exposure time map OR number of images
       combined (TBD)

    **Procedure:**

    Images are corrected for dark, non-linearity and flat. Then, an iterative
    process starts:

     * Sky is computed from each frame, using the list of sky images of each
       science frame. The objects are avoided using a mask (from the second
       iteration on).

     * The relative offsets are the nominal ones from the telescope. From the
       second iteration on, we refine them using objects of appropriate
       brightness (not too bright, not too faint).

     * We combine the sky-subtracted images; the output is a new image, a
       variance image and an exposure map/number-of-images-used map.

     * An object mask is generated.

     * We recompute the sky map, using the object mask as an additional input.
       From here we iterate (typically 4 times).

     * Finally, the images are corrected for atmospheric extinction and flux
       calibrated.

     * A preliminary astrometric calibration can always be used (using
       the central coordinates of the pointing and the plate scale
       in the detector).
       A better calibration might be computed using available stars (TBD).

    """
    obresult = ObservationResultRequirement(
        query_opts=ResultOf('result_image', node='children'))

    master_bpm = reqs.MasterBadPixelMaskRequirement()

    offsets = Requirement(prods.CoordinateList2DType,
                          'List of pairs of offsets',
                          optional=True)
    refine_offsets = Parameter(False, 'Refine offsets by cross-correlation')
    iterations = Parameter(2, 'Iterations of the recipe')
    extinction = Parameter(0.0, 'Mean atmospheric extinction')

    sky_images = Parameter(
        5, 'Images used to estimate the '
        'background before and after current image')

    sky_images_sep_time = Parameter(
        10, 'Maximum time interval between target and sky images [minutes]')

    result_image = Result(prods.ProcessedImage)
    result_sky = Result(prods.ProcessedImage, optional=True)

    def run(self, rinput):

        target_is_sky = True
        obresult = rinput.obresult
        sky_images = rinput.sky_images
        sky_images_sep_time = rinput.sky_images_sep_time
        baseshape = (2048, 2048)
        user_offsets = rinput.offsets
        extinction = rinput.extinction

        images_info = self.initial_classification(obresult, target_is_sky)

        # Resizing target frames
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        finalshape, offsetsp, refpix, offset_fc0 = self.compute_size(
            target_info, baseshape, user_offsets)

        self.resize(target_info, baseshape, offsetsp, finalshape)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        if rinput.refine_offsets:
            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(result[0].data,
                                                       finalshape,
                                                       box=40)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    images_info, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offset_xy0 = numpy.fliplr(offset_fc0)
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = numpy.fliplr(offsets_xy_t)
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape2, offsetsp2 = narray.combine_shape(
                    baseshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s",
                                  offsetsp2)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape2)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                self.resize(target_info, baseshape, offsetsp2, finalshape2)
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        result = self.process_basic(images_info,
                                    target_is_sky=target_is_sky,
                                    extinction=extinction)

        step = 1

        while step <= rinput.iterations:
            result = self.process_advanced(images_info,
                                           result,
                                           step,
                                           target_is_sky,
                                           maxsep=sky_images_sep_time,
                                           nframes=sky_images,
                                           extinction=extinction)
            step += 1

        return self.create_result(result_image=result)

    def compute_offset_xy_crosscor_regions(self,
                                           iinfo,
                                           regions,
                                           refine=False,
                                           tol=0.5):

        names = [frame.lastname for frame in iinfo]
        self.logger.debug('cross-correlation frames: %s', names)
        self.logger.debug('cross-correlation regions: %s', regions)
        with nfcom.manage_fits(names) as imgs:
            arrs = [img[0].data for img in imgs]
            offsets_xy = offsets_from_crosscor_regions(arrs,
                                                       regions,
                                                       refine=refine,
                                                       order='xy',
                                                       tol=tol)
            self.logger.debug("offsets_xy cross-corr %s", offsets_xy)
            # Offsets in numpy order, swapping
        return offsets_xy

    def compute_size(self, images_info, baseshape, user_offsets=None):

        # Reference pixel in the center of the frame
        refpix = numpy.array([[baseshape[0] / 2.0, baseshape[1] / 2.0]])

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]

        if user_offsets is not None:
            self.logger.info('Using offsets from parameters')
            base_ref = numpy.asarray(user_offsets)
            list_of_offsets = -(base_ref - base_ref[0])
        else:
            self.logger.debug('Computing offsets from WCS information')
            with nfcom.manage_fits(img.origin
                                   for img in target_info) as images:
                list_of_offsets = offsets_from_wcs_imgs(images, refpix)

        # FIXME: I am using offsets in row/columns
        # the values are provided in XY so flip-lr
        list_of_offsets = numpy.fliplr(list_of_offsets)

        # Insert pixel offsets between frames
        for iinfo, off in zip(target_info, list_of_offsets):
            # Insert pixel offsets between frames
            iinfo.pix_offset = off

            self.logger.debug('Frame %s, offset=%s', iinfo.label, off)

        self.logger.info('Computing relative offsets')
        offsets = [iinfo.pix_offset for iinfo in target_info]
        offsets = numpy.round(offsets).astype('int')

        finalshape, offsetsp = narray.combine_shape(baseshape, offsets)
        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)
        return finalshape, offsetsp, refpix, list_of_offsets

    def process_basic(self, images_info, target_is_sky=True, extinction=0.0):

        step = 0

        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(images_info)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Simple sky correction')
        if target_is_sky:
            # Each frame is the closest sky frame available
            for iinfo in images_info:
                self.compute_simple_sky_for_frame(iinfo, iinfo)
        else:
            # Not implemented
            self.compute_simple_sky(target_info, sky_info)

        # Combining the frames
        self.logger.info("Step %d, Combining target frames", step)
        result = self.combine_frames(target_info, extinction=extinction)
        self.logger.info('Step %d, finished', step)

        return result

    def process_advanced(self,
                         images_info,
                         result,
                         step,
                         target_is_sky=True,
                         maxsep=5.0,
                         nframes=6,
                         extinction=0):

        seeing_fwhm = None
        baseshape = (2048, 2048)
        target_info = [iinfo for iinfo in images_info if iinfo.valid_target]
        sky_info = [iinfo for iinfo in images_info if iinfo.valid_sky]
        self.logger.info('Step %d, generating segmentation image', step)

        objmask, seeing_fwhm = self.create_mask(result, seeing_fwhm, step=step)

        for frame in target_info:
            frame.objmask = name_object_mask(frame.label, step)
            self.logger.info('Step %d, create object mask %s', step,
                             frame.objmask)
            frame.objmask_data = objmask[frame.valid_region]
            fits.writeto(frame.objmask, frame.objmask_data, overwrite=True)

        if not target_is_sky:
            # Empty object mask for sky frames
            bogus_objmask = numpy.zeros(baseshape, dtype='uint8')

            for frame in sky_info:
                frame.objmask_data = bogus_objmask

        self.logger.info("Step %d, SF: compute superflat", step)
        sf_arr = self.compute_superflat(sky_info, segmask=objmask, step=step)

        # Apply superflat
        self.logger.info("Step %d, SF: apply superflat", step)
        for iinfo in images_info:
            self.correct_superflat(iinfo, sf_arr, step=step, save=True)

        self.logger.info('Step %d, advanced sky correction (SC)', step)
        self.compute_advanced_sky(target_info,
                                  objmask,
                                  skyframes=sky_info,
                                  target_is_sky=target_is_sky,
                                  maxsep=maxsep,
                                  nframes=nframes,
                                  step=step)

        # Combining the images
        self.logger.info("Step %d, Combining the images", step)
        # FIXME: only for science
        result = self.combine_frames(target_info, extinction, step=step)
        return result

    def compute_simple_sky_for_frame(self, frame, skyframe, step=0, save=True):
        self.logger.info('Correcting sky in frame %s', frame.lastname)
        self.logger.info('with sky computed from frame %s', skyframe.lastname)

        if hasattr(skyframe, 'median_sky'):
            sky = skyframe.median_sky
        else:

            with fits.open(skyframe.lastname, mode='readonly') as hdulist:
                data = hdulist['primary'].data
                valid = data[frame.valid_region]

                if skyframe.objmask_data is not None:
                    self.logger.debug('object mask defined')
                    msk = frame.objmask_data
                    sky = numpy.median(valid[msk == 0])
                else:
                    self.logger.debug('object mask empty')
                    sky = numpy.median(valid)

            self.logger.debug('median sky value is %f', sky)
            skyframe.median_sky = sky

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname

        if save:
            shutil.copyfile(prev, dst)
        else:
            os.rename(prev, dst)

        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky

    def compute_simple_sky(self, frame, skyframe, step=0, save=True):
        raise NotImplementedError

    def correct_superflat(self, frame, fitted, step=0, save=True):

        frame.flat_corrected = name_skyflat_proc(frame.label, step)
        if save:
            shutil.copyfile(frame.resized_base, frame.flat_corrected)
        else:
            os.rename(frame.resized_base, frame.flat_corrected)

        self.logger.info("Step %d, SF: apply superflat to frame %s", step,
                         frame.flat_corrected)
        with fits.open(frame.flat_corrected, mode='update') as hdulist:
            data = hdulist['primary'].data
            datar = data[frame.valid_region]
            data[frame.valid_region] = narray.correct_flatfield(datar, fitted)

            frame.lastname = frame.flat_corrected

    def initial_classification(self, obresult, target_is_sky=False):
        """Classify input frames, """
        # lists of targets and sky frames

        with obresult.frames[0].open() as baseimg:
            # Initial checks
            has_bpm_ext = 'BPM' in baseimg
            self.logger.debug('images have BPM extension: %s', has_bpm_ext)

        images_info = []
        for f in obresult.frames:
            with f.open() as img:
                # Getting some metadata from FITS header
                hdr = img[0].header

                iinfo = ImageInfo(f)

                finfo = {}
                iinfo.metadata = finfo

                finfo['uuid'] = hdr['UUID']
                finfo['exposure'] = hdr['EXPTIME']
                # frame.baseshape = get_image_shape(hdr)
                finfo['airmass'] = hdr['airmass']
                finfo['mjd'] = hdr['tstamp']

                iinfo.label = 'result_image_{}'.format(finfo['uuid'])
                iinfo.mask = nfcom.Extension("BPM")
                # Insert pixel offsets between frames
                iinfo.objmask_data = None
                iinfo.valid_target = False
                iinfo.valid_sky = False

                # FIXME: hardcode itype for the moment
                iinfo.itype = 'TARGET'
                if iinfo.itype == 'TARGET':
                    iinfo.valid_target = True
                    #targetframes.append(iinfo)
                    if target_is_sky:
                        iinfo.valid_sky = True
                        #skyframes.append(iinfo)
                if iinfo.itype == 'SKY':
                    iinfo.valid_sky = True
                    #skyframes.append(iinfo)
                images_info.append(iinfo)

        return images_info

    def compute_superflat(self, images_info, segmask=None, step=0):

        self.logger.info("Step %d, SF: combining the frames without offsets",
                         step)

        base_imgs = [img.resized_base for img in images_info]
        with nfcom.manage_fits(base_imgs) as imgs:

            data = []
            masks = []

            for img, img_info in zip(imgs, images_info):
                self.logger.debug('Step %d, opening resized frame %s', step,
                                  img_info.resized_base)
                data.append(img['primary'].data[img_info.valid_region])

            scales = [numpy.median(d) for d in data]

            if segmask is not None:
                masks = [segmask[frame.valid_region] for frame in images_info]
            else:
                for frame in images_info:
                    self.logger.debug('Step %d, opening resized mask %s', step,
                                      frame.resized_mask)
                    hdulist = fits.open(frame.resized_mask,
                                        memmap=True,
                                        mode='readonly')
                    #filelist.append(hdulist)
                    masks.append(hdulist['primary'].data[frame.valid_region])
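                # FIXME: the masks read in the loop above are discarded by
                # the assignment below, so the median combine runs unmasked
                # in this branch (kept as in the original snippet).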
                masks = None

            self.logger.debug('Step %d, combining %d frames', step, len(data))
            sf_data, _sf_var, sf_num = nacom.median(
                data,
                masks,
                scales=scales,
                dtype='float32',
                #blank=1.0 / scales[0]
            )

        # Normalize, flat has mean = 1
        sf_data[sf_data == 0] = 1e-5
        sf_data /= sf_data.mean()
        #sf_data[sf_data <= 0] = 1.0

        # Auxiliary data
        sfhdu = fits.PrimaryHDU(sf_data)
        self.save_intermediate_img(sfhdu, name_skyflat('comb', step))
        return sf_data

    def run_single(self, rinput):

        # FIXME: remove this, is deprecated

        obresult = rinput.obresult

        # just in case images are in result, instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = narray.resize_arrays(
            [m[0].data for m in data_hdul_s],
            subpixshape,
            offsetsp,
            finalshape,
            fill=1)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, use zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = narray.resize_arrays(masks,
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp,
                                use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data,
                                                       finalshape,
                                                       box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = narray.combine_shape(
                    subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = narray.resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = narray.resize_arrays(masks,
                                                     subpixshape,
                                                     offsetsp,
                                                     finalshape,
                                                     fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul,
                                        offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                      border=50)

        data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s)
                      for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions,
                                             channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %s, using %d+%d sky images', idx,
                                 len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error('No sky image available for frame %d',
                                      idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames',
                                  len(skydata))
                sky, _, num = nacom.median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warning(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    narray.fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(
                        arr_r, 'interm_%d_%03d.fits' % (inum, idx))

            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul,
                                    offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                          border=50)

            data_arr_0 = [
                (d[r] + s)
                for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)
            ]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = narray.combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result

    def combine_frames(self, frames, extinction, out=None, step=0):
        self.logger.debug('Step %d, opening sky-subtracted frames', step)

        def fits_open(name):
            """Open FITS with memmap in readonly mode"""
            return fits.open(name, mode='readonly', memmap=True)

        frameslll = [
            fits_open(frame.lastname) for frame in frames if frame.valid_target
        ]
        self.logger.debug('Step %d, opening mask frames', step)
        mskslll = [
            fits_open(frame.resized_mask) for frame in frames
            if frame.valid_target
        ]

        self.logger.debug('Step %d, combining %d frames', step, len(frameslll))
        try:
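            # standard atmospheric extinction correction: at airmass X a
            # source is dimmed by k*X magnitudes, i.e. its flux is scaled
            # by 10**(-0.4 * k * X); these factors bring all frames to a
            # common flux scale before combining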
            extinc = [
                pow(10, -0.4 * frame.metadata['airmass'] * extinction)
                for frame in frames if frame.valid_target
            ]
            data = [i['primary'].data for i in frameslll]
            masks = [i['primary'].data for i in mskslll]
            headers = [i['primary'].header for i in frameslll]

            out = nacom.median(data,
                               masks,
                               scales=extinc,
                               dtype='float32',
                               out=out)

            base_header = headers[0]
            hdu = fits.PrimaryHDU(out[0], header=base_header)
            hdu.header['history'] = "Combined %d images using '%s'" % (
                len(frameslll), 'median')
            hdu.header['history'] = 'Combination time {}'.format(
                datetime.datetime.utcnow().isoformat())
            for img in frameslll:
                hdu.header['history'] = "Image {}".format(
                    img[0].header['uuid'])
            prevnum = base_header.get('NUM-NCOM', 1)
            hdu.header['NUM-NCOM'] = prevnum * len(frameslll)
            hdu.header['NUMRNAM'] = 'FullDitheredImagesRecipe'
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = 'FULL_DITHERED_IMAGE'
            # Headers of last image
            hdu.header['TSUTC2'] = headers[-1]['TSUTC2']

            varhdu = fits.ImageHDU(out[1], name='VARIANCE')
            num = fits.ImageHDU(out[2].astype('uint8'), name='MAP')

            result = fits.HDUList([hdu, varhdu, num])
            # saving the three extensions
            fits.writeto('result_i%d.fits' % step, out[0], overwrite=True)
            fits.writeto('result_i%d_var.fits' % step, out[1], overwrite=True)
            fits.writeto('result_i%d_npix.fits' % step,
                         out[2],
                         overwrite=True)

            result.writeto('result_i%d_full.fits' % step, overwrite=True)
            return result
            return result

        finally:
            self.logger.debug('Step %d, closing sky-subtracted frames', step)
            for f in frameslll:
                f.close()
            self.logger.debug('Step %d, closing mask frames', step)
            for f in mskslll:
                f.close()

    def resize(self,
               frames,
               shape,
               offsetsp,
               finalshape,
               window=None,
               scale=1,
               step=0):
        self.logger.info('Resizing frames and masks')
        for frame, rel_offset in zip(frames, offsetsp):
            if frame.valid_target:
                region, _ = narray.subarray_match(finalshape, rel_offset,
                                                  shape)
                # Valid region
                frame.valid_region = region
                # Relative offset
                frame.rel_offset = rel_offset
                # names of frame and mask
                framen, maskn = name_redimensioned_frames(frame.label, step)
                frame.resized_base = framen
                frame.resized_mask = maskn
                self.logger.debug(
                    '%s, valid region is %s, relative offset is %s',
                    frame.label, custom_region_to_str(region), rel_offset)
                self.resize_frame_and_mask(frame, finalshape, framen, maskn,
                                           window, scale)

    def resize_frame_and_mask(self, frame, finalshape, framen, maskn, window,
                              scale):
        self.logger.info('Resizing frame %s', frame.label)
        with frame.origin.open() as hdul:
            baseshape = hdul[0].data.shape

            # FIXME: Resize_fits saves the resized image in framen
            resize_fits(hdul,
                        framen,
                        finalshape,
                        frame.valid_region,
                        window=window,
                        scale=scale,
                        dtype='float32')

        self.logger.info('Resizing mask %s', frame.label)
        # We don't conserve the sum of the values of the frame here, just
        # expand the mask

        if frame.mask is None:
            self.logger.warning('BPM missing, using zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            hdum = fits.HDUList(fits.PrimaryHDU(false_mask))
            frame.mask = hdum  #DataFrame(frame=hdum)
        elif isinstance(frame.mask, nfcom.Extension):
            ename = frame.mask.name
            with frame.origin.open() as hdul:
                frame.mask = fits.HDUList(hdul[ename].copy())

        resize_fits(frame.mask,
                    maskn,
                    finalshape,
                    frame.valid_region,
                    fill=1,
                    window=window,
                    scale=scale,
                    conserve=False)

    def create_mask(self, img, seeing_fwhm, step=0):

        #
        remove_border = True

        # sextractor takes care of bad pixels

        # if seeing_fwhm is not None and seeing_fwhm > 0:
        #    sex.config['SEEING_FWHM'] = seeing_fwhm * sex.config['PIXEL_SCALE']

        if remove_border:
            weightmap = 'weights4rms.fits'

            # Create weight map, removing npix pixels from either side
            # using a Hanning filter
            # npix = 90
            # w1 = npix
            # w2 = npix
            # wmap = numpy.ones_like(sf_data[0])

            # cos_win1 = numpy.hanning(2 * w1)
            # cos_win2 = numpy.hanning(2 * w2)

            # wmap[:,:w1] *= cos_win1[:w1]
            # wmap[:,-w1:] *= cos_win1[-w1:]
            # wmap[:w2,:] *= cos_win2[:w2, numpy.newaxis]
            # wmap[-w2:,:] *= cos_win2[-w2:, numpy.newaxis]

            # Take the number of combined images from the combined image
            wm = img[2].data.copy()
            # Don't search for objects where nimages < lower
            # FIXME: this is a magic number
            # We ignore objects in regions where we have less
            # than 10% of the images
            lower = wm.max() // 10
            border = (wm < lower)
            fits.writeto(weightmap, border.astype('uint8'), overwrite=True)

            # sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
            # FIXME: this is a magic number
            # sex.config['WEIGHT_THRESH'] = 50
            # sex.config['WEIGHT_IMAGE'] = weightmap
        else:
            border = None

        data_res = img[0].data
        bkg = sep.Background(data_res)
        data_sub = data_res - bkg

        self.logger.info('Running source extraction on previous result')
        objects, objmask = sep.extract(data_sub,
                                       1.5,
                                       err=bkg.globalrms,
                                       mask=border,
                                       segmentation_map=True)
        fits.writeto(name_segmask(step), objmask, overwrite=True)

        # # Plot objects
        # # FIXME, plot sextractor objects on top of image
        # patches = []
        # fwhms = []
        # nfirst = 0
        # catalog_f = sopen(sex.config['CATALOG_NAME'])
        # try:
        #     star = catalog_f.readline()
        #     while star:
        #         flags = star['FLAGS']
        #         # ignoring those objects with corrupted apertures
        #         if flags & sexcatalog.CORRUPTED_APER:
        #             star = catalog_f.readline()
        #             continue
        #         center = (star['X_IMAGE'], star['Y_IMAGE'])
        #         wd = 10 * star['A_IMAGE']
        #         hd = 10 * star['B_IMAGE']
        #         color = 'red'
        #         e = Ellipse(center, wd, hd, star['THETA_IMAGE'], color=color)
        #         patches.append(e)
        #         fwhms.append(star['FWHM_IMAGE'])
        #         nfirst += 1
        #         # FIXME Plot a ellipse
        #         star = catalog_f.readline()
        # finally:
        #     catalog_f.close()
        #
        # p = PatchCollection(patches, alpha=0.4)
        # ax = self._figure.gca()
        # ax.add_collection(p)
        # self._figure.canvas.draw()
        # self._figure.savefig('figure-segmentation-overlay_%01d.png' % step)
        #
        # self.figure_fwhm_histogram(fwhms, step=step)
        #
        # # mode with an histogram
        # hist, edges = numpy.histogram(fwhms, 50)
        # idx = hist.argmax()
        #
        # seeing_fwhm = 0.5 * (edges[idx] + edges[idx + 1])
        # if seeing_fwhm <= 0:
        #     _logger.warning(
        #         'Seeing FHWM %f pixels is negative, reseting', seeing_fwhm)
        #     seeing_fwhm = None
        # else:
        #     _logger.info('Seeing FHWM %f pixels (%f arcseconds)',
        #                  seeing_fwhm, seeing_fwhm * sex.config['PIXEL_SCALE'])
        # objmask = fits.getdata(name_segmask(step))

        return objmask, seeing_fwhm

    def compute_advanced_sky(self,
                             targetframes,
                             objmask,
                             skyframes=None,
                             target_is_sky=False,
                             maxsep=5.0,
                             nframes=10,
                             step=0,
                             save=True):

        if target_is_sky:
            skyframes = targetframes
            # Each frame is its closest sky frame
            nframes += 1
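            # request one extra neighbour: when the targets act as their own
            # sky frames, the closest match is the frame itself, which is
            # rejected below (tid == si)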
        elif skyframes is None:
            raise ValueError('skyframes not defined')

        # build kdtree
        sarray = numpy.array([frame.metadata['mjd'] for frame in skyframes])
        # shape must be (n, 1)
        sarray = numpy.expand_dims(sarray, axis=1)

        # query
        tarray = numpy.array([frame.metadata['mjd'] for frame in targetframes])
        # shape must be (n, 1)
        tarray = numpy.expand_dims(tarray, axis=1)

        kdtree = KDTree(sarray)

        # convert maxsep (assumed to be given in minutes) to days,
        # the unit of MJD: one day is 1440 minutes
        SCALE = 1 / 1440.0
        # max_time_sep = ri.sky_images_sep_time / 1440.0
        _dis, idxs = kdtree.query(tarray,
                                  k=nframes,
                                  distance_upper_bound=maxsep * SCALE)
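        # for each target timestamp, 'idxs' holds the indices of the k
        # nearest sky timestamps; when fewer than k neighbours lie within
        # the distance bound, scipy pads with the invalid index nsky, which
        # is filtered out below via the 'si < nsky' test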

        nsky = len(sarray)

        for tid, idss in enumerate(idxs):
            try:
                tf = targetframes[tid]
                self.logger.info('Step %d, SC: computing advanced sky for %s',
                                 step, tf.label)
                # filter(lambda x: x < nsky, idss)
                locskyframes = []
                for si in idss:
                    if tid == si:
                        # this sky frame is the current target frame; skip it
                        continue
                    if si < nsky:
                        self.logger.debug('Step %d, SC: %s is a sky frame',
                                          step, skyframes[si].label)
                        locskyframes.append(skyframes[si])
                self.compute_advanced_sky_for_frame(tf,
                                                    locskyframes,
                                                    step=step,
                                                    save=save)
            except IndexError:
                self.logger.error('No sky image available for frame %s',
                                  tf.lastname)
                raise

    def compute_advanced_sky_for_frame(self,
                                       frame,
                                       skyframes,
                                       step=0,
                                       save=True):
        self.logger.info('Correcting sky in frame %s', frame.lastname)
        self.logger.info('with sky computed from frames')
        for i in skyframes:
            self.logger.info('%s', i.flat_corrected)

        data = []
        scales = []
        masks = []
        # handle the FITS file to close it finally
        desc = []
        try:
            for i in skyframes:
                filename = i.flat_corrected
                hdulist = fits.open(filename, mode='readonly', memmap=True)

                data.append(hdulist['primary'].data[i.valid_region])
                desc.append(hdulist)
                #scales.append(numpy.median(data[-1]))
                if i.objmask_data is not None:
                    masks.append(i.objmask_data)
                    self.logger.debug('object mask is shared')
                elif i.objmask is not None:
                    hdulistmask = fits.open(i.objmask,
                                            mode='readonly',
                                            memmap=True)
                    masks.append(hdulistmask['primary'].data)
                    desc.append(hdulistmask)
                    self.logger.debug('object mask is particular')
                else:
                    self.logger.warning('no object mask for %s', filename)

            self.logger.debug('computing background with %d frames', len(data))
            sky, _, num = nacom.median(data, masks)  #, scales=scales)

        finally:
            # Closing all FITS files
            for hdl in desc:
                hdl.close()

        if numpy.any(num == 0):
            # We have pixels without
            # sky background information
            self.logger.warning(
                'pixels without sky information when correcting %s',
                frame.flat_corrected)
            binmask = num == 0
            # FIXME: during development, this is faster
            # sky[binmask] = sky[num != 0].mean()

            # To continue we interpolate over the patches
            narray.fixpix2(sky, binmask, out=sky, iterations=1)

            name = name_skybackgroundmask(frame.label, step)
            fits.writeto(name, binmask.astype('int16'), overwrite=True)

        name_sky = name_skybackground(frame.label, step)
        fits.writeto(name_sky, sky, overwrite=True)

        dst = name_skysub_proc(frame.label, step)
        prev = frame.lastname
        shutil.copyfile(prev, dst)
        frame.lastname = dst

        with fits.open(frame.lastname, mode='update') as hdulist:
            data = hdulist['primary'].data
            valid = data[frame.valid_region]
            valid -= sky

    def compute_regions_from_objs(self, arr, finalshape, box=50, corners=True):
        regions = []
        catalog, mask = self.create_object_catalog(arr, border=300)

        self.save_intermediate_array(mask, 'objmask.fits')
        # from the catalog, keep the NKEEP brightest objects
        # smaller than LIMIT_AREA pixels

        LIMIT_AREA = 5000
        NKEEP = 1
        idx_small = catalog['npix'] < LIMIT_AREA
        objects_small = catalog[idx_small]
        idx_flux = objects_small['flux'].argsort()
        objects_nth = objects_small[idx_flux][-NKEEP:]
        for obj in objects_nth:
            self.logger.debug('ref position is %s, %s', obj['x'], obj['y'])
            region = nautils.image_box2d(obj['x'], obj['y'], finalshape,
                                         (box, box))
            self.logger.debug('region is %s', region)
            regions.append(region)
        return regions

    def create_object_catalog(self, arr, threshold=3.0, border=0):

        if border > 0:
            wmap = numpy.ones_like(arr)
            wmap[border:-border, border:-border] = 0
        else:
            wmap = None

        bkg = sep.Background(arr)
        data_sub = arr - bkg
        objects, objmask = sep.extract(data_sub,
                                       threshold,
                                       err=bkg.globalrms *
                                       numpy.ones_like(data_sub),
                                       mask=wmap,
                                       segmentation_map=True)
        return objects, objmask
Example #18
class TestPointSourceRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    shift_coordinates = Parameter(
        True, 'Use header information to'
        ' shift the pinhole positions from (0,0) '
        'to X_DTU, Y_DTU')
    box_half_size = Parameter(4, 'Half of the computation box size in pixels')
    recenter = Parameter(True, 'Recenter the pinhole coordinates')
    max_recenter_radius = Parameter(2.0, 'Maximum distance for recentering')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    positions = Result(tarray.ArrayType)
    positions_alt = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    filter = Result(str)
    readmode = Result(str)
    ROTANG = Result(float)
    DETPA = Result(float)
    DTUPA = Result(float)
    param_recenter = Result(bool)
    param_max_recenter_radius = Result(float)
    param_box_half_size = Result(float)

    def run(self, rinput):
        self.logger.info('starting processing for object detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.logger.debug('finding point sources')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        data = hdulist[0].data

        # Copy needed in numpy 1.7
        # This seems already bitswapped??
        # FIXME: check this works offline/online
        # ndata = data.byteswap().newbyteorder()
        # data = data.byteswap(inplace=True).newbyteorder()

        snr_detect = 5.0
        fwhm = 4.0
        npixels = 15
        box_shape = [64, 64]
        self.logger.info('point source detection2')
        self.logger.info('using internal mask to remove corners')
        # Corners
        mask = numpy.zeros_like(data, dtype='int32')
        mask[2000:, 0:80] = 1
        mask[2028:, 2000:] = 1
        mask[:50, 1950:] = 1
        mask[:100, :50] = 1
        # Remove corner regions

        self.logger.info('compute background map, %s', box_shape)
        bkg = sep.Background(data)

        self.logger.info('reference fwhm is %5.1f pixels', fwhm)
        self.logger.info('detect threshold, %3.1f over background', snr_detect)
        self.logger.info('convolve with gaussian kernel, FWHM %3.1f pixels',
                         fwhm)
        sigma = fwhm * gaussian_fwhm_to_sigma
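        # for a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.355*sigma,
        # hence sigma = fwhm * gaussian_fwhm_to_sigma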
        #
        kernel = Gaussian2DKernel(sigma)
        kernel.normalize()

        thresh = snr_detect * bkg.globalrms
        data_s = data - bkg.back()
        objects, segmap = sep.extract(data_s,
                                      thresh,
                                      minarea=npixels,
                                      filter_kernel=kernel.array,
                                      segmentation_map=True,
                                      mask=mask)
        fits.writeto('segmap.fits', segmap, overwrite=True)
        self.logger.info('detected %d objects', len(objects))

        # Hardcoded values
        rs2 = 15.0
        fit_rad = 10.0
        flux_min = 1000.0
        flux_max = 30000.0
        self.logger.debug('Flux limit is %6.1f %6.1f', flux_min, flux_max)
        # FIXME: this should be a view, not a copy
        xall = objects['x']
        yall = objects['y']
        mm = numpy.array([xall, yall]).T
        self.logger.info('computing FWHM')
        # Find objects with pairs inside fit_rad
        kdtree = KDTree(mm)
        nearobjs = (kdtree.query_ball_tree(kdtree, r=fit_rad))
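        # query_ball_tree returns, for every source, the indices of all
        # sources within fit_rad (including itself); entries with more than
        # one index have a close companion and are flagged below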
        positions = []
        for idx, obj in enumerate(objects):
            x0 = obj['x']
            y0 = obj['y']
            sl = image_box2d(x0, y0, data.shape, (fit_rad, fit_rad))
            # sl_sky = image_box2d(x0, y0, data.shape, (rs2, rs2))
            part_s = data_s[sl]
            # Logical coordinates
            xx0 = x0 - sl[1].start
            yy0 = y0 - sl[0].start

            _, fwhm_x, fwhm_y = compute_fwhm_2d_simple(part_s, xx0, yy0)

            if min(fwhm_x, fwhm_y) < 3:
                continue
            if flux_min > obj['peak'] or flux_max < obj['peak']:
                continue
            # nobjs is the number of objects inside fit_rad
            nobjs = len(nearobjs[idx])

            flag = 0 if nobjs == 1 else 1

            positions.append([idx, x0, y0, obj['peak'], fwhm_x, fwhm_y, flag])

        self.logger.info('saving photometry')
        positions = numpy.array(positions)
        positions_alt = positions
        self.logger.info('end processing for object detection')

        result = self.create_result(
            frame=hdulist,
            positions=positions_alt,
            positions_alt=positions_alt,
            filter=filtername,
            DTU=dtub,
            readmode=readmode,
            ROTANG=rotang,
            DETPA=detpa,
            DTUPA=dtupa,
            param_recenter=rinput.recenter,
            param_max_recenter_radius=rinput.max_recenter_radius,
            param_box_half_size=rinput.box_half_size)
        return result
Example #19
class CosmeticsRecipe(EmirRecipe):
    """Detector Cosmetics.

    Recipe to find and tag bad pixels in the detector.
    """

    obresult = ObservationResultRequirement()
    insconf = InstrumentConfigurationRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    lowercut = Parameter(
        4.0, 'Values below this sigma level are flagged as dead pixels')
    uppercut = Parameter(
        4.0, 'Values above this sigma level are flagged as hot pixels')
    maxiter = Parameter(30, 'Maximum number of iterations')

    ratioframe = Result(prods.ProcessedImage)
    maskframe = Result(prods.MasterBadPixelMask)

    def run(self, rinput):

        # FIXME:
        # We need 2 flats
        # Of different exposure times
        #
        # And their calibrations
        #
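        # The idea: the ratio of two flats taken at different exposure
        # levels removes the common pixel-to-pixel response, so defective
        # (dead/hot) pixels stand out as outliers that can be sigma-clipped.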

        if len(rinput.obresult.frames) < 2:
            raise RecipeError('The recipe requires 2 flat frames')

        iinfo = []
        for frame in rinput.obresult.frames:
            with frame.open() as hdulist:
                iinfo.append(gather_info(hdulist))

        # Loading calibrations
        with rinput.master_bias.open() as hdul:
            readmode = hdul[0].header.get('READMODE', 'undefined')
            if readmode.lower() in ['simple', 'bias']:
                self.logger.debug('loading bias')
                mbias = hdul[0].data
                bias_corrector = proc.BiasCorrector(mbias)
            else:
                self.logger.debug('ignoring bias')
                bias_corrector = numina.util.node.IdNode()

        with rinput.master_dark.open() as mdark_hdul:
            self.logger.debug('loading dark')
            mdark = mdark_hdul[0].data
            dark_corrector = proc.DarkCorrector(mdark)

        flow = numina.util.flow.SerialFlow([bias_corrector, dark_corrector])

        self.logger.info('processing flat #1')
        with rinput.obresult.frames[0].open() as hdul:
            other = flow(hdul)
            f1 = other[0].data.copy() * iinfo[0]['texp'] * 1e-3

        self.logger.info('processing flat #2')
        with rinput.obresult.frames[1].open() as hdul:
            other = flow(hdul)
            f2 = other[0].data.copy() * iinfo[1]['texp'] * 1e-3

        # Preprocess...

        maxiter = rinput.maxiter
        lowercut = rinput.lowercut
        uppercut = rinput.uppercut

        ninvalid = 0
        mask = None

        if mask:
            m = fits.getdata(mask)
            ninvalid = numpy.count_nonzero(m)
        else:
            m = numpy.zeros_like(f1, dtype='int')

        for niter in range(1, maxiter + 1):
            self.logger.debug('iter %d', niter)
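            # sigma clipping on the flat ratio: 'cosmetics' is assumed to
            # flag pixels deviating more than lowercut/uppercut sigmas from
            # the f1/f2 ratio and to return the updated mask; the loop stops
            # when the set of flagged pixels is stable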
            ratio, m, sigma = cosmetics(f1,
                                        f2,
                                        m,
                                        lowercut=lowercut,
                                        uppercut=uppercut)

            if self.intermediate_results:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    fits.writeto('numina-cosmetics-i%02d.fits' % niter,
                                 ratio,
                                 overwrite=True)
                    fits.writeto('numina-mask-i%02d.fits' % niter,
                                 m,
                                 overwrite=True)
                    fits.writeto('numina-sigma-i%02d.fits' % niter,
                                 m * 0.0 + sigma,
                                 overwrite=True)
            self.logger.debug('iter %d, invalid points in input mask: %d',
                              niter, ninvalid)
            self.logger.debug('iter %d, estimated sigma is %f', niter, sigma)
            n_ninvalid = numpy.count_nonzero(m)

            # Something is probably wrong here:
            # too many defective pixels
            if ninvalid / m.size >= 0.10:
                # This should set a flag in the output
                msg = 'fraction of defective pixels exceeds 10%'
                self.logger.warning(msg)

            if n_ninvalid == ninvalid:
                self.logger.info('convergence reached after %d iterations',
                                 niter)
                break
            self.logger.info('new invalid points: %d', n_ninvalid - ninvalid)
            ninvalid = n_ninvalid
        else:
            # This should set a flag in the output
            msg = 'convergence not reached after %d iterations' % maxiter
            self.logger.warning(msg)

        self.logger.info('number of dead pixels %d',
                         numpy.count_nonzero(m == PIXEL_DEAD))
        self.logger.info('number of hot pixels %d',
                         numpy.count_nonzero(m == PIXEL_HOT))

        if self.intermediate_results:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                fits.writeto('numina-cosmetics.fits', ratio, overwrite=True)
                fits.writeto('numina-mask.fits', m, overwrite=True)
                fits.writeto('numina-sigma.fits',
                             sigma * numpy.ones_like(m),
                             overwrite=True)

        hdu = fits.PrimaryHDU(ratio)
        hdr = hdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        ratiohdl = fits.HDUList([hdu])

        maskhdu = fits.PrimaryHDU(m)
        hdr = maskhdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        maskhdl = fits.HDUList([maskhdu])

        res = self.create_result(ratioframe=ratiohdl, maskframe=maskhdl)
        return res
Example #20
class GainRecipe1(EmirRecipe):
    """Detector Gain Recipe.

    Recipe to calibrate the detector gain.
    """

    obresult = ObservationResultRequirement()
    region = Parameter('channel', 'Region used to compute: '
                       '(full|quadrant|channel)',
                       choices=['full', 'quadrant', 'channel'])

    gain = Product(MasterGainMap(None, None, None))
    ron = Product(MasterRONMap(None, None))

    def get_region(self, rinput):
        # renamed from 'region' so the method does not shadow the
        # Parameter of the same name defined above
        mm = rinput.region.lower()
        if mm == 'full':
            return [(slice(0, 2048), slice(0, 2048))]
        elif mm == 'quadrant':
            return QUADRANTS
        elif mm == 'channel':
            return CHANNELS
        else:
            raise ValueError('unknown region: {!r}'.format(mm))

    def run(self, rinput):

        resets = []
        ramps = []

        for frame in rinput.obresult.frames:
            if frame.itype == 'RESET':
                resets.append(frame.label)
                _logger.debug('%s is RESET', frame.label)
            elif frame.itype == 'RAMP':
                ramps.append(frame.label)
                _logger.debug('%s is RAMP', frame.label)
            else:
                raise RecipeError('frame is neither a RAMP nor a RESET')

        channels = self.get_region(rinput)
        result_gain = numpy.zeros((len(channels), ))
        result_ron = numpy.zeros_like(result_gain)

        counts = numpy.zeros((len(ramps), len(channels)))
        variance = numpy.zeros_like(counts)

        last_reset = resets[-1]
        _logger.debug('opening last reset image %s', last_reset)
        last_reset_data = fits.getdata(last_reset)

        for i, di in enumerate(ramps):
            with fits.open(di, mode='readonly') as fd:
                restdata = fd[0].data - last_reset_data
                for j, channel in enumerate(channels):
                    c = restdata[channel].mean()
                    _logger.debug('%f counts in channel', c)
                    counts[i, j] = c
                    v = restdata[channel].var(ddof=1)
                    _logger.debug('%f variance in channel', v)
                    variance[i, j] = v

        for j, _ in enumerate(channels):
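            # photon-transfer relation: variance = counts/gain + RON**2,
            # so the linear fit below yields gain = 1/slope and
            # RON = sqrt(intercept)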
            res = scipy.stats.linregress(counts[:, j], variance[:, j])
            slope, intercept, _r_value, _p_value, _std_err = res

            result_gain[j] = 1.0 / slope
            result_ron[j] = math.sqrt(intercept)
        cube = numpy.zeros((2, 2048, 2048))

        for gain, var, channel in zip(result_gain, result_ron, channels):
            cube[0][channel] = gain
            cube[1][channel] = var

        hdu = fits.PrimaryHDU(cube[0])
        hduvar = fits.ImageHDU(cube[1])
        hdulist = fits.HDUList([hdu, hduvar])

        gain = MasterGainMap(mean=result_gain,
                             var=numpy.array([]),
                             frame=DataFrame(hdulist))
        ron = MasterRONMap(mean=result_ron, var=numpy.array([]))
        return self.create_result(gain=gain, ron=ron)
Example #21
class FullDitheredImagesRecipe(JoinDitheredImagesRecipe):
    obresult = ObservationResultRequirement(
        query_opts=ResultOf('frame', node='children'))
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    # extinction = Extinction_Requirement()
    # sources = Catalog_Requirement()
    # offsets = Offsets_Requirement()
    offsets = Requirement(prods.CoordinateList2DType,
                          'List of pairs of offsets',
                          optional=True)

    iterations = Parameter(4, 'Iterations of the recipe')
    sky_images = Parameter(
        5, 'Images used to estimate the '
        'background before and after current image')
    sky_images_sep_time = reqs.SkyImageSepTime_Requirement()
    check_photometry_levels = Parameter(
        [0.5, 0.8], 'Levels to check the flux of the objects')
    check_photometry_actions = Parameter(['warn', 'warn', 'default'],
                                         'Actions to take on images')

    frame = Result(prods.ProcessedImage)
    sky = Result(prods.ProcessedImage, optional=True)
    catalog = Result(prods.SourcesCatalog, optional=True)

    def run(self, rinput):
        partial_result = self.run_single(rinput)
        return partial_result

    def run_single(self, rinput):

        obresult = rinput.obresult

        # just in case images are in result, instead of frames
        if not obresult.frames:
            frames = obresult.results
        else:
            frames = obresult.frames

        img_info = []
        data_hdul = []
        for f in frames:
            img = f.open()
            data_hdul.append(img)
            info = {}
            info['tstamp'] = img[0].header['tstamp']
            info['airmass'] = img[0].header['airmass']
            img_info.append(info)

        channels = FULL

        use_errors = True
        # Initial checks
        baseimg = data_hdul[0]
        has_num_ext = 'NUM' in baseimg
        has_bpm_ext = 'BPM' in baseimg
        baseshape = baseimg[0].shape
        subpixshape = baseshape
        base_header = baseimg[0].header
        compute_sky = 'NUM-SK' not in base_header
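        # the NUM-SK keyword in the base header is taken to mark data that
        # has already been sky subtracted, so the initial sky correction
        # is skipped in that case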
        compute_sky_advanced = False

        self.logger.debug('base image is: %s',
                          self.datamodel.get_imgid(baseimg))
        self.logger.debug('images have NUM extension: %s', has_num_ext)
        self.logger.debug('images have BPM extension: %s', has_bpm_ext)
        self.logger.debug('compute sky is needed: %s', compute_sky)

        if compute_sky:
            self.logger.info('compute sky simple')
            sky_result = self.compute_sky_simple(data_hdul, use_errors=False)
            self.save_intermediate_img(sky_result, 'sky_init.fits')
            sky_result.writeto('sky_init.fits', overwrite=True)
            sky_data = sky_result[0].data
            self.logger.debug('sky image has shape %s', sky_data.shape)

            self.logger.info('sky correction in individual images')
            corrector = proc.SkyCorrector(
                sky_data,
                self.datamodel,
                calibid=self.datamodel.get_imgid(sky_result))
            # If we do not update keyword SKYADD
            # there is no sky subtraction
            for m in data_hdul:
                m[0].header['SKYADD'] = True
            # this is a little hackish
            # sky corrected
            data_hdul_s = [corrector(m) for m in data_hdul]
            base_header = data_hdul_s[0][0].header
        else:
            sky_result = None
            data_hdul_s = data_hdul

        self.logger.info('Computing offsets from WCS information')

        finalshape, offsetsp, refpix, offset_xy0 = self.compute_offset_wcs_imgs(
            data_hdul_s, baseshape, subpixshape)

        self.logger.debug("Relative offsetsp %s", offsetsp)
        self.logger.info('Shape of resized array is %s', finalshape)

        # Resizing target imgs
        data_arr_sr, regions = resize_arrays([m[0].data for m in data_hdul_s],
                                             subpixshape,
                                             offsetsp,
                                             finalshape,
                                             fill=1)

        if has_num_ext:
            self.logger.debug('Using NUM extension')
            masks = [
                numpy.where(m['NUM'].data, 0, 1).astype('int16')
                for m in data_hdul
            ]
        elif has_bpm_ext:
            self.logger.debug('Using BPM extension')
            #
            masks = [
                numpy.where(m['BPM'].data, 1, 0).astype('int16')
                for m in data_hdul
            ]
        else:
            self.logger.warning('BPM missing, using zeros instead')
            false_mask = numpy.zeros(baseshape, dtype='int16')
            masks = [false_mask for _ in data_arr_sr]

        self.logger.debug('resize bad pixel masks')
        mask_arr_r, _ = resize_arrays(masks,
                                      subpixshape,
                                      offsetsp,
                                      finalshape,
                                      fill=1)

        if self.intermediate_results:
            self.logger.debug('save resized intermediate img')
            for idx, arr_r in enumerate(data_arr_sr):
                self.save_intermediate_array(arr_r, 'interm1_%03d.fits' % idx)

        hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul, offsetsp,
                                use_errors)

        self.save_intermediate_img(hdulist, 'result_initial1.fits')

        compute_cross_offsets = True
        if compute_cross_offsets:

            self.logger.debug("Compute cross-correlation of images")
            # regions_c = self.compute_regions(finalshape, box=200, corners=True)

            # Regions from bright objects
            regions_c = self.compute_regions_from_objs(hdulist[0].data,
                                                       finalshape,
                                                       box=20)

            try:

                offsets_xy_c = self.compute_offset_xy_crosscor_regions(
                    data_arr_sr, regions_c, refine=True, tol=1)
                #
                # Combined offsets
                # Offsets in numpy order, swapping the (x, y) axes
                offsets_xy_t = offset_xy0 - offsets_xy_c
                offsets_fc = offsets_xy_t[:, ::-1]
                offsets_fc_t = numpy.round(offsets_fc).astype('int')
                self.logger.debug('Total offsets: %s', offsets_xy_t)
                self.logger.info('Computing relative offsets from cross-corr')
                finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
                #
                self.logger.debug("Relative offsetsp (crosscorr) %s", offsetsp)
                self.logger.info('Shape of resized array (crosscorr) is %s',
                                 finalshape)

                # Resizing target imgs
                self.logger.debug("Resize to final offsets")
                data_arr_sr, regions = resize_arrays(
                    [m[0].data for m in data_hdul_s],
                    subpixshape,
                    offsetsp,
                    finalshape,
                    fill=1)

                if self.intermediate_results:
                    self.logger.debug('save resized intermediate2 img')
                    for idx, arr_r in enumerate(data_arr_sr):
                        self.save_intermediate_array(arr_r,
                                                     'interm2_%03d.fits' % idx)

                self.logger.debug('resize bad pixel masks')
                mask_arr_r, _ = resize_arrays(masks,
                                              subpixshape,
                                              offsetsp,
                                              finalshape,
                                              fill=1)

                hdulist = self.combine2(data_arr_sr, mask_arr_r, data_hdul,
                                        offsetsp, use_errors)

                self.save_intermediate_img(hdulist, 'result_initial2.fits')
            except Exception as error:
                self.logger.warning('Error during cross-correlation, %s',
                                    error)

        catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                      border=50)

        if sky_result is not None:
            data_arr_sky = [sky_result[0].data for _ in data_arr_sr]
        else:
            # no initial sky was computed, so there is nothing to add back
            data_arr_sky = [0.0 for _ in data_arr_sr]
        data_arr_0 = [(d[r] + s)
                      for d, r, s in zip(data_arr_sr, regions, data_arr_sky)]
        data_arr_r = [d.copy() for d in data_arr_sr]

        for inum in range(1, rinput.iterations + 1):
            # superflat
            sf_data = self.compute_superflat(data_arr_0, objmask, regions,
                                             channels)
            fits.writeto('superflat_%d.fits' % inum, sf_data, overwrite=True)
            # apply superflat
            data_arr_rf = data_arr_r
            for base, arr, reg in zip(data_arr_rf, data_arr_0, regions):
                arr_f = arr / sf_data
                #arr_f = arr
                base[reg] = arr_f

            # compute sky advanced
            data_arr_sky = []
            data_arr_rfs = []
            self.logger.info('Step %d, SC: computing advanced sky', inum)
            scale = rinput.sky_images_sep_time * 60
            tstamps = numpy.array([info['tstamp'] for info in img_info])
            for idx, hdu in enumerate(data_hdul):
                diff1 = tstamps - tstamps[idx]
                idxs1 = (diff1 > 0) & (diff1 < scale)
                idxs2 = (diff1 < 0) & (diff1 > -scale)
                l1, = numpy.nonzero(idxs1)
                l2, = numpy.nonzero(idxs2)
                limit1 = l1[-rinput.sky_images:]
                limit2 = l2[:rinput.sky_images]
                len_l1 = len(limit1)
                len_l2 = len(limit2)
                self.logger.info('For image %d, using %d+%d sky images',
                                 idx, len_l1, len_l2)
                if len_l1 + len_l2 == 0:
                    self.logger.error('No sky image available for frame %d',
                                      idx)
                    raise ValueError('No sky image')
                skydata = []
                skymasks = []
                skyscales = []
                my_region = regions[idx]
                my_sky_scale = numpy.median(data_arr_rf[idx][my_region])
                for i in numpy.concatenate((limit1, limit2)):
                    region_s = regions[i]
                    data_s = data_arr_rf[i][region_s]
                    mask_s = objmask[region_s]
                    scale_s = numpy.median(data_s)
                    skydata.append(data_s)
                    skymasks.append(mask_s)
                    skyscales.append(scale_s)
                self.logger.debug('computing background with %d frames',
                                  len(skydata))
                sky, _, num = median(skydata, skymasks, scales=skyscales)
                # rescale
                sky *= my_sky_scale
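                # the combine normalizes each sky frame by its median
                # (skyscales), so the result is rescaled to this frame's
                # own background level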

                binmask = num == 0

                if numpy.any(binmask):
                    # We have pixels without
                    # sky background information
                    self.logger.warning(
                        'pixels without sky information when correcting %d',
                        idx)

                    # FIXME: during development, this is faster
                    # sky[binmask] = sky[num != 0].mean()
                    # To continue we interpolate over the patches
                    fixpix2(sky, binmask, out=sky, iterations=1)

                name = 'sky_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, sky, overwrite=True)
                name = 'sky_binmask_%d_%03d.fits' % (inum, idx)
                fits.writeto(name, binmask.astype('int16'), overwrite=True)

                data_arr_sky.append(sky)
                arr = numpy.copy(data_arr_rf[idx])
                arr[my_region] = data_arr_rf[idx][my_region] - sky
                data_arr_rfs.append(arr)
                # subtract sky advanced

            if self.intermediate_results:
                self.logger.debug('save resized intermediate img')
                for idx, arr_r in enumerate(data_arr_rfs):
                    self.save_intermediate_array(
                        arr_r, 'interm_%d_%03d.fits' % (inum, idx))

            hdulist = self.combine2(data_arr_rfs, mask_arr_r, data_hdul,
                                    offsetsp, use_errors)

            self.save_intermediate_img(hdulist, 'result_%d.fits' % inum)

            # For next step
            catalog, objmask = self.create_object_catalog(hdulist[0].data,
                                                          border=50)

            data_arr_0 = [
                (d[r] + s)
                for d, r, s in zip(data_arr_rfs, regions, data_arr_sky)
            ]
            data_arr_r = [d.copy() for d in data_arr_rfs]

        result = self.create_result(frame=hdulist)
        self.logger.info('end of dither recipe')
        return result

    def compute_superflat(self, data_arr_r, objmask, regions, channels):
        # superflat
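        # the superflat is built from the science frames themselves:
        # median-combine them, scaled by their medians and with detected
        # objects masked, so that only the illumination pattern remains;
        # pixels with no valid contribution are interpolated per channel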

        mask = [objmask[r] for r in regions]
        scales = [numpy.median(d) for d in data_arr_r]
        self.logger.debug('flat scaling %s', scales)
        sf_data, _sf_var, sf_num = flatcombine(data_arr_r,
                                               masks=mask,
                                               scales=scales)

        for channel in channels:
            mask = (sf_num[channel] == 0)
            if numpy.any(mask):
                fixpix2(sf_data[channel], mask, out=sf_data[channel])

        # Normalize, flat has mean = 1
        sf_data /= sf_data.mean()
        return sf_data

    def compute_sky_advanced(self, data_hdul, omasks, base_header, use_errors):
        method = combine.mean

        self.logger.info('recombine images with segmentation mask')
        sky_data = method([m[0].data for m in data_hdul],
                          masks=omasks,
                          dtype='float32')

        hdu = fits.PrimaryHDU(sky_data[0], header=base_header)
        points_no_data = (sky_data[2] == 0).sum()

        self.logger.debug('update created sky image result header')
        skyid = str(uuid.uuid1())
        hdu.header['UUID'] = skyid
        hdu.header['history'] = "Combined {} images using '{}'".format(
            len(data_hdul), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in data_hdul:
            hdu.header['history'] = "Image {}".format(
                self.datamodel.get_imgid(img))

        msg = "missing pixels, total: {}, fraction: {:3.1f}".format(
            points_no_data, points_no_data / sky_data[2].size)
        hdu.header['history'] = msg
        self.logger.debug(msg)

        if use_errors:
            varhdu = fits.ImageHDU(sky_data[1], name='VARIANCE')
            num = fits.ImageHDU(sky_data[2], name='MAP')
            sky_result = fits.HDUList([hdu, varhdu, num])
        else:
            sky_result = fits.HDUList([hdu])

        return sky_result
Example #22
class ABBASpectraFastRectwv(EmirRecipe):
    """Process AB or ABBA images applying a single wavelength calibration

    Note that the wavelength calibration has already been determined.

    """

    logger = logging.getLogger(__name__)

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_flat = reqs.MasterSpectralFlatFieldRequirement()
    rectwv_coeff = reqs.RectWaveCoeffRequirement()

    pattern = Parameter('ABBA',
                        description='Observation pattern',
                        choices=['AB', 'ABBA'])
    # do not allow 'sum' as combination method in order to avoid
    # confusion with number of counts in resulting image
    method = Parameter('mean',
                       description='Combination method',
                       choices=['mean', 'median', 'sigmaclip'])
    method_kwargs = Parameter(dict(),
                              description='Arguments for combination method')
    voffset_pix = Parameter(0.0,
                            description='Shift (pixels) to move A into B',
                            optional=True)

    reduced_mos_abba = Result(prods.ProcessedMOS)
    reduced_mos_abba_combined = Result(prods.ProcessedMOS)

    def run(self, rinput):

        nimages = len(rinput.obresult.frames)
        pattern = rinput.pattern
        pattern_length = len(pattern)

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))

        # check pattern sequence matches number of images
        if nimages % pattern_length != 0:
            raise ValueError('Number of images is not a multiple of pattern '
                             'length: {}, {}'.format(nimages, pattern_length))
        nsequences = nimages // pattern_length
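        # e.g. 8 frames with pattern 'ABBA' give nsequences = 2,
        # and full_set below becomes 'ABBAABBA'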

        rectwv_coeff = rinput.rectwv_coeff
        self.logger.info(rectwv_coeff)
        self.logger.info('observation pattern: {}'.format(pattern))
        self.logger.info('nsequences.........: {}'.format(nsequences))

        full_set = pattern * nsequences
        self.logger.info('full set of images.: {}'.format(full_set))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # available combination methods
        method = getattr(combine, rinput.method)
        method_kwargs = rinput.method_kwargs

        # basic reduction of A images
        list_a = [
            rinput.obresult.frames[i] for i, char in enumerate(full_set)
            if char == 'A'
        ]
        with contextlib.ExitStack() as stack:
            self.logger.info('starting basic reduction of A images')
            hduls = [stack.enter_context(fname.open()) for fname in list_a]
            reduced_image_a = combine_imgs(hduls,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        reduced_image_a = flow(reduced_image_a)
        hdr = reduced_image_a[0].header
        self.set_base_headers(hdr)

        # basic reduction of B images
        list_b = [
            rinput.obresult.frames[i] for i, char in enumerate(full_set)
            if char == 'B'
        ]
        with contextlib.ExitStack() as stack:
            self.logger.info('starting basic reduction of B images')
            hduls = [stack.enter_context(fname.open()) for fname in list_b]
            reduced_image_b = combine_imgs(hduls,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        reduced_image_b = flow(reduced_image_b)
        hdr = reduced_image_b[0].header
        self.set_base_headers(hdr)

        # save intermediate reduced_image_a and reduced_image_b
        self.save_intermediate_img(reduced_image_a, 'reduced_image_a.fits')
        self.save_intermediate_img(reduced_image_b, 'reduced_image_b.fits')

        # computation of A-B
        header_a = reduced_image_a[0].header
        header_b = reduced_image_b[0].header
        data_a = reduced_image_a[0].data.astype('float32')
        data_b = reduced_image_b[0].data.astype('float32')
        reduced_data = data_a - data_b

        # update reduced image header
        reduced_image = self.create_reduced_image(
            rinput,
            reduced_data,
            header_a,
            header_b,
            rinput.pattern,
            full_set,
            voffset_pix=0,
            header_mos_abba=None,
        )

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # apply rectification and wavelength calibration
        self.logger.info('begin rect.+wavecal. reduction of ABBA spectra')
        reduced_mos_abba = apply_rectwv_coeff(reduced_image, rectwv_coeff)
        header_mos_abba = reduced_mos_abba[0].header

        # combine A and B data by shifting B on top of A
        voffset_pix = rinput.voffset_pix

        if voffset_pix is not None and voffset_pix != 0:
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(voffset_pix))
            reduced_mos_abba_data = reduced_mos_abba[0].data.astype('float32')
            shifted_a_minus_b_data = shift_image2d(
                reduced_mos_abba_data,
                yoffset=-voffset_pix,
            ).astype('float32')
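            # the rectified A-B frame holds the A spectra (positive) and the
            # B spectra (negative), separated by voffset_pix; shifting by
            # -voffset_pix and subtracting stacks B onto A, doubling the
            # signal, hence the division by 2 below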
            reduced_mos_abba_combined_data = \
                reduced_mos_abba_data - shifted_a_minus_b_data
            # scale signal to exposure of a single image
            reduced_mos_abba_combined_data /= 2.0
        else:
            reduced_mos_abba_combined_data = None

        # update reduced combined image header
        reduced_mos_abba_combined = self.create_reduced_image(
            rinput,
            reduced_mos_abba_combined_data,
            header_a,
            header_b,
            rinput.pattern,
            full_set,
            voffset_pix,
            header_mos_abba=header_mos_abba)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos_abba,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of ABBA spectra')
        result = self.create_result(
            reduced_mos_abba=reduced_mos_abba,
            reduced_mos_abba_combined=reduced_mos_abba_combined)
        return result

    def create_reduced_image(self, rinput, reduced_data, header_a, header_b,
                             pattern, full_set, voffset_pix, header_mos_abba):
        with contextlib.ExitStack() as stack:
            hduls = [
                stack.enter_context(fname.open())
                for fname in rinput.obresult.frames
            ]
            # Copy header of first image
            base_header = hduls[0][0].header.copy()

            hdu = fits.PrimaryHDU(reduced_data, header=base_header)
            self.set_base_headers(hdu.header)

            self.logger.debug('update result header')
            if header_mos_abba is not None:
                self.logger.debug(
                    'copying wavelength calibration from rectified header')
                crpix1 = header_mos_abba['crpix1']
                crval1 = header_mos_abba['crval1']
                cdelt1 = header_mos_abba['cdelt1']

                # update wavelength calibration in FITS header
                for keyword in ['crval1', 'crpix1', 'crval2', 'crpix2']:
                    if keyword in hdu.header:
                        hdu.header.remove(keyword)
                hdu.header['crpix1'] = (crpix1, 'reference pixel')
                hdu.header['crval1'] = (crval1, 'central wavelength at crpix1')
                hdu.header['cdelt1'] = \
                    (cdelt1, 'linear dispersion (Angstrom/pixel)')
                hdu.header['cunit1'] = ('Angstrom', 'units along axis1')
                hdu.header['ctype1'] = 'WAVELENGTH'
                hdu.header['crpix2'] = (0.0, 'reference pixel')
                hdu.header['crval2'] = (0.0, 'central value at crpix2')
                hdu.header['cdelt2'] = (1.0, 'increment')
                hdu.header['ctype2'] = 'PIXEL'
                hdu.header['cunit2'] = ('Pixel', 'units along axis2')
                for keyword in [
                        'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2', 'PCD1_1', 'PCD1_2',
                        'PCD2_1', 'PCD2_2', 'PCRPIX1', 'PCRPIX2'
                ]:
                    if keyword in hdu.header:
                        hdu.header.remove(keyword)

            # update additional keywords
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = pattern + ' pattern'
            hdu.header['TSUTC2'] = hduls[-1][0].header['TSUTC2']
            hdu.header['NUM-NCOM'] = (len(hduls), 'Number of combined frames')

            # update history
            dm = emirdrp.datamodel.EmirDataModel()
            for img, key in zip(hduls, full_set):
                imgid = dm.get_imgid(img)
                hdu.header['HISTORY'] = "Image '{}' is '{}'".format(imgid, key)

        hdu.header['HISTORY'] = "Processed " + pattern + " pattern"
        hdu.header['HISTORY'] = '--- Reduction of A images ---'
        for line in header_a['HISTORY']:
            hdu.header['HISTORY'] = line
        hdu.header['HISTORY'] = '--- Reduction of B images ---'
        for line in header_b['HISTORY']:
            hdu.header['HISTORY'] = line
        if voffset_pix is not None and voffset_pix != 0:
            hdu.header['HISTORY'] = '--- Combination of AB spectra ---'
            hdu.header['HISTORY'] = "voffset_pix between A and B {}".format(
                voffset_pix)
        hdu.header.add_history('--- rinput.stored() (BEGIN) ---')
        for item in rinput.stored():
            value = getattr(rinput, item)
            cline = '{}: {}'.format(item, value)
            hdu.header.add_history(cline)
        hdu.header.add_history('--- rinput.stored() (END) ---')
        result = fits.HDUList([hdu])
        return result

    def set_base_headers(self, hdr):
        newhdr = super(ABBASpectraFastRectwv, self).set_base_headers(hdr)
        # Update EXP to 0
        newhdr['EXP'] = 0
        return newhdr

    def extract_tags_from_obsres(self, obsres, tag_keys):
        # this function is necessary to make use of recipe requirements
        # that are provided as lists
        final_tags = []
        for frame in obsres.frames:
            ref_img = frame.open()
            tag = self.extract_tags_from_ref(ref_img,
                                             tag_keys,
                                             base=obsres.labels)
            final_tags.append(tag)
        return final_tags
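
A note on the header-history idiom used in create_reduced_image above: with astropy.io.fits, assigning a string to the 'HISTORY' key appends a new card rather than overwriting the previous one, which is why the method can accumulate many history lines through repeated assignments. A minimal self-contained check:

from astropy.io import fits

hdu = fits.PrimaryHDU()
# both spellings append a HISTORY card; nothing is overwritten
hdu.header['HISTORY'] = 'first line'
hdu.header.add_history('second line')
print(list(hdu.header['HISTORY']))  # ['first line', 'second line']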
Beispiel #23
0
class ABBASpectraRectwv(EmirRecipe):
    """Process images in AB or ABBA  mode applying wavelength calibration

    Note that in this case the wavelength calibration has already been
    determined.

    """

    logger = logging.getLogger(__name__)

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_flat = reqs.MasterSpectralFlatFieldRequirement()
    rectwv_coeff = reqs.RectWaveCoeffRequirement(optional=True)
    list_rectwv_coeff = reqs.ListOfRectWaveCoeffRequirement(optional=True)

    pattern = Parameter('ABBA',
                        description='Observation pattern',
                        choices=['AB', 'ABBA'])
    # do not allow 'sum' as combination method in order to avoid
    # confusion with number of counts in resulting image
    method = Parameter('mean',
                       description='Combination method',
                       choices=['mean', 'median', 'sigmaclip'])
    method_kwargs = Parameter(dict(),
                              description='Arguments for combination method',
                              optional=True)
    refine_target_along_slitlet = Parameter(
        dict(),
        description='Parameters to refine location of target along the slitlet'
    )

    reduced_mos_abba = Result(prods.ProcessedMOS)
    reduced_mos_abba_combined = Result(prods.ProcessedMOS)

    def run(self, rinput):

        nimages = len(rinput.obresult.frames)
        pattern = rinput.pattern
        pattern_length = len(pattern)

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))

        # check pattern sequence matches number of images
        if nimages % pattern_length != 0:
            raise ValueError('Number of images is not a multiple of pattern '
                             'length: {}, {}'.format(nimages, pattern_length))
        nsequences = nimages // pattern_length
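        # illustrative example (values assumed): for pattern='ABBA'
        # (pattern_length=4) and nimages=8 the check above passes and
        # nsequences=2, so the expanded sequence computed further below
        # as pattern * nsequences is 'ABBAABBA'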

        # check rectification and wavelength calibration information
        if rinput.rectwv_coeff is None and rinput.list_rectwv_coeff is None:
            raise ValueError('Neither rectwv_coeff nor list_rectwv_coeff '
                             'has been provided')
        elif rinput.rectwv_coeff is not None and \
                rinput.list_rectwv_coeff is not None:
            raise ValueError("rectwv_coeff and list_rectwv_coeff cannot be "
                             "used simultaneously")
        elif rinput.rectwv_coeff is not None:
            list_rectwv_coeff = [rinput.rectwv_coeff] * nimages
        elif rinput.list_rectwv_coeff is not None:
            if len(rinput.list_rectwv_coeff) != nimages:
                raise ValueError("Unexpected number of rectwv_coeff files "
                                 "in list_rectwv_coeff")
            else:
                list_rectwv_coeff = rinput.list_rectwv_coeff
                # check filter and grism are the same in all JSON files
                for item in ['grism', 'filter']:
                    list_values = []
                    for calib in list_rectwv_coeff:
                        list_values.append(calib.tags[item])
                    if len(set(list_values)) != 1:
                        raise ValueError(
                            'list_rectwv_coeff contains coefficients for '
                            'different {}s'.format(item))
        else:
            raise ValueError("Unexpected error!")

        # grism and filter names
        grism_name = list_rectwv_coeff[0].tags['grism']
        filter_name = list_rectwv_coeff[0].tags['filter']

        # compute offsets from WCS info in image headers
        with contextlib.ExitStack() as stack:
            hduls = [
                stack.enter_context(fname.open())
                for fname in rinput.obresult.frames
            ]
            sep_arcsec, spatial_scales = compute_wcs_offsets(hduls)

        sep_pixel = np.round(sep_arcsec / spatial_scales, 6)
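        # sep_arcsec / spatial_scales converts the WCS offsets from
        # arcseconds to detector pixels; e.g. an offset of 3.8 arcsec
        # at 0.19 arcsec/pixel corresponds to 20.0 pixels
        # (numbers purely illustrative)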

        for i in range(nimages):
            self.logger.info(list_rectwv_coeff[i])
        self.logger.info(
            'observation pattern..................: {}'.format(pattern))
        self.logger.info(
            'nsequences...........................: {}'.format(nsequences))
        self.logger.info(
            'offsets (arcsec, from WCS)...........: {}'.format(sep_arcsec))
        self.logger.info(
            'spatial scales (arcsec/pix, from WCS): {}'.format(spatial_scales))
        self.logger.info(
            'spatial scales (pixels, from WCS)....: {}'.format(sep_pixel))

        full_set = pattern * nsequences
        self.logger.info(
            'full set of images...................: {}'.format(full_set))

        # basic parameters to determine useful pixels in the wavelength
        # direction
        dict_rtas = rinput.refine_target_along_slitlet

        valid_keys = [
            'npix_removed_near_ohlines', 'nwidth_medfilt',
            'save_individual_images', 'ab_different_target',
            'vpix_region_a_target', 'vpix_region_a_sky',
            'vpix_region_b_target', 'vpix_region_b_sky',
            'list_valid_wvregions_a', 'list_valid_wvregions_b'
        ]
        for dumkey in dict_rtas.keys():
            if dumkey not in valid_keys:
                raise ValueError('Unexpected key={}'.format(dumkey))

        if 'vpix_region_a_target' in dict_rtas.keys():
            vpix_region_a_target = dict_rtas['vpix_region_a_target']
        else:
            vpix_region_a_target = None

        if 'vpix_region_a_sky' in dict_rtas.keys():
            vpix_region_a_sky = dict_rtas['vpix_region_a_sky']
        else:
            vpix_region_a_sky = None

        if 'vpix_region_b_target' in dict_rtas.keys():
            vpix_region_b_target = dict_rtas['vpix_region_b_target']
        else:
            vpix_region_b_target = None

        if 'vpix_region_b_sky' in dict_rtas.keys():
            vpix_region_b_sky = dict_rtas['vpix_region_b_sky']
        else:
            vpix_region_b_sky = None

        if 'ab_different_target' in dict_rtas.keys():
            ab_different_target = int(dict_rtas['ab_different_target'])
            if ab_different_target not in [-1, 0, 1, 9]:
                raise ValueError('Invalid ab_different_target={} value'.format(
                    ab_different_target))
        else:
            raise ValueError('Missing ab_different_target value')
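        # meaning of ab_different_target, as used near the end of run():
        # 0 -> A and B contain the same target and the A-B offset is
        # refined by cross-correlation of spatial profiles; +1/-1 ->
        # different targets, combined with the nominal WCS offset times
        # that sign; 9 -> no combined A-B image is produced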

        try:
            npix_removed_near_ohlines = \
                int(dict_rtas['npix_removed_near_ohlines'])
        except KeyError:
            npix_removed_near_ohlines = 0
        except ValueError:
            raise ValueError('wrong value: npix_removed_near_ohlines='
                             '{}'.format(
                                 dict_rtas['npix_removed_near_ohlines']))

        if 'list_valid_wvregions_a' in dict_rtas.keys():
            if vpix_region_a_target is None:
                raise ValueError('Unexpected list_valid_wvregions_a when '
                                 'vpix_region_a_target is not set')
            list_valid_wvregions_a = dict_rtas['list_valid_wvregions_a']
        else:
            list_valid_wvregions_a = None

        if 'list_valid_wvregions_b' in dict_rtas.keys():
            if vpix_region_b_target is None:
                raise ValueError('Unexpected list_valid_wvregions_b when '
                                 'vpix_region_b_target is not set')

            list_valid_wvregions_b = dict_rtas['list_valid_wvregions_b']
        else:
            list_valid_wvregions_b = None

        try:
            nwidth_medfilt = int(dict_rtas['nwidth_medfilt'])
        except KeyError:
            nwidth_medfilt = 0
        except ValueError:
            raise ValueError('wrong value: nwidth_medfilt={}'.format(
                dict_rtas['nwidth_medfilt']))

        try:
            save_individual_images = int(dict_rtas['save_individual_images'])
        except KeyError:
            save_individual_images = 0
        except ValueError:
            raise ValueError('wrong value: save_individual_images={}'.format(
                dict_rtas['save_individual_images']))

        self.logger.info(
            'npix_removed_near_ohlines: {}'.format(npix_removed_near_ohlines))
        self.logger.info('nwidth_medfilt: {}'.format(nwidth_medfilt))
        self.logger.info(
            'vpix_region_a_target: {}'.format(vpix_region_a_target))
        self.logger.info(
            'vpix_region_b_target: {}'.format(vpix_region_b_target))
        self.logger.info(
            'list_valid_wvregions_a: {}'.format(list_valid_wvregions_a))
        self.logger.info(
            'list_valid_wvregions_b: {}'.format(list_valid_wvregions_b))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # basic reduction, rectification and wavelength calibration of
        # all the individual images
        list_reduced_mos_images = []
        self.logger.info('starting reduction of individual images')
        for i, char in enumerate(full_set):
            frame = rinput.obresult.frames[i]
            self.logger.info('image {} ({} of {})'.format(
                char, i + 1, nimages))
            self.logger.info('image: {}'.format(frame.filename))
            with frame.open() as f:
                base_header = f[0].header.copy()
                data = f[0].data.astype('float32')
            grism_name_ = base_header['grism']
            if grism_name_ != grism_name:
                raise ValueError('Incompatible grism name in '
                                 'rectwv_coeff.json file and FITS image')
            filter_name_ = base_header['filter']
            if filter_name_ != filter_name:
                raise ValueError('Incompatible filter name in '
                                 'rectwv_coeff.json file and FITS image')
            hdu = fits.PrimaryHDU(data, header=base_header)
            hdu.header['UUID'] = str(uuid.uuid1())
            hdul = fits.HDUList([hdu])
            # basic reduction
            reduced_image = flow(hdul)
            hdr = reduced_image[0].header
            self.set_base_headers(hdr)
            # rectification and wavelength calibration
            reduced_mos_image = apply_rectwv_coeff(reduced_image,
                                                   list_rectwv_coeff[i])
            if save_individual_images != 0:
                self.save_intermediate_img(
                    reduced_mos_image, 'reduced_mos_image_' + char + '_' +
                    frame.filename[:10] + '.fits')
            list_reduced_mos_images.append(reduced_mos_image)

        # intermediate PDF file with crosscorrelation plots
        if self.intermediate_results:
            from matplotlib.backends.backend_pdf import PdfPages
            pdf = PdfPages('crosscorrelation_ab.pdf')
        else:
            pdf = None

        # compute offsets between images
        self.logger.info('computing offsets between individual images')
        first_a = True
        first_b = True
        xisok_a = None
        xisok_b = None
        reference_profile_a = None
        reference_profile_b = None
        refine_a = vpix_region_a_target is not None
        refine_b = vpix_region_b_target is not None
        list_offsets = []
        for i, char in enumerate(full_set):
            if char == 'A' and refine_a:
                refine_image = True
            elif char == 'B' and refine_b:
                refine_image = True
            else:
                refine_image = False
            if refine_image:
                frame = rinput.obresult.frames[i]
                isky = get_isky(i, pattern)
                frame_sky = rinput.obresult.frames[isky]
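                # assumed behaviour of get_isky: it returns the index of
                # the adjacent exposure of the opposite type within each
                # pattern block (e.g. for 'ABBA', image 0 (A) is paired
                # with image 1 (B) and vice versa), so each target frame
                # is sky-subtracted with its nearest companion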
                self.logger.info('image {} ({} of {})'.format(
                    char, i + 1, nimages))
                self.logger.info('image: {}'.format(frame.filename))
                self.logger.info('(sky): {}'.format(frame_sky.filename))
                data = list_reduced_mos_images[i][0].data.copy()
                data_sky = list_reduced_mos_images[isky][0].data
                data -= data_sky
                base_header = list_reduced_mos_images[i][0].header

                # get useful pixels in the wavelength direction
                if char == 'A' and first_a:
                    xisok_a = useful_mos_xpixels(
                        data,
                        base_header,
                        vpix_region=vpix_region_a_target,
                        npix_removed_near_ohlines=npix_removed_near_ohlines,
                        list_valid_wvregions=list_valid_wvregions_a,
                        debugplot=0)
                elif char == 'B' and first_b:
                    xisok_b = useful_mos_xpixels(
                        data,
                        base_header,
                        vpix_region=vpix_region_b_target,
                        npix_removed_near_ohlines=npix_removed_near_ohlines,
                        list_valid_wvregions=list_valid_wvregions_b,
                        debugplot=0)

                if char == 'A':
                    nsmin = vpix_region_a_target[0]
                    nsmax = vpix_region_a_target[1]
                    xisok = xisok_a
                    if vpix_region_a_sky is not None:
                        nsmin_sky = vpix_region_a_sky[0]
                        nsmax_sky = vpix_region_a_sky[1]
                        skysubtraction = True
                    else:
                        nsmin_sky = None
                        nsmax_sky = None
                        skysubtraction = False
                elif char == 'B':
                    nsmin = vpix_region_b_target[0]
                    nsmax = vpix_region_b_target[1]
                    xisok = xisok_b
                    if vpix_region_b_sky is not None:
                        nsmin_sky = vpix_region_b_sky[0]
                        nsmax_sky = vpix_region_b_sky[1]
                        skysubtraction = True
                    else:
                        nsmin_sky = None
                        nsmax_sky = None
                        skysubtraction = False
                else:
                    raise ValueError('Unexpected char value: {}'.format(char))

                # initial slitlet region
                slitlet2d = data[(nsmin - 1):nsmax, :].copy()
                if skysubtraction:
                    slitlet2d_sky = data[(nsmin_sky - 1):nsmax_sky, :].copy()
                    median_sky = np.median(slitlet2d_sky, axis=0)
                    slitlet2d -= median_sky

                # selected wavelength regions after blocking OH lines
                slitlet2d_blocked = slitlet2d[:, xisok]

                # apply median filter in the X direction
                if nwidth_medfilt > 1:
                    slitlet2d_blocked_smoothed = ndimage.median_filter(
                        slitlet2d_blocked, size=(1, nwidth_medfilt))
                else:
                    slitlet2d_blocked_smoothed = slitlet2d_blocked

                # ---
                # convert to 1D series of spatial profiles (note: this
                # option does not work very well when the signal is low;
                # a simple mean profile does a better job)
                # profile = slitlet2d_blocked_smoothed.transpose().ravel()
                # ---
                profile = np.mean(slitlet2d_blocked_smoothed, axis=1)
                if char == 'A' and first_a:
                    reference_profile_a = profile.copy()
                elif char == 'B' and first_b:
                    reference_profile_b = profile.copy()
                # ---
                # # To write pickle file
                # import pickle
                # with open(frame.filename[:10] + '_profile.pkl', 'wb') as f:
                #     pickle.dump(profile, f)
                # # To read pickle file
                # with open('test.pkl', 'rb') as f:
                #     x = pickle.load(f)
                # ---

                # crosscorrelation to find offset
                naround_zero = (nsmax - nsmin) // 3
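                # the peak search is restricted to about one third of the
                # slit height around zero lag, so a spurious secondary
                # correlation peak far from the expected offset cannot be
                # selected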
                if char == 'A':
                    reference_profile = reference_profile_a
                elif char == 'B':
                    reference_profile = reference_profile_b
                else:
                    raise ValueError('Unexpected char value: {}'.format(char))
                offset, fpeak = periodic_corr1d(
                    sp_reference=reference_profile,
                    sp_offset=profile,
                    remove_mean=False,
                    frac_cosbell=0.10,
                    zero_padding=11,
                    fminmax=None,
                    nfit_peak=5,
                    naround_zero=naround_zero,
                    sp_label='spatial profile',
                    plottitle='Image #{} (type {}), {}'.format(
                        i + 1, char, frame.filename[:10]),
                    pdf=pdf)
                # round to 4 decimal places
                if abs(offset) < 1E-4:
                    offset = 0.0  # avoid -0.0
                else:
                    offset = round(offset, 4)
                list_offsets.append(offset)

                # end of loop
                if char == 'A' and first_a:
                    first_a = False
                elif char == 'B' and first_b:
                    first_b = False
            else:
                list_offsets.append(0.0)
        self.logger.info('computed offsets: {}'.format(list_offsets))

        self.logger.info('correcting vertical offsets between individual '
                         'images')
        list_a = []
        list_b = []
        for i, (char, offset) in enumerate(zip(full_set, list_offsets)):
            frame = rinput.obresult.frames[i]
            self.logger.info('image {} ({} of {})'.format(
                char, i + 1, nimages))
            self.logger.info('image: {}'.format(frame.filename))
            reduced_mos_image = list_reduced_mos_images[i]
            data = reduced_mos_image[0].data
            base_header = reduced_mos_image[0].header
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(offset))
            if offset != 0:
                reduced_mos_image[0].data = shift_image2d(
                    data, yoffset=-offset).astype('float32')
            base_header['HISTORY'] = 'Applying voffset_pix {}'.format(offset)
            if save_individual_images != 0:
                self.save_intermediate_img(
                    reduced_mos_image, 'reduced_mos_image_refined_' + char +
                    '_' + frame.filename[:10] + '.fits')

            # store reduced_mos_image
            if char == 'A':
                list_a.append(reduced_mos_image)
            elif char == 'B':
                list_b.append(reduced_mos_image)
            else:
                raise ValueError('Unexpected char value: {}'.format(char))

        # combination method
        method = getattr(combine, rinput.method)
        method_kwargs = rinput.method_kwargs

        # final combination of A images
        self.logger.info('combining individual A images')
        reduced_mos_image_a = combine_imgs(list_a,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        self.save_intermediate_img(reduced_mos_image_a,
                                   'reduced_mos_image_a.fits')

        # final combination of B images
        self.logger.info('combining individual B images')
        reduced_mos_image_b = combine_imgs(list_b,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        self.save_intermediate_img(reduced_mos_image_b,
                                   'reduced_mos_image_b.fits')

        self.logger.info('mixing A and B spectra')
        header_a = reduced_mos_image_a[0].header
        header_b = reduced_mos_image_b[0].header
        data_a = reduced_mos_image_a[0].data.astype('float32')
        data_b = reduced_mos_image_b[0].data.astype('float32')

        reduced_mos_abba_data = data_a - data_b

        # update reduced mos image header
        reduced_mos_abba = self.create_mos_abba_image(rinput,
                                                      dict_rtas,
                                                      reduced_mos_abba_data,
                                                      header_a,
                                                      header_b,
                                                      pattern,
                                                      full_set,
                                                      list_offsets,
                                                      voffset_pix=0)

        # combine A and B data by shifting B on top of A
        if ab_different_target == 0:
            len_prof_a = len(reference_profile_a)
            len_prof_b = len(reference_profile_b)
            if len_prof_a == len_prof_b:
                reference_profile = reference_profile_a
                profile = reference_profile_b
                naround_zero = len_prof_a // 3
            elif len_prof_a > len_prof_b:
                ndiff = len_prof_a - len_prof_b
                reference_profile = reference_profile_a
                profile = np.concatenate(
                    (reference_profile_b, np.zeros(ndiff, dtype='float')))
                naround_zero = len_prof_a // 3
            else:
                ndiff = len_prof_b - len_prof_a
                reference_profile = np.concatenate(
                    (reference_profile_a, np.zeros(ndiff, dtype='float')))
                profile = reference_profile_b
                naround_zero = len_prof_b // 3
            offset, fpeak = periodic_corr1d(
                sp_reference=reference_profile,
                sp_offset=profile,
                remove_mean=False,
                frac_cosbell=0.10,
                zero_padding=11,
                fminmax=None,
                nfit_peak=5,
                naround_zero=naround_zero,
                sp_label='spatial profile',
                plottitle='Comparison of A and B profiles',
                pdf=pdf)
            voffset_pix = vpix_region_b_target[0] - vpix_region_a_target[0]
            voffset_pix += offset
        elif abs(ab_different_target) == 1:
            # apply nominal offset (from WCS info) between first A
            # and first B
            voffset_pix = sep_pixel[1] * ab_different_target
        elif ab_different_target == 9:
            voffset_pix = None
        else:
            raise ValueError(
                'Invalid ab_different_target={}'.format(ab_different_target))

        # close output PDF file
        if pdf is not None:
            pdf.close()

        if voffset_pix is not None:
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(voffset_pix))
            shifted_a_minus_b_data = shift_image2d(
                reduced_mos_abba_data,
                yoffset=-voffset_pix,
            ).astype('float32')
            reduced_mos_abba_combined_data = \
                reduced_mos_abba_data - shifted_a_minus_b_data
            # scale signal to exposure of a single image
            reduced_mos_abba_combined_data /= 2.0
        else:
            reduced_mos_abba_combined_data = None

        # update reduced mos combined image header
        reduced_mos_abba_combined = self.create_mos_abba_image(
            rinput, dict_rtas, reduced_mos_abba_combined_data, header_a,
            header_b, pattern, full_set, list_offsets, voffset_pix)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(list_rectwv_coeff[0])
            save_spectral_lines_ds9(list_rectwv_coeff[0])

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos_abba,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of ABBA spectra')
        result = self.create_result(
            reduced_mos_abba=reduced_mos_abba,
            reduced_mos_abba_combined=reduced_mos_abba_combined)
        return result

    def create_mos_abba_image(self, rinput, dict_rtas, reduced_mos_abba_data,
                              header_a, header_b, pattern, full_set,
                              list_offsets, voffset_pix):
        with contextlib.ExitStack() as stack:
            hduls = [
                stack.enter_context(fname.open())
                for fname in rinput.obresult.frames
            ]
            # Copy header of first image
            base_header = hduls[0][0].header.copy()

            hdu = fits.PrimaryHDU(reduced_mos_abba_data, header=base_header)
            self.set_base_headers(hdu.header)

            # check consistency of wavelength calibration parameters
            for param in ['crpix1', 'crval1', 'cdelt1']:
                if header_a[param] != header_b[param]:
                    raise ValueError(
                        'Headers of A and B images have different '
                        'values of {}'.format(param))
            self.logger.debug('update result header')
            crpix1 = header_a['crpix1']
            crval1 = header_a['crval1']
            cdelt1 = header_a['cdelt1']

            # update wavelength calibration in FITS header
            for keyword in ['crval1', 'crpix1', 'crval2', 'crpix2']:
                if keyword in hdu.header:
                    hdu.header.remove(keyword)
            hdu.header['crpix1'] = (crpix1, 'reference pixel')
            hdu.header['crval1'] = (crval1, 'central wavelength at crpix1')
            hdu.header['cdelt1'] = (cdelt1,
                                    'linear dispersion (Angstrom/pixel)')
            hdu.header['cunit1'] = ('Angstrom', 'units along axis1')
            hdu.header['ctype1'] = 'WAVELENGTH'
            hdu.header['crpix2'] = (0.0, 'reference pixel')
            hdu.header['crval2'] = (0.0, 'central value at crpix2')
            hdu.header['cdelt2'] = (1.0, 'increment')
            hdu.header['ctype2'] = 'PIXEL'
            hdu.header['cunit2'] = ('Pixel', 'units along axis2')
            for keyword in [
                    'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2', 'PCD1_1', 'PCD1_2',
                    'PCD2_1', 'PCD2_2', 'PCRPIX1', 'PCRPIX2'
            ]:
                if keyword in hdu.header:
                    hdu.header.remove(keyword)

            # update additional keywords
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = pattern + ' pattern'
            hdu.header['TSUTC2'] = hduls[-1][0].header['TSUTC2']
            hdu.header['NUM-NCOM'] = (len(hduls), 'Number of combined frames')

            # update history
            hdu.header['HISTORY'] = "Processed " + pattern + " pattern"
            hdu.header['HISTORY'] = '--- Reduction of A images ---'
            for line in header_a['HISTORY']:
                hdu.header['HISTORY'] = line
            hdu.header['HISTORY'] = '--- Reduction of B images ---'
            for line in header_b['HISTORY']:
                hdu.header['HISTORY'] = line
            hdu.header['HISTORY'] = '--- Combination of ABBA images ---'
            for key in dict_rtas:
                hdu.header['HISTORY'] = '{}: {}'.format(key, dict_rtas[key])
            dm = emirdrp.datamodel.EmirDataModel()
            for img, key, offset in zip(hduls, full_set, list_offsets):
                imgid = dm.get_imgid(img)
                hdu.header['HISTORY'] = \
                    "Image '{}' is '{}', with voffset_pix {}".format(
                        imgid, key, offset)

        if voffset_pix is not None and voffset_pix != 0:
            hdu.header['HISTORY'] = '--- Combination of AB spectra ---'
            hdu.header['HISTORY'] = "voffset_pix between A and B {}".format(
                voffset_pix)
        hdu.header.add_history('--- rinput.stored() (BEGIN) ---')
        for item in rinput.stored():
            value = getattr(rinput, item)
            cline = '{}: {}'.format(item, value)
            hdu.header.add_history(cline)
        hdu.header.add_history('--- rinput.stored() (END) ---')
        result = fits.HDUList([hdu])
        return result

    def set_base_headers(self, hdr):
        newhdr = super(ABBASpectraRectwv, self).set_base_headers(hdr)
        # Update EXP to 0
        newhdr['EXP'] = 0
        return newhdr

    def extract_tags_from_obsres(self, obsres, tag_keys):
        # this function is necessary to make use of recipe requirements
        # that are provided as lists
        final_tags = []
        for frame in obsres.frames:
            ref_img = frame.open()
            tag = self.extract_tags_from_ref(ref_img,
                                             tag_keys,
                                             base=obsres.labels)
            final_tags.append(tag)
        return final_tags
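
The vertical-offset refinement in both ABBA recipes rests on cross-correlating 1D spatial profiles. The sketch below reproduces that core idea with plain NumPy; it is a simplified stand-in (periodic_corr1d in numina additionally applies a cosine-bell taper, zero padding, configurable peak fitting and optional plotting), and simple_profile_offset is a hypothetical name, not part of the pipeline:

import numpy as np

def simple_profile_offset(reference, profile, naround_zero):
    """Estimate the shift of profile relative to reference (sketch).

    Circular cross-correlation via FFT, peak search restricted to
    +/- naround_zero pixels, parabolic sub-pixel refinement.
    """
    n = len(reference)
    # circular cross-correlation computed in Fourier space
    corr = np.fft.irfft(np.fft.rfft(reference).conj() *
                        np.fft.rfft(profile), n)
    # map FFT bins to signed lags 0, 1, ..., n//2, ..., -2, -1
    lags = np.arange(n)
    lags[lags > n // 2] -= n
    # keep only lags inside the allowed search window
    valid = np.abs(lags) <= naround_zero
    ipeak = np.flatnonzero(valid)[np.argmax(corr[valid])]
    # parabolic interpolation around the peak for sub-pixel precision
    ym1 = corr[(ipeak - 1) % n]
    y0 = corr[ipeak]
    yp1 = corr[(ipeak + 1) % n]
    denom = ym1 - 2.0 * y0 + yp1
    frac = 0.5 * (ym1 - yp1) / denom if denom != 0 else 0.0
    return lags[ipeak] + frac

# toy usage: a Gaussian profile shifted by 3 pixels is recovered
x = np.arange(100)
ref = np.exp(-0.5 * ((x - 50) / 4.0) ** 2)
print(simple_profile_offset(ref, np.roll(ref, 3), naround_zero=30))  # ~3.0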
Beispiel #24
0
class BarDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.NominalPositions,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    average_box_row_size = Parameter(
        7, 'Number of rows to average for fine centering (odd)')
    average_box_col_size = Parameter(
        21, 'Number of columns to extract for fine centering (odd)')
    fit_peak_npoints = Parameter(
        3, 'Number of points to use for fitting the peak (odd)')

    # Recipe Products
    frame = Result(prods.ProcessedImage)
    # derivative = Result(prods.ProcessedImage)
    slits = Result(tarray.ArrayType)
    positions3 = Result(tarray.ArrayType)
    positions5 = Result(tarray.ArrayType)
    positions7 = Result(tarray.ArrayType)
    positions9 = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    TSUTC1 = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)

    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.save_intermediate_img(hdulist, 'reduced_image.fits')

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            if len(csupos) != 2 * EMIR_NBARS:
                raise RecipeError('Number of CSUPOS != 2 * NBARS')
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('start finding bars')
        allpos, slits = find_bars(
            hdulist,
            rinput.bars_nominal_positions,
            csupos,
            dtur,
            average_box_row_size=rinput.average_box_row_size,
            average_box_col_size=rinput.average_box_col_size,
            fit_peak_npoints=rinput.fit_peak_npoints,
            median_filter_size=rinput.median_filter_size,
            logger=self.logger)
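        # allpos is (presumably) keyed by the number of rows averaged
        # during the fine centering (3, 5, 7 and 9), matching the
        # positions3 ... positions9 results declared above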

        self.logger.debug('end finding bars')

        if self.intermediate_results:
            with open('ds9.reg', 'w') as ds9reg:
                slits_to_ds9_reg(ds9reg, slits)

        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result
Beispiel #25
0
class BarDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.CoordinateList2DType,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the canny algorithm')
    canny_high_threshold = Parameter(0.04,
                                     'High threshold for the canny algorithm')
    canny_low_threshold = Parameter(0.01,
                                    'Low threshold for the canny algorithm')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    positions = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)
    param_median_filter_size = Result(float)
    param_canny_high_threshold = Result(float)
    param_canny_low_threshold = Result(float)

    def run(self, rinput):

        logger = logging.getLogger('numina.recipes.emir')

        logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            logger.error(error)
            raise numina.exceptions.RecipeError(error)

        logger.debug('finding bars')

        arr = hdulist[0].data

        # Median filter
        logger.debug('median filtering')
        mfilter_size = rinput.median_filter_size

        arr_median = median_filter(arr, size=mfilter_size)

        # Image is mapped between 0 and 1
        # for the full range [0: 2**16]
        logger.debug('image scaling to 0-1')
        arr_grey = normalize_raw(arr_median)

        # Find borders
        logger.debug('find borders')
        canny_sigma = rinput.canny_sigma
        # These thresholds correspond roughly to
        # value x (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold

        edges = canny(arr_grey,
                      sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)

        # Number of rows used
        # These other parameters can also be tuned
        total = 5
        maxdist = 1.0
        bstart = 100
        bend = 1900

        positions = []
        nt = total // 2

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        logger.debug('DTU shift is %s', vec)

        # Based on the 'edges image'
        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions

        # Currently, we only use fields 0 and 2
        # of the nominal positions file

        for coords in barstab:
            lbarid = int(coords[0])
            rbarid = lbarid + 55
            ref_y_coor = coords[2] + vec[1]
            prow = coor_to_pix_1d(ref_y_coor) - 1
            fits_row = prow + 1  # FITS pixel index

            logger.debug('looking for bars with ids %d - %d', lbarid, rbarid)
            logger.debug('reference y position is Y %7.2f', ref_y_coor)
            # Find the position of each bar

            bpos = find_position(edges, prow, bstart, bend, total)

            nbars_found = len(bpos)

            # If no bar is found, append an empty token
            if nbars_found == 0:
                logger.debug('bars %d, %d not found at row %d', lbarid, rbarid,
                             fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            elif nbars_found == 2:

                # Order values by increasing X
                centl, centr = sorted(bpos, key=lambda cen: cen[0])
                c1 = centl[0]
                c2 = centr[0]

                logger.debug('bars found at row %d between %7.2f - %7.2f',
                             fits_row, c1, c2)
                # Compute FWHM of the collapsed profile

                cslit = arr_grey[prow - nt:prow + nt + 1, :]
                pslit = cslit.mean(axis=0)

                # Add 1 to return FITS coordinates
                epos, epos_f, error = locate_bar_l(pslit, c1)
                thisres1 = lbarid, fits_row, epos + 1, epos_f + 1, error

                epos, epos_f, error = locate_bar_r(pslit, c2)
                thisres2 = rbarid, fits_row, epos + 1, epos_f + 1, error

            elif nbars_found == 1:
                logger.warning(
                    'only 1 edge found at row %d, not yet implemented',
                    fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            else:
                logger.warning(
                    '3 or more edges found at row %d, not yet implemented',
                    fits_row)
                thisres1 = (lbarid, fits_row, 0, 0, 1)
                thisres2 = (rbarid, fits_row, 0, 0, 1)

            positions.append(thisres1)
            positions.append(thisres2)

        logger.debug('end finding bars')
        result = self.create_result(
            frame=hdulist,
            positions=positions,
            DTU=dtub,
            ROTANG=rotang,
            csupos=csupos,
            csusens=csusens,
            param_median_filter_size=rinput.median_filter_size,
            param_canny_high_threshold=rinput.canny_high_threshold,
            param_canny_low_threshold=rinput.canny_low_threshold)
        return result
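
The scaling described in the comments above ('mapped between 0 and 1 for the full range [0: 2**16]') can be reproduced with a one-liner. The function below is an assumed stand-in for emirdrp's normalize_raw, written only to make the threshold convention concrete, not its actual implementation:

import numpy as np

def normalize_raw_sketch(arr, full_range=2**16 - 1):
    # assumed behaviour: clip raw counts to [0, full_range] and rescale
    # to [0.0, 1.0]; under this scaling the Canny thresholds used above
    # (0.04 and 0.01) correspond roughly to threshold * (2**16 - 1)
    # counts in the original image
    return np.clip(arr, 0, full_range).astype('float32') / full_range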
Beispiel #26
0
class TestSlitDetectionRecipe(EmirRecipe):

    # Recipe Requirements
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the canny algorithm')
    canny_high_threshold = Parameter(0.04, 'High threshold for the Canny algorithm')
    canny_low_threshold = Parameter(0.01, 'Low threshold for the Canny algorithm')

    # Recipe Results
    frame = Result(prods.ProcessedImage)
    slitstable = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    DETPA = Result(float)
    DTUPA = Result(float)

    def run(self, rinput):
        self.logger.info('starting slit processing')

        self.logger.info('basic image reduction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)
        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('finding slits')

        # Filter values below 0.0
        self.logger.debug('Filter values below 0')
        data1 = hdulist[0].data[:]

        data1[data1 < 0.0] = 0.0
        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma

        self.logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize_raw(data2)

        # Find edges with Canny
        self.logger.debug('Find edges, Canny sigma %f', canny_sigma)
        # These thresholds correspond roughly to
        # value x (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold
        self.logger.debug('Find edges, Canny high threshold %f', high_threshold)
        self.logger.debug('Find edges, Canny low threshold %f', low_threshold)
        edges = canny(img_grey, sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)
        
        # Fill edges
        self.logger.debug('Fill holes')
        # I do a dilation and erosion to fill
        # possible holes in 'edges'
        fill = ndimage.binary_dilation(edges)
        fill2 = ndimage.binary_fill_holes(fill)
        fill_slits = ndimage.binary_erosion(fill2)

        self.logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        self.logger.debug('%d objects found', nb_labels)
        ids = list(range(1, nb_labels + 1))

        self.logger.debug('Find regions and centers')
        regions = ndimage.find_objects(label_objects)
        centers = ndimage.center_of_mass(data2, labels=label_objects,
                                         index=ids
                                         )

        table = char_slit(data2, regions,
                          slit_size_ratio=-1.0
                          )

        result = self.create_result(frame=hdulist,
                                    slitstable=table,
                                    DTU=dtub,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa
                                    )

        return result
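
The dilation / hole-filling / erosion sequence used above is a standard morphological trick for turning a Canny edge outline with small gaps into solid labelled regions. A self-contained toy demonstration (data invented for illustration):

import numpy as np
from scipy import ndimage

# a rectangle outline with a one-pixel gap in its top edge
edges = np.zeros((7, 9), dtype=bool)
edges[1, 1:8] = True
edges[5, 1:8] = True
edges[1:6, 1] = True
edges[1:6, 7] = True
edges[1, 4] = False  # the gap

fill = ndimage.binary_dilation(edges)    # bridges the gap
fill2 = ndimage.binary_fill_holes(fill)  # fills the interior
solid = ndimage.binary_erosion(fill2)    # undoes the dilation growth
labels, nlabels = ndimage.label(solid)
print(nlabels)  # 1: a single solid slit-like region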
Beispiel #27
0
class SpecFlatLowFreq(EmirRecipe):
    """Process continuum exposures of continuum lamp (lamp ON-OFF)

    """

    logger = logging.getLogger(__name__)

    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_flat = reqs.MasterSpectralFlatFieldRequirement()
    rectwv_coeff = reqs.RectWaveCoeffRequirement(optional=True)
    master_rectwv = reqs.MasterRectWaveRequirement(optional=True)

    # note that 'sum' is not allowed as combination method
    method = Parameter('sigmaclip',
                       description='Combination method',
                       choices=['mean', 'median', 'sigmaclip'])
    method_kwargs = Parameter(dict(),
                              description='Arguments for combination method',
                              optional=True)
    minimum_slitlet_width_mm = Parameter(
        float(EMIR_MINIMUM_SLITLET_WIDTH_MM),
        description='Minimum width (mm) for a valid slitlet',
        optional=True)
    maximum_slitlet_width_mm = Parameter(
        float(EMIR_MAXIMUM_SLITLET_WIDTH_MM),
        description='Maximum width (mm) for a valid slitlet',
        optional=True)
    global_integer_offset_x_pix = Parameter(
        0,
        description='Global offset (pixels) in wavelength direction (integer)',
        optional=True)
    global_integer_offset_y_pix = Parameter(
        0,
        description='Global offset (pixels) in spatial direction (integer)',
        optional=True)
    nwindow_x_median = Parameter(
        301,
        description='Window size to smooth the flatfield in the spectral '
        'direction',
        optional=True)
    nwindow_y_median = Parameter(
        11,
        description='Window size to smooth the flatfield in the spatial '
        'direction',
        optional=True)
    minimum_fraction = Parameter(
        0.01,
        description='Minimum useful flatfielding value (fraction of maximum)',
        optional=True)
    minimum_value_in_output = Parameter(
        0.01,
        description='Minimum value allowed in output: pixels below this value'
        ' are set to 1.0',
        optional=True)
    maximum_value_in_output = Parameter(
        10.0,
        description='Maximum value allowed in output: pixels above this value'
        ' are set to 1.0',
        optional=True)
    debugplot = Parameter(0, description='Debugger parameter', optional=True)

    reduced_flatlowfreq = Result(prods.MasterSpectralFlat)

    def run(self, rinput):
        self.logger.info('starting generation of flatlowfreq')

        self.logger.info('rectwv_coeff..........................: {}'.format(
            rinput.rectwv_coeff))
        self.logger.info('master_rectwv.........................: {}'.format(
            rinput.master_rectwv))
        self.logger.info('Minimum slitlet width (mm)............: {}'.format(
            rinput.minimum_slitlet_width_mm))
        self.logger.info('Maximum slitlet width (mm)............: {}'.format(
            rinput.maximum_slitlet_width_mm))
        self.logger.info('Global offset X direction (pixels)....: {}'.format(
            rinput.global_integer_offset_x_pix))
        self.logger.info('Global offset Y direction (pixels)....: {}'.format(
            rinput.global_integer_offset_y_pix))
        self.logger.info('nwindow_x_median......................: {}'.format(
            rinput.nwindow_x_median))
        self.logger.info('nwindow_y_median......................: {}'.format(
            rinput.nwindow_y_median))
        self.logger.info('Minimum fraction......................: {}'.format(
            rinput.minimum_fraction))
        self.logger.info('Minimum value in output...............: {}'.format(
            rinput.minimum_value_in_output))
        self.logger.info('Maximum value in output...............: {}'.format(
            rinput.maximum_value_in_output))

        # check rectification and wavelength calibration information
        if rinput.master_rectwv is None and rinput.rectwv_coeff is None:
            raise ValueError('Neither master_rectwv nor rectwv_coeff '
                             'has been provided')
        elif rinput.master_rectwv is not None and \
                rinput.rectwv_coeff is not None:
            self.logger.warning('rectwv_coeff will be used instead of '
                                'master_rectwv')
        if rinput.rectwv_coeff is not None and \
                (rinput.global_integer_offset_x_pix != 0 or
                 rinput.global_integer_offset_y_pix != 0):
            raise ValueError('global_integer_offsets cannot be used '
                             'simultaneously with rectwv_coeff')

        # check headers to detect lamp status (on/off)
        list_lampincd = []
        for fname in rinput.obresult.frames:
            with fname.open() as f:
                list_lampincd.append(f[0].header['lampincd'])

        # check number of images
        nimages = len(rinput.obresult.frames)
        n_on = list_lampincd.count(1)
        n_off = list_lampincd.count(0)
        self.logger.info(
            'Number of images with lamp ON.........: {}'.format(n_on))
        self.logger.info(
            'Number of images with lamp OFF........: {}'.format(n_off))
        self.logger.info(
            'Total number of images................: {}'.format(nimages))
        if n_on == 0:
            raise ValueError('Insufficient number of images with lamp ON')
        if n_on + n_off != nimages:
            raise ValueError('Number of images does not match!')

        # check combination method
        if rinput.method_kwargs == {}:
            method_kwargs = None
        else:
            if rinput.method == 'sigmaclip':
                method_kwargs = rinput.method_kwargs
            else:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))
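        # illustrative (kwargs names assumed): with method='sigmaclip'
        # something like method_kwargs={'low': 3.0, 'high': 3.0} would
        # set the clipping limits; any other method must leave
        # method_kwargs empty, as enforced above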

        # build object to proceed with bpm, bias, and dark (not flat)
        flow = self.init_filters(rinput)

        # available combination methods
        method = getattr(combine, rinput.method)

        # basic reduction of images with lamp ON or OFF
        lampmode = {0: 'off', 1: 'on'}
        reduced_image_on = None
        reduced_image_off = None
        for imode in lampmode.keys():
            self.logger.info('starting basic reduction of images with'
                             ' lamp {}'.format(lampmode[imode]))
            tmplist = [
                rinput.obresult.frames[i]
                for i, lampincd in enumerate(list_lampincd)
                if lampincd == imode
            ]
            if len(tmplist) > 0:
                with contextlib.ExitStack() as stack:
                    hduls = [
                        stack.enter_context(fname.open()) for fname in tmplist
                    ]
                    reduced_image = combine_imgs(hduls,
                                                 method=method,
                                                 method_kwargs=method_kwargs,
                                                 errors=False,
                                                 prolog=None)
                if imode == 0:
                    reduced_image_off = flow(reduced_image)
                    hdr = reduced_image_off[0].header
                    self.set_base_headers(hdr)
                    self.save_intermediate_img(reduced_image_off,
                                               'reduced_image_off.fits')
                elif imode == 1:
                    reduced_image_on = flow(reduced_image)
                    hdr = reduced_image_on[0].header
                    self.set_base_headers(hdr)
                    self.save_intermediate_img(reduced_image_on,
                                               'reduced_image_on.fits')
                else:
                    raise ValueError('Unexpected imode={}'.format(imode))

        # computation of ON-OFF
        header_on = reduced_image_on[0].header
        data_on = reduced_image_on[0].data.astype('float32')
        if n_off > 0:
            header_off = reduced_image_off[0].header
            data_off = reduced_image_off[0].data.astype('float32')
        else:
            header_off = None
            data_off = np.zeros_like(data_on)
        reduced_data = data_on - data_off

        # update reduced image header
        reduced_image = self.create_reduced_image(rinput, reduced_data,
                                                  header_on, header_off,
                                                  list_lampincd)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # define rectification and wavelength calibration coefficients
        if rinput.rectwv_coeff is None:
            rectwv_coeff = rectwv_coeff_from_mos_library(
                reduced_image, rinput.master_rectwv)
            # set global offsets
            rectwv_coeff.global_integer_offset_x_pix = \
                rinput.global_integer_offset_x_pix
            rectwv_coeff.global_integer_offset_y_pix = \
                rinput.global_integer_offset_y_pix
        else:
            rectwv_coeff = rinput.rectwv_coeff
        # save as JSON in work directory
        self.save_structured_as_json(rectwv_coeff, 'rectwv_coeff.json')
        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # apply global offsets (to both, the original and the cleaned version)
        image2d = apply_integer_offsets(
            image2d=reduced_data,
            offx=rectwv_coeff.global_integer_offset_x_pix,
            offy=rectwv_coeff.global_integer_offset_y_pix)

        # load CSU configuration
        csu_conf = CsuConfiguration.define_from_header(reduced_image[0].header)
        # determine (pseudo) longslits
        dict_longslits = csu_conf.pseudo_longslits()

        # valid slitlet numbers
        list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
        for idel in rectwv_coeff.missing_slitlets:
            self.logger.info('-> Removing slitlet (not defined): ' + str(idel))
            list_valid_islitlets.remove(idel)
        # filter out slitlets with widths outside valid range
        list_outside_valid_width = []
        for islitlet in list_valid_islitlets:
            slitwidth = csu_conf.csu_bar_slit_width(islitlet)
            if (slitwidth < rinput.minimum_slitlet_width_mm) or \
                    (slitwidth > rinput.maximum_slitlet_width_mm):
                list_outside_valid_width.append(islitlet)
                self.logger.info('-> Removing slitlet (width out of range): ' +
                                 str(islitlet))
        if len(list_outside_valid_width) > 0:
            for idel in list_outside_valid_width:
                list_valid_islitlets.remove(idel)

        # initialize rectified image
        image2d_flatfielded = np.zeros((EMIR_NAXIS2, EMIR_NAXIS1))

        # main loop
        grism_name = rectwv_coeff.tags['grism']
        filter_name = rectwv_coeff.tags['filter']
        cout = '0'
        debugplot = rinput.debugplot
        for islitlet in list(range(1, EMIR_NBARS + 1)):
            if islitlet in list_valid_islitlets:
                # define Slitlet2D object
                slt = Slitlet2D(islitlet=islitlet,
                                rectwv_coeff=rectwv_coeff,
                                debugplot=debugplot)
                if abs(slt.debugplot) > 10:
                    print(slt)

                # extract (distorted) slitlet from the initial image
                slitlet2d = slt.extract_slitlet2d(image_2k2k=image2d,
                                                  subtitle='original image')

                # rectify slitlet
                slitlet2d_rect = slt.rectify(
                    slitlet2d=slitlet2d,
                    resampling=2,
                    subtitle='original (cleaned) rectified')
                naxis2_slitlet2d, naxis1_slitlet2d = slitlet2d_rect.shape

                if naxis1_slitlet2d != EMIR_NAXIS1:
                    raise ValueError(
                        'Unexpected naxis1_slitlet2d: {} (expected {})'.format(
                            naxis1_slitlet2d, EMIR_NAXIS1))

                # get useful slitlet region (use boundaries)
                spectrail = slt.list_spectrails[0]
                yy0 = slt.corr_yrect_a + \
                      slt.corr_yrect_b * spectrail(slt.x0_reference)
                ii1 = int(yy0 + 0.5) - slt.bb_ns1_orig
                spectrail = slt.list_spectrails[2]
                yy0 = slt.corr_yrect_a + \
                      slt.corr_yrect_b * spectrail(slt.x0_reference)
                ii2 = int(yy0 + 0.5) - slt.bb_ns1_orig
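                # ii1 and ii2 are array rows, within the slitlet bounding
                # box, of the lower and upper boundary spectrum trails
                # (list_spectrails[0] and [2]); the middle trail (index 1)
                # is not needed here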

                # median spectrum
                sp_collapsed = np.median(slitlet2d_rect[ii1:(ii2 + 1), :],
                                         axis=0)

                # smooth median spectrum along the spectral direction
                sp_median = ndimage.median_filter(sp_collapsed,
                                                  5,
                                                  mode='nearest')

                ymax_spmedian = sp_median.max()
                y_threshold = ymax_spmedian * rinput.minimum_fraction
                lremove = np.where(sp_median < y_threshold)
                sp_median[lremove] = 0.0

                if abs(slt.debugplot) % 10 != 0:
                    xaxis1 = np.arange(1, naxis1_slitlet2d + 1)
                    title = 'Slitlet#' + str(islitlet) + ' (median spectrum)'
                    ax = ximplotxy(xaxis1, sp_collapsed, title=title,
                                   show=False, label='collapsed spectrum')
                    ax.plot(xaxis1, sp_median, label='fitted spectrum')
                    ax.plot([1, naxis1_slitlet2d],
                            2 * [y_threshold],
                            label='threshold')
                    # ax.plot(xknots, yknots, 'o', label='knots')
                    ax.legend()
                    ax.set_ylim(-0.05 * ymax_spmedian, 1.05 * ymax_spmedian)
                    pause_debugplot(slt.debugplot,
                                    pltshow=True,
                                    tight_layout=True)

                # generate rectified slitlet region filled with the
                # median spectrum
                slitlet2d_rect_spmedian = np.tile(sp_median,
                                                  (naxis2_slitlet2d, 1))
                if abs(slt.debugplot) % 10 != 0:
                    slt.ximshow_rectified(
                        slitlet2d_rect=slitlet2d_rect_spmedian,
                        subtitle='rectified, filled with median spectrum')

                # compute smooth surface
                # clipped region
                slitlet2d_rect_clipped = slitlet2d_rect_spmedian.copy()
                slitlet2d_rect_clipped[:(ii1 - 1), :] = 0.0
                slitlet2d_rect_clipped[(ii2 + 2):, :] = 0.0
                # unrectified clipped image
                slitlet2d_unrect_clipped = slt.rectify(
                    slitlet2d=slitlet2d_rect_clipped,
                    resampling=2,
                    inverse=True,
                    subtitle='unrectified, filled with median spectrum '
                    '(clipped)')
                # normalize initial slitlet image (avoid division by zero)
                slitlet2d_norm_clipped = np.zeros_like(slitlet2d)
                for j in range(naxis1_slitlet2d):
                    for i in range(naxis2_slitlet2d):
                        den = slitlet2d_unrect_clipped[i, j]
                        if den == 0:
                            slitlet2d_norm_clipped[i, j] = 1.0
                        else:
                            slitlet2d_norm_clipped[i, j] = \
                                slitlet2d[i, j] / den
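                # an equivalent vectorized sketch of the two loops above:
                #     den = slitlet2d_unrect_clipped
                #     slitlet2d_norm_clipped = np.ones_like(slitlet2d)
                #     np.divide(slitlet2d, den,
                #               out=slitlet2d_norm_clipped,
                #               where=(den != 0))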
                # set one additional pixel at each border to 1.0 (since
                # 'den' above is small at the borders and generates
                # spurious bright pixels)
                slitlet2d_norm_clipped = fix_pix_borders(
                    image2d=slitlet2d_norm_clipped,
                    nreplace=1,
                    sought_value=1.0,
                    replacement_value=1.0)
                slitlet2d_norm_clipped = slitlet2d_norm_clipped.transpose()
                slitlet2d_norm_clipped = fix_pix_borders(
                    image2d=slitlet2d_norm_clipped,
                    nreplace=1,
                    sought_value=1.0,
                    replacement_value=1.0)
                slitlet2d_norm_clipped = slitlet2d_norm_clipped.transpose()
                slitlet2d_norm_smooth = ndimage.median_filter(
                    slitlet2d_norm_clipped,
                    size=(rinput.nwindow_y_median, rinput.nwindow_x_median),
                    mode='nearest')

                if abs(slt.debugplot) % 10 != 0:
                    slt.ximshow_unrectified(
                        slitlet2d=slitlet2d_norm_clipped,
                        subtitle='unrectified, pixel-to-pixel (clipped)')
                    slt.ximshow_unrectified(
                        slitlet2d=slitlet2d_norm_smooth,
                        subtitle='unrectified, pixel-to-pixel (smoothed)')

                # ---

                # check for (pseudo) longslit with previous and next slitlet
                imin = dict_longslits[islitlet].imin()
                imax = dict_longslits[islitlet].imax()
                if islitlet > 1:
                    same_slitlet_below = (islitlet - 1) >= imin
                else:
                    same_slitlet_below = False
                if islitlet < EMIR_NBARS:
                    same_slitlet_above = (islitlet + 1) <= imax
                else:
                    same_slitlet_above = False

                for j in range(EMIR_NAXIS1):
                    xchannel = j + 1
                    y0_lower = slt.list_frontiers[0](xchannel)
                    y0_upper = slt.list_frontiers[1](xchannel)
                    n1, n2 = nscan_minmax_frontiers(y0_frontier_lower=y0_lower,
                                                    y0_frontier_upper=y0_upper,
                                                    resize=True)
                    # note that n1 and n2 are scans (ranging from 1 to NAXIS2)
                    nn1 = n1 - slt.bb_ns1_orig + 1
                    nn2 = n2 - slt.bb_ns1_orig + 1
                    image2d_flatfielded[(n1 - 1):n2, j] = \
                        slitlet2d_norm_smooth[(nn1 - 1):nn2, j]
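                    # the (n1 - 1):n2 slice converts 1-based scan numbers
                    # into 0-based array rows; nn1/nn2 are the same scans
                    # referred to the slitlet bounding box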

                    # force to 1.0 region around frontiers
                    if not same_slitlet_below:
                        image2d_flatfielded[(n1 - 1):(n1 + 2), j] = 1
                    if not same_slitlet_above:
                        image2d_flatfielded[(n2 - 5):n2, j] = 1
                cout += '.'
            else:
                cout += 'i'

            if islitlet % 10 == 0:
                if cout != 'i':
                    cout = str(islitlet // 10)

            self.logger.info(cout)

        # restore global offsets
        image2d_flatfielded = apply_integer_offsets(
            image2d=image2d_flatfielded,
            offx=-rectwv_coeff.global_integer_offset_x_pix,
            offy=-rectwv_coeff.global_integer_offset_y_pix)

        # set pixels below minimum value to 1.0
        filtered = np.where(
            image2d_flatfielded < rinput.minimum_value_in_output)
        image2d_flatfielded[filtered] = 1.0

        # set pixels above maximum value to 1.0
        filtered = np.where(
            image2d_flatfielded > rinput.maximum_value_in_output)
        image2d_flatfielded[filtered] = 1.0
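        # out-of-range pixels are set to 1.0, presumably so that dividing
        # a science frame by this flat field leaves them unchanged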

        # update image header
        reduced_flatlowfreq = self.create_reduced_image(
            rinput, image2d_flatfielded, header_on, header_off, list_lampincd)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # save results in results directory
        self.logger.info('end of flatlowfreq generation')
        result = self.create_result(reduced_flatlowfreq=reduced_flatlowfreq)
        return result

    def create_reduced_image(self, rinput, reduced_data, header_on, header_off,
                             list_lampincd):
        with contextlib.ExitStack() as stack:
            hduls = [
                stack.enter_context(fname.open())
                for fname in rinput.obresult.frames
            ]
            # Copy header of first image
            base_header = hduls[0][0].header.copy()

            hdu = fits.PrimaryHDU(reduced_data, header=base_header)
            self.set_base_headers(hdu.header)

            self.logger.debug('update result header')
            # update additional keywords
            hdu.header['UUID'] = str(uuid.uuid1())
            hdu.header['OBSMODE'] = 'flatlowfreq'
            hdu.header['TSUTC2'] = hduls[-1][0].header['TSUTC2']
            hdu.header['HISTORY'] = "Processed flatlowfreq"
            hdu.header['NUM-NCOM'] = (len(hduls), 'Number of combined frames')

            # update history
            dm = emirdrp.datamodel.EmirDataModel()
            for img, lampincd in zip(hduls, list_lampincd):
                imgid = dm.get_imgid(img)
                hdu.header['HISTORY'] = "Image '{}' has lampincd='{}'".format(
                    imgid, lampincd)
        hdu.header['HISTORY'] = "Processed flatlowfreq"
        hdu.header['HISTORY'] = '--- Reduction of images with lamp ON ---'
        for line in header_on['HISTORY']:
            hdu.header['HISTORY'] = line
        if header_off is not None:
            hdu.header['HISTORY'] = '--- Reduction of images with lamp OFF ---'
            for line in header_off['HISTORY']:
                hdu.header['HISTORY'] = line
        hdu.header.add_history('--- rinput.stored() (BEGIN) ---')
        for item in rinput.stored():
            value = getattr(rinput, item)
            cline = '{}: {}'.format(item, value)
            hdu.header.add_history(cline)
        hdu.header.add_history('--- rinput.stored() (END) ---')

        result = fits.HDUList([hdu])
        return result

    def set_base_headers(self, hdr):
        newhdr = super(SpecFlatLowFreq, self).set_base_headers(hdr)
        # Update EXP to 0
        newhdr['EXP'] = 0
        return newhdr
Example #28
class MaskSpectraExtractionRecipe(EmirRecipe):
    '''Extract spectra from the slits of a mask.

    Slit regions are traced on the combined frame and each region
    is collapsed into a row of a row-stacked spectra (RSS) image.
    '''

    # Recipe Requirements
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()

    median_filter_size = Parameter(5, 'Size of the median box')
    slits_positions = Requirement(ArrayType,
                                  'Positions and widths of the slits')

    frame = Product(DataFrameType)
    rss = Product(DataFrameType)
    regions = Product(ArrayType)

    #slitstable = Product(ArrayType)
    #DTU = Product(ArrayType)
    #ROTANG = Product(float)
    #DETPA = Product(float)
    #DTUPA = Product(float)

    def run(self, rinput):
        _logger.info('starting extraction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size

        data1 = hdulist[0].data
        _logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Normalize input between -1 and +1
        data3 = img_norm(data2)

        # Tracing parameters
        ws = 10
        step = 15
        hs = 15
        tol = 2
        doplot = False
        npol = 5

        _logger.info('Create output images')
        rssdata = numpy.zeros(
            (rinput.slits_positions.shape[0], data3.shape[1]), dtype='float32')
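        # rssdata: one output row per slit, holding the spectrum
        # collapsed along the spatial direction (row-stacked spectra)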

        # FIXME, number of columns depends on polynomial degree
        regiontable = numpy.zeros(
            (rinput.slits_positions.shape[0], 4 + 2 * (npol + 1)),
            dtype='float32')
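        # regiontable columns: xmin, xmax, ymin, ymax (FITS convention),
        # followed by the npol + 1 coefficients of each of the two
        # boundary polynomials (the latter are commented out below)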

        count = 0
        # Loop over slits
        for slit_coords in rinput.slits_positions:
            col, y1, y2 = convert_to_(*slit_coords)
            _logger.info('Processing slit in column %i, row1=%i, row2=%i', col,
                         y1, y2)
            xmin, xmax, ymin, ymax, pfit1, pfit2 = ex_region(data3,
                                                             col,
                                                             y1,
                                                             y2,
                                                             step,
                                                             hs,
                                                             ws,
                                                             tol=tol,
                                                             doplot=doplot)

            _logger.info('Spectrum region is %i, %i, %i, %i', xmin, xmax, ymin,
                         ymax)
            try:
                region = data1[ymin:ymax + 1, xmin:xmax + 1]
                rssdata[count, xmin:xmax + 1] = region.mean(axis=0)
            except ValueError as err:
                _logger.error("Error collapsing spectrum: %s", err)
            # IN FITS convention
            _logger.info('Create regions table')
            regiontable[count, :4] = xmin + 1, xmax + 1, ymin + 1, ymax + 1
            #regiontable[count, 4:4 + npol + 1] = pfit1
            #regiontable[count, 4 + npol + 1:] = pfit2
            count += 1

        hdurss = fits.PrimaryHDU(rssdata)

        result = self.create_result(frame=hdulist,
                                    rss=hdurss,
                                    regions=regiontable)

        return result
Example #29
class TestMaskRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = ObservationResultRequirement()
    master_bpm = MasterBadPixelMaskRequirement()
    master_bias = MasterBiasRequirement()
    master_dark = MasterDarkRequirement()
    master_flat = MasterIntensityFlatFieldRequirement()
    master_sky = MasterSkyRequirement()

    pinhole_nominal_positions = Requirement(CoordinateList2DType,
                                            'Nominal positions of the pinholes'
                                            )
    shift_coordinates = Parameter(True, 'Use header information to'
                                  ' shift the pinhole positions from (0,0) '
                                  'to X_DTU, Y_DTU')
    box_half_size = Parameter(4, 'Half of the computation box size in pixels')
    recenter = Parameter(True, 'Recenter the pinhole coordinates')
    max_recenter_radius = Parameter(2.0, 'Maximum distance for recentering')

    median_filter_size = Parameter(5, 'Size of the median box')
    canny_sigma = Parameter(3.0, 'Sigma for the canny algorithm')
    obj_min_size = Parameter(200, 'Minimum size of the slit')
    obj_max_size = Parameter(3000, 'Maximum size of the slit')
    slit_size_ratio = Parameter(4.0, 'Minimum ratio between height and width for slits')

    # Recipe Products
    frame = Product(DataFrameType)
    positions = Product(ArrayType)
    positions_alt = Product(ArrayType)
    slitstable = Product(ArrayType)
    DTU = Product(ArrayType)
    filter = Product(str)
    readmode = Product(str)
    ROTANG = Product(float)
    DETPA = Product(float)
    DTUPA = Product(float)
    param_recenter = Product(bool)
    param_max_recenter_radius = Product(float)
    param_box_half_size = Product(float)

    def run(self, rinput):
        _logger.info('starting processing for slit detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        _logger.debug('finding pinholes')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            _logger.error(error)
            raise RecipeError(error)

        if rinput.shift_coordinates:
            xdtur, ydtur, zdtur = dtur
            xfac = xdtur / EMIR_PIXSCALE
            yfac = -ydtur / EMIR_PIXSCALE
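            # the DTU offsets are converted to pixels through
            # EMIR_PIXSCALE; the sign flip in Y matches the detector
            # orientation (the same convention is used in other recipes)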

            vec = numpy.array([yfac, xfac])
            _logger.info('shift is %s', vec)
            ncenters = rinput.pinhole_nominal_positions + vec
        else:
            _logger.info('using pinhole coordinates as they are')
            ncenters = rinput.pinhole_nominal_positions

        _logger.info('pinhole characterization')
        positions = pinhole_char(
            hdulist[0].data,
            ncenters,
            box=rinput.box_half_size,
            recenter_pinhole=rinput.recenter,
            maxdist=rinput.max_recenter_radius
        )

        _logger.info('alternate pinhole characterization')
        positions_alt = pinhole_char2(
            hdulist[0].data, ncenters,
            recenter_pinhole=rinput.recenter,
            recenter_half_box=rinput.box_half_size,
            recenter_maxdist=rinput.max_recenter_radius
        )

        _logger.debug('finding slits')

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma
        obj_min_size = rinput.obj_min_size
        obj_max_size = rinput.obj_max_size

        data1 = hdulist[0].data
        _logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize(data2)

        # Find edges with canny
        _logger.debug('Find edges with canny, sigma %d', canny_sigma)
        edges = canny(img_grey, sigma=canny_sigma)

        # Fill edges
        _logger.debug('Fill holes')
        fill_slits = ndimage.binary_fill_holes(edges)

        _logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        _logger.debug('%d objects found', nb_labels)
        # Filter on the area of the labeled region
        # Perhaps we could ignore this filtering and
        # do it later?
        _logger.debug('Filter objects by size')
        # Sizes of regions
        sizes = numpy.bincount(label_objects.ravel())

        _logger.debug('Min size is %d', obj_min_size)
        _logger.debug('Max size is %d', obj_max_size)

        mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)

        # Filter out regions
        nids, = numpy.where(mask_sizes)

        mm = numpy.in1d(label_objects, nids)
        mm.shape = label_objects.shape

        fill_slits_clean = numpy.where(mm, 1, 0)

        # and relabel
        _logger.debug('Label filtered objects')
        relabel_objects, nb_labels = ndimage.label(fill_slits_clean)
        _logger.debug('%d objects found after filtering', nb_labels)
        ids = list(six.moves.range(1, nb_labels + 1))

        _logger.debug('Find regions and centers')
        regions = ndimage.find_objects(relabel_objects)
        centers = ndimage.center_of_mass(data2, labels=relabel_objects,
                                         index=ids
                                         )

        table = char_slit(data2, regions,
                          slit_size_ratio=rinput.slit_size_ratio
                          )

        result = self.create_result(frame=hdulist,
                                    positions=positions,
                                    positions_alt=positions_alt,
                                    slitstable=table,
                                    filter=filtername,
                                    DTU=dtub,
                                    readmode=readmode,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa,
                                    param_recenter=rinput.recenter,
                                    param_max_recenter_radius=rinput.max_recenter_radius,
                                    param_box_half_size=rinput.box_half_size
                                    )
        return result
Example #30
class MaskImagingRecipe(EmirRecipe):

    # Recipe Requirements
    #
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterIntensityFlatFieldRequirement()
    master_sky = reqs.MasterSkyRequirement()

    bars_nominal_positions = Requirement(prods.CoordinateList2DType,
                                         'Nominal positions of the bars')
    median_filter_size = Parameter(5, 'Size of the median box')
    average_box_row_size = Parameter(
        7, 'Number of rows to average for fine centering (odd)')
    average_box_col_size = Parameter(
        21, 'Number of columns to extract for fine centering (odd)')
    fit_peak_npoints = Parameter(
        3, 'Number of points to use for fitting the peak (odd)')

    # Recipe Products
    frame = Result(prods.ProcessedImage)
    # derivative = Result(prods.ProcessedImage)
    slits = Result(tarray.ArrayType)
    positions3 = Result(tarray.ArrayType)
    positions5 = Result(tarray.ArrayType)
    positions7 = Result(tarray.ArrayType)
    positions9 = Result(tarray.ArrayType)
    DTU = Result(tarray.ArrayType)
    ROTANG = Result(float)
    TSUTC1 = Result(float)
    csupos = Result(tarray.ArrayType)
    csusens = Result(tarray.ArrayType)

    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise numina.exceptions.RecipeError(error)

        self.logger.debug('finding bars')
        # Processed array
        arr = hdulist[0].data

        # Median filter of processed array (two times)
        mfilter_size = rinput.median_filter_size

        self.logger.debug('median filtering X, %d columns', mfilter_size)
        arr_median = median_filter(arr, size=(1, mfilter_size))
        self.logger.debug('median filtering X, %d rows', mfilter_size)
        arr_median = median_filter(arr_median, size=(mfilter_size, 1))

        # Median filter of processed array (two times) in the other direction
        # for Y coordinates
        self.logger.debug('median filtering Y, %d rows', mfilter_size)
        arr_median_alt = median_filter(arr, size=(mfilter_size, 1))
        self.logger.debug('median filtering Y, %d columns', mfilter_size)
        arr_median_alt = median_filter(arr_median_alt, size=(1, mfilter_size))

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        self.logger.debug('DTU shift is %s', vec)

        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions
        # We use the bar id (field 0), the reference Y coordinate
        # (field 1) and the polynomial coefficients (fields 2 and up)
        # of the nominal positions file

        # Number of rows used
        # These other parameters can be tuned as well
        bstart = 1
        bend = 2047
        self.logger.debug('ignoring columns outside %d - %d', bstart, bend - 1)

        # extract a region to average
        wy = (rinput.average_box_row_size // 2)
        wx = (rinput.average_box_col_size // 2)
        self.logger.debug('extraction window is %d rows, %d cols', 2 * wy + 1,
                          2 * wx + 1)
        # Fit the peak with these points
        wfit = 2 * (rinput.fit_peak_npoints // 2) + 1
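        # 2 * (n // 2) + 1 forces an odd number of points: e.g. 3 -> 3
        # and 4 -> 5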
        self.logger.debug('fit with %d points', wfit)

        # Minimum threshold
        threshold = 5 * EMIR_RON
        # Savitzky and Golay (1964) filter to compute the X derivative
        # scipy >= 0.14 has a savgol_filter function;
        # for compatibility we do it manually

        allpos = {}
        ypos3_kernel = None
        slits = numpy.zeros((EMIR_NBARS, 8), dtype='float')

        self.logger.info('start finding bars')
        for ks in [3, 5, 7, 9]:
            self.logger.debug('kernel size is %d', ks)
            # S and G kernel for derivative
            kw = ks * (ks * ks - 1) / 12.0
            coeffs_are = -numpy.arange((1 - ks) // 2, (ks - 1) // 2 + 1) / kw
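            # kw is the sum of squared offsets of the kernel: e.g. for
            # ks = 5, kw = 10 and coeffs_are = [0.2, 0.1, 0.0, -0.1, -0.2]
            # (the sign is reversed because convolution flips the kernel,
            # yielding a first-derivative estimate along X)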
            if ks == 3:
                ypos3_kernel = coeffs_are
            self.logger.debug('kernel weights are %s', coeffs_are)

            self.logger.debug('derive image in X direction')
            arr_deriv = convolve1d(arr_median, coeffs_are, axis=-1)
            # Axis 0 is the spatial (Y) direction
            self.logger.debug('derive image in Y direction (with kernel=3)')
            arr_deriv_alt = convolve1d(arr_median_alt, ypos3_kernel, axis=0)

            positions = []
            for coords in barstab:
                lbarid = int(coords[0])
                rbarid = lbarid + EMIR_NBARS
                ref_y_coor = coords[1] + vec[1]
                poly_coeffs = coords[2:]
                prow = coor_to_pix_1d(ref_y_coor) - 1
                fits_row = prow + 1  # FITS pixel index

                # A function that returns the center of the bar
                # given its X position
                def center_of_bar(x):
                    # Pixel values are 0-based
                    return polyval(x + 1 - vec[0], poly_coeffs) + vec[1] - 1

                self.logger.debug('looking for bars with ids %d - %d', lbarid,
                                  rbarid)
                self.logger.debug('reference y position is Y %7.2f',
                                  ref_y_coor)

                # if ref_y_coor is out of limits, skip this bar
                # ref_y_coor is in FITS format
                if (ref_y_coor >= 2047) or (ref_y_coor <= 1):
                    self.logger.debug(
                        'reference y position is out of limits, skipping')
                    positions.append([lbarid, fits_row, fits_row, 1, 0, 3])
                    positions.append([rbarid, fits_row, fits_row, 1, 0, 3])
                    continue

                # Left bar
                self.logger.debug('measure left border (%d)', lbarid)

                centery, xpos, fwhm, st = char_bar_peak_l(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                xpos1 = xpos
                positions.append(
                    [lbarid, centery + 1, fits_row, xpos + 1, fwhm, st])

                # Right bar
                self.logger.debug('measure right border (%d)', rbarid)
                centery, xpos, fwhm, st = char_bar_peak_r(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                positions.append(
                    [rbarid, centery + 1, fits_row, xpos + 1, fwhm, st])
                xpos2 = xpos
                #
                if st == 0:
                    self.logger.debug('measure top-bottom borders')
                    try:
                        y1, y2, statusy = char_bar_height(arr_deriv_alt,
                                                          xpos1,
                                                          xpos2,
                                                          centery,
                                                          threshold,
                                                          wh=35,
                                                          wfit=wfit)
                    except Exception as error:
                        self.logger.warning('Error computing height: %s',
                                            error)
                        statusy = 44

                    if statusy in [0, 40]:
                        # Main border is detected
                        positions[-1][1] = y2 + 1
                        positions[-2][1] = y2 + 1
                    else:
                        # Update status
                        positions[-1][-1] = 4
                        positions[-2][-1] = 4
                else:
                    self.logger.debug('slit is not complete')
                    y1, y2 = 0, 0

                # Update positions

                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-2])
                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-1])

                if ks == 5:
                    slits[lbarid - 1] = [xpos1, y2, xpos2, y2,
                                         xpos2, y1, xpos1, y1]
                    # FITS coordinates
                    slits[lbarid - 1] += 1.0
                    self.logger.debug('inserting bars %d-%d into "slits"',
                                      lbarid, rbarid)

            allpos[ks] = numpy.asarray(
                positions, dtype='float')  # GCS doesn't like lists of lists

        self.logger.debug('end finding bars')
        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result