Code example #1
File: bargrad.py  Project: bxy8804/pyemir
    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.save_intermediate_img(hdulist, 'reduced_image.fits')

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            if len(csupos) != 2 * EMIR_NBARS:
                raise RecipeError('Number of CSUPOS != 2 * NBARS')
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('start finding bars')
        allpos, slits = find_bars(
            hdulist,
            rinput.bars_nominal_positions,
            csupos,
            dtur,
            average_box_row_size=rinput.average_box_row_size,
            average_box_col_size=rinput.average_box_col_size,
            fit_peak_npoints=rinput.fit_peak_npoints,
            median_filter_size=rinput.median_filter_size,
            logger=self.logger)

        self.logger.debug('end finding bars')

        if self.intermediate_results:
            with open('ds9.reg', 'w') as ds9reg:
                slits_to_ds9_reg(ds9reg, slits)

        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result
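
Note: the try/except pattern above (mandatory FITS keywords converted into a RecipeError) recurs in most of the examples below. A hypothetical helper, not part of pyemir, could factor it out:

    def get_required_keywords(hdr, names):
        # Fetch mandatory FITS keywords, turning a missing keyword
        # into a RecipeError, as the recipes on this page do inline.
        try:
            return [hdr[name] for name in names]
        except KeyError as error:
            raise RecipeError(error)

    # usage: rotang, tsutc1 = get_required_keywords(hdr, ['ROTANG', 'TSUTC1'])
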
Code example #2
    def run(self, rinput):
        _logger.info('starting bias reduction')

        iinfo = gather_info_frames(rinput.obresult.frames)

        if iinfo:
            mode = iinfo[0]['readmode']
            if mode.lower() not in EMIR_BIAS_MODES:
                msg = 'readmode %s, is not a bias mode' % mode
                _logger.error(msg)
                raise RecipeError(msg)

        # identity flow: no corrections are applied before combination
        def flow(x):
            return x
        hdulist = basic_processing_with_combination(rinput,
                                                    flow,
                                                    method=median,
                                                    errors=False)

        pdata = hdulist[0].data

        # update hdu header with
        # reduction keywords
        hdr = hdulist[0].header
        self.set_base_headers(hdr)
        hdr['CCDMEAN'] = pdata.mean()

        _logger.info('bias reduction ended')

        result = self.create_result(biasframe=DataFrame(hdulist))
        return result
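
The frame combination itself is delegated to basic_processing_with_combination with method=median. As a rough standalone sketch of what a median bias stack computes (not the pyemir implementation, which also applies the correction flow and handles error planes):

    import numpy

    # median-combine three simulated bias frames, pixel by pixel
    frames = [numpy.random.normal(1000.0, 5.0, size=(64, 64))
              for _ in range(3)]
    combined = numpy.median(numpy.stack(frames), axis=0)
    print(combined.mean())  # the kind of value stored in CCDMEAN above
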
Code example #3
    def run(self, rinput):

        _logger.info('starting dark reduction')

        flow = self.init_filters(rinput)

        iinfo = gather_info_frames(rinput.obresult.frames)
        # check that all frames share the exposure time of the first one
        # (comparing against a fixed 0.0 would reject every nonzero dark)
        ref_exptime = iinfo[0]['texp']
        for el in iinfo[1:]:
            if abs(el['texp'] - ref_exptime) > 1e-4:
                _logger.error('image with wrong exposure time')
                raise RecipeError('image with wrong exposure time')

        hdulist = basic_processing_with_combination(rinput,
                                                    flow,
                                                    method=median,
                                                    errors=True)

        pdata = hdulist[0].data

        # update hdu header with
        # reduction keywords

        hdr = hdulist[0].header
        self.set_base_headers(hdr)
        hdr['CCDMEAN'] = pdata.mean()

        _logger.info('dark reduction ended')
        result = self.create_result(darkframe=hdulist)
        return result
Code example #4
File: subs.py  Project: lrpatrick/pyemir
    def aggregate_result(self, partial_result, rinput):
        obresult = rinput.obresult
        # Check if this is our first run
        naccum = getattr(obresult, 'naccum', 0)
        accum = getattr(obresult, 'accum', None)
        # result to accumulate
        result_key = 'reduced_mos_abba'
        field_to_accum = getattr(partial_result, result_key)

        if naccum == 0:
            self.logger.debug('naccum is not set, do not accumulate')
            return partial_result
        elif naccum == 1:
            self.logger.debug('round %d initialize accumulator', naccum)
            newaccum = field_to_accum
        elif naccum > 1:
            self.logger.debug('round %d of accumulation', naccum)
            newaccum = self.aggregate_frames(accum, field_to_accum, naccum)
        else:
            msg = 'naccum set to %d, invalid' % (naccum, )
            self.logger.error(msg)
            raise RecipeError(msg)

        # Update partial result
        partial_result.accum = newaccum

        return partial_result
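
aggregate_frames is not shown here. A minimal sketch of the kind of running accumulation it might perform, assuming an equally weighted running mean over naccum rounds (the actual pyemir implementation may differ):

    def running_mean(accum, new, naccum):
        # fold a new frame into an accumulator that already
        # averages naccum - 1 frames
        return (accum * (naccum - 1) + new) / float(naccum)
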
Code example #5
File: detectorgain.py  Project: lrpatrick/pyemir
    def run(self, rinput):

        resets = []
        ramps = []

        for frame in rinput.obresult.frames:
            if frame.itype == 'RESET':
                resets.append(frame.label)
                _logger.debug('%s is RESET', frame.label)
            elif frame.itype == 'RAMP':
                ramps.append(frame.label)
                _logger.debug('%s is RAMP', frame.label)
            else:
                raise RecipeError('frame is neither a RAMP nor a RESET')

        channels = self.region(rinput)
        result_gain = numpy.zeros((len(channels), ))
        result_ron = numpy.zeros_like(result_gain)

        counts = numpy.zeros((len(ramps), len(channels)))
        variance = numpy.zeros_like(counts)

        if not resets:
            raise RecipeError('no RESET frames in the observation result')
        last_reset = resets[-1]
        _logger.debug('opening last reset image %s', last_reset)
        last_reset_data = fits.getdata(last_reset)

        for i, di in enumerate(ramps):
            with fits.open(di, mode='readonly') as fd:
                restdata = fd[0].data - last_reset_data
                for j, channel in enumerate(channels):
                    c = restdata[channel].mean()
                    _logger.debug('%f counts in channel', c)
                    counts[i, j] = c
                    v = restdata[channel].var(ddof=1)
                    _logger.debug('%f variance in channel', v)
                    variance[i, j] = v

        for j, _ in enumerate(channels):
            res = scipy.stats.linregress(counts[:, j], variance[:, j])
            slope, intercept, _r_value, _p_value, _std_err = res

            result_gain[j] = 1.0 / slope
            result_ron[j] = math.sqrt(intercept)
        cube = numpy.zeros((2, 2048, 2048))

        for gain, ron, channel in zip(result_gain, result_ron, channels):
            cube[0][channel] = gain
            cube[1][channel] = ron

        hdu = fits.PrimaryHDU(cube[0])
        hduvar = fits.ImageHDU(cube[1])
        hdulist = fits.HDUList([hdu, hduvar])

        gain = MasterGainMap(mean=result_gain,
                             var=numpy.array([]),
                             frame=DataFrame(hdulist))
        ron = MasterRONMap(mean=result_ron, var=numpy.array([]))
        return self.create_result(gain=gain, ron=ron)
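
The per-channel fit of variance against counts is the photon-transfer relation: for gain g (e-/ADU) and read-out noise ron (ADU), variance = counts / g + ron**2, so gain = 1 / slope and ron = sqrt(intercept). A synthetic check of that recovery:

    import numpy
    import scipy.stats

    true_gain, true_ron = 2.5, 4.0
    counts = numpy.linspace(1000.0, 40000.0, 20)
    variance = counts / true_gain + true_ron ** 2
    slope, intercept, _r, _p, _err = scipy.stats.linregress(counts, variance)
    print(1.0 / slope)            # ~2.5
    print(numpy.sqrt(intercept))  # ~4.0
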
Code example #6
    def run(self, rinput):

        # FIXME:
        # We need 2 flats
        # Of different exposure times
        #
        # And their calibrations
        #

        if len(rinput.obresult.frames) < 2:
            raise RecipeError('The recipe requires 2 flat frames')

        iinfo = []
        for frame in rinput.obresult.frames:
            with frame.open() as hdulist:
                iinfo.append(gather_info(hdulist))

        # Loading calibrations
        with rinput.master_bias.open() as hdul:
            readmode = hdul[0].header.get('READMODE', 'undefined')
            if readmode.lower() in ['simple', 'bias']:
                self.logger.debug('loading bias')
                mbias = hdul[0].data
                bias_corrector = proc.BiasCorrector(mbias)
            else:
                self.logger.debug('ignoring bias')
                bias_corrector = numina.util.node.IdNode()

        with rinput.master_dark.open() as mdark_hdul:
            self.logger.debug('loading dark')
            mdark = mdark_hdul[0].data
            dark_corrector = proc.DarkCorrector(mdark)

        flow = numina.util.flow.SerialFlow([bias_corrector, dark_corrector])

        self.logger.info('processing flat #1')
        with rinput.obresult.frames[0].open() as hdul:
            other = flow(hdul)
            f1 = other[0].data.copy() * iinfo[0]['texp'] * 1e-3

        self.logger.info('processing flat #2')
        with rinput.obresult.frames[1].open() as hdul:
            other = flow(hdul)
            f2 = other[0].data.copy() * iinfo[1]['texp'] * 1e-3

        # Preprocess...

        maxiter = rinput.maxiter
        lowercut = rinput.lowercut
        uppercut = rinput.uppercut

        ninvalid = 0
        mask = None  # no external mask input in this recipe

        if mask:
            m = fits.getdata(mask)
            ninvalid = numpy.count_nonzero(m)
        else:
            # start from an empty mask
            m = numpy.zeros_like(f1, dtype='int')

        for niter in range(1, maxiter + 1):
            self.logger.debug('iter %d', niter)
            ratio, m, sigma = cosmetics(f1,
                                        f2,
                                        m,
                                        lowercut=lowercut,
                                        uppercut=uppercut)

            if self.intermediate_results:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    fits.writeto('numina-cosmetics-i%02d.fits' % niter,
                                 ratio,
                                 overwrite=True)
                    fits.writeto('numina-mask-i%02d.fits' % niter,
                                 m,
                                 overwrite=True)
                    fits.writeto('numina-sigma-i%02d.fits' % niter,
                                 m * 0.0 + sigma,
                                 overwrite=True)
            self.logger.debug('iter %d, invalid points in input mask: %d',
                              niter, ninvalid)
            self.logger.debug('iter %d, estimated sigma is %f', niter, sigma)
            n_ninvalid = numpy.count_nonzero(m)

            # Probably something is wrong here:
            # too many defective pixels
            # (use float division; under Python 2 the integer ratio
            # would always be 0)
            if ninvalid / float(m.size) >= 0.10:
                # This should set a flag in the output
                msg = 'defective pixels are greater than 10%'
                self.logger.warning(msg)

            if n_ninvalid == ninvalid:
                self.logger.info('convergence reached after %d iterations',
                                 niter)
                break
            self.logger.info('new invalid points: %d', n_ninvalid - ninvalid)
            ninvalid = n_ninvalid
        else:
            # This should set a flag in the output
            msg = 'convergence not reached after %d iterations' % maxiter
            self.logger.warning(msg)

        self.logger.info('number of dead pixels %d',
                         numpy.count_nonzero(m == PIXEL_DEAD))
        self.logger.info('number of hot pixels %d',
                         numpy.count_nonzero(m == PIXEL_HOT))

        if self.intermediate_results:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                fits.writeto('numina-cosmetics.fits', ratio, overwrite=True)
                fits.writeto('numina-mask.fits', m, overwrite=True)
                fits.writeto('numina-sigma.fits',
                             sigma * numpy.ones_like(m),
                             overwrite=True)

        hdu = fits.PrimaryHDU(ratio)
        hdr = hdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        ratiohdl = fits.HDUList([hdu])

        maskhdu = fits.PrimaryHDU(m)
        hdr = maskhdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
        maskhdl = fits.HDUList([maskhdu])

        res = self.create_result(ratioframe=ratiohdl, maskframe=maskhdl)
        return res
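
Note the for/else construction in the iteration loop above: the else block runs only when the loop exhausts maxiter without hitting break, i.e. without converging. A minimal illustration of the idiom:

    for niter in range(1, 4):
        converged = (niter == 2)  # pretend convergence at iteration 2
        if converged:
            print('converged after %d iterations' % niter)
            break
    else:
        print('convergence not reached')
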
Code example #7
    def run(self, rinput):
        self.logger.info('starting processing for object detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        self.logger.debug('finding point sources')

        try:
            filtername = hdr['FILTER']
            readmode = hdr['READMODE']
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        data = hdulist[0].data

        # Copy needed in numpy 1.7
        # This seems already bitswapped??
        # FIXME: check this works offline/online
        # ndata = data.byteswap().newbyteorder()
        # data = data.byteswap(inplace=True).newbyteorder()

        snr_detect = 5.0
        fwhm = 4.0
        npixels = 15
        box_shape = [64, 64]
        self.logger.info('point source detection')
        self.logger.info('using internal mask to remove corners')
        # mask the four corner regions
        mask = numpy.zeros_like(data, dtype='int32')
        mask[2000:, 0:80] = 1
        mask[2028:, 2000:] = 1
        mask[:50, 1950:] = 1
        mask[:100, :50] = 1

        self.logger.info('compute background map, %s', box_shape)
        # pass the box shape explicitly; 64x64 is also the sep default
        bkg = sep.Background(data, bw=box_shape[1], bh=box_shape[0])

        self.logger.info('reference fwhm is %5.1f pixels', fwhm)
        self.logger.info('detect threshold, %3.1f over background', snr_detect)
        self.logger.info('convolve with gaussian kernel, FWHM %3.1f pixels',
                         fwhm)
        sigma = fwhm * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma)
        kernel.normalize()

        thresh = snr_detect * bkg.globalrms
        data_s = data - bkg.back()
        objects, segmap = sep.extract(data_s,
                                      thresh,
                                      minarea=npixels,
                                      filter_kernel=kernel.array,
                                      segmentation_map=True,
                                      mask=mask)
        # overwrite so re-running the recipe does not fail
        fits.writeto('segmap.fits', segmap, overwrite=True)
        self.logger.info('detected %d objects', len(objects))

        # Hardcoded values
        rs2 = 15.0
        fit_rad = 10.0
        flux_min = 1000.0
        flux_max = 30000.0
        self.logger.debug('Flux limit is %6.1f %6.1f', flux_min, flux_max)
        # FIXME: this should be a view, not a copy
        xall = objects['x']
        yall = objects['y']
        mm = numpy.array([xall, yall]).T
        self.logger.info('computing FWHM')
        # Find objects with pairs inside fit_rad
        kdtree = KDTree(mm)
        nearobjs = (kdtree.query_ball_tree(kdtree, r=fit_rad))
        positions = []
        for idx, obj in enumerate(objects):
            x0 = obj['x']
            y0 = obj['y']
            sl = image_box2d(x0, y0, data.shape, (fit_rad, fit_rad))
            # sl_sky = image_box2d(x0, y0, data.shape, (rs2, rs2))
            part_s = data_s[sl]
            # Logical coordinates
            xx0 = x0 - sl[1].start
            yy0 = y0 - sl[0].start

            _, fwhm_x, fwhm_y = compute_fwhm_2d_simple(part_s, xx0, yy0)

            if min(fwhm_x, fwhm_y) < 3:
                continue
            if flux_min > obj['peak'] or flux_max < obj['peak']:
                continue
            # nobjs is the number of objects inside fit_rad
            nobjs = len(nearobjs[idx])

            flag = 0 if nobjs == 1 else 1

            positions.append([idx, x0, y0, obj['peak'], fwhm_x, fwhm_y, flag])

        self.logger.info('saving photometry')
        positions = numpy.array(positions)
        # no alternative ordering is computed; both result fields
        # receive the same array
        positions_alt = positions
        self.logger.info('end processing for object detection')

        result = self.create_result(
            frame=hdulist,
            positions=positions_alt,
            positions_alt=positions_alt,
            filter=filtername,
            DTU=dtub,
            readmode=readmode,
            ROTANG=rotang,
            DETPA=detpa,
            DTUPA=dtupa,
            param_recenter=rinput.recenter,
            param_max_recenter_radius=rinput.max_recenter_radius,
            param_box_half_size=rinput.box_half_size)
        return result
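
gaussian_fwhm_to_sigma used above (available in astropy.stats) is the constant 1 / (2 * sqrt(2 * ln 2)), about 0.4247, which converts a Gaussian FWHM into a standard deviation:

    import math

    fwhm = 4.0  # the reference FWHM used above, in pixels
    sigma = fwhm / (2.0 * math.sqrt(2.0 * math.log(2.0)))
    print(sigma)  # ~1.70 pixels
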
Code example #8
    def run(self, rinput):
        self.logger.info('starting slit processing')

        self.logger.info('basic image reduction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)
        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('finding slits')

        # Filter out values below 0.0; data1 refers to the frame data
        # itself, so the clipping happens in place
        self.logger.debug('Filter values below 0')
        data1 = hdulist[0].data
        data1[data1 < 0.0] = 0.0
        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma

        self.logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize_raw(data2)

        # Find edges with Canny
        self.logger.debug('Find edges, Canny sigma %f', canny_sigma)
        # These thresholds correspond roughly with
        # value x (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold
        self.logger.debug('Find edges, Canny high threshold %f', high_threshold)
        self.logger.debug('Find edges, Canny low threshold %f', low_threshold)
        edges = canny(img_grey, sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)
        
        # Fill edges
        self.logger.debug('Fill holes')
        # dilate and later erode to close possible gaps in 'edges'
        fill = ndimage.binary_dilation(edges)
        fill2 = ndimage.binary_fill_holes(fill)
        fill_slits = ndimage.binary_erosion(fill2)

        self.logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        self.logger.debug('%d objects found', nb_labels)
        ids = list(six.moves.range(1, nb_labels + 1))

        self.logger.debug('Find regions and centers')
        regions = ndimage.find_objects(label_objects)
        # note: centers is computed but not used afterwards
        centers = ndimage.center_of_mass(data2, labels=label_objects,
                                         index=ids)

        table = char_slit(data2, regions,
                          slit_size_ratio=-1.0)

        result = self.create_result(frame=hdulist,
                                    slitstable=table,
                                    DTU=dtub,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa
                                    )

        return result
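
The dilate/fill/erode sequence in this recipe acts as a binary closing: it bridges one-pixel gaps in the Canny edge map so that binary_fill_holes can fill the slit interiors. A toy illustration on a square outline with a broken edge:

    import numpy
    from scipy import ndimage

    edges = numpy.zeros((7, 7), dtype=bool)
    edges[1, 1:6] = edges[5, 1:6] = True   # top and bottom edges
    edges[1:6, 1] = edges[1:6, 5] = True   # left and right edges
    edges[3, 5] = False                    # one-pixel gap on the right

    # without closing, the gap leaks and nothing is filled
    print(ndimage.binary_fill_holes(edges).sum())  # 15, unchanged

    fill = ndimage.binary_dilation(edges)      # thicken, bridging the gap
    fill2 = ndimage.binary_fill_holes(fill)    # interior is now enclosed
    closed = ndimage.binary_erosion(fill2)     # shrink back to scale
    print(closed.sum())  # 24: interior filled (only the gap pixel is lost)
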
Code example #9
    def run(self, rinput):
        self.logger.info('starting slit processing')

        self.logger.info('basic image reduction')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)
        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            detpa = hdr['DETPA']
            dtupa = hdr['DTUPA']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise RecipeError(error)

        self.logger.debug('finding slits')

        # First, prefilter with median
        median_filter_size = rinput.median_filter_size
        canny_sigma = rinput.canny_sigma
        obj_min_size = rinput.obj_min_size
        obj_max_size = rinput.obj_max_size

        data1 = hdulist[0].data
        self.logger.debug('Median filter with box %d', median_filter_size)
        data2 = median_filter(data1, size=median_filter_size)

        # Grey level image
        img_grey = normalize_raw(data2)

        # Find edges with Canny
        self.logger.debug('Find edges with Canny, sigma %f', canny_sigma)
        # These thresholds correspond roughly with
        # value x (2**16 - 1)
        high_threshold = rinput.canny_high_threshold
        low_threshold = rinput.canny_low_threshold
        self.logger.debug('Find edges, Canny high threshold %f', high_threshold)
        self.logger.debug('Find edges, Canny low threshold %f', low_threshold)
        edges = canny(img_grey, sigma=canny_sigma,
                      high_threshold=high_threshold,
                      low_threshold=low_threshold)
        # Fill edges
        self.logger.debug('Fill holes')
        fill_slits = ndimage.binary_fill_holes(edges)

        self.logger.debug('Label objects')
        label_objects, nb_labels = ndimage.label(fill_slits)
        self.logger.debug('%d objects found', nb_labels)
        # Filter on the area of the labeled region
        # Perhaps we could ignore this filtering and
        # do it later?
        self.logger.debug('Filter objects by size')
        # Sizes of regions
        sizes = numpy.bincount(label_objects.ravel())

        self.logger.debug('Min size is %d', obj_min_size)
        self.logger.debug('Max size is %d', obj_max_size)

        mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)

        # Filter out regions
        nids, = numpy.where(mask_sizes)

        mm = numpy.in1d(label_objects, nids)
        mm.shape = label_objects.shape

        fill_slits_clean = numpy.where(mm, 1, 0)
        #plt.imshow(fill_slits_clean)

        # and relabel
        self.logger.debug('Label filtered objects')
        relabel_objects, nb_labels = ndimage.label(fill_slits_clean)
        self.logger.debug('%d objects found after filtering', nb_labels)
        ids = list(six.moves.range(1, nb_labels + 1))

        self.logger.debug('Find regions and centers')
        regions = ndimage.find_objects(relabel_objects)
        # note: centers is computed but not used afterwards
        centers = ndimage.center_of_mass(data2, labels=relabel_objects,
                                         index=ids)

        table = char_slit(data2, regions,
                          slit_size_ratio=rinput.slit_size_ratio)

        result = self.create_result(frame=hdulist, slitstable=table,
                                    DTU=dtub,
                                    ROTANG=rotang,
                                    DETPA=detpa,
                                    DTUPA=dtupa
                                    )

        return result
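
The size filter above takes per-label pixel counts with bincount over the label image, then builds a keep-mask with in1d. The same steps on a toy label image:

    import numpy

    label_objects = numpy.array([[1, 1, 0, 2],
                                 [1, 1, 0, 2],
                                 [0, 0, 0, 2],
                                 [3, 0, 0, 2]])
    sizes = numpy.bincount(label_objects.ravel())  # [7, 4, 4, 1]; index 0 is background
    mask_sizes = (sizes > 1) & (sizes < 5)         # keep labels with 2..4 pixels
    nids, = numpy.where(mask_sizes)                # [1, 2]
    mm = numpy.in1d(label_objects, nids).reshape(label_objects.shape)
    print(numpy.where(mm, 1, 0))

As in the recipe, label 0 (the background) would also pass the filter if its pixel count happened to fall inside the size range.
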
Code example #10
    def process(self, ri,
                window=None, subpix=1,
                store_intermediate=True,
                target_is_sky=True, stop_after=PRERED):

        numpy.seterr(divide='raise')

        recipe_input = ri
        # FIXME: hardcoded instrument information
        keywords = {'airmass': 'AIRMASS',
                    'exposure': 'EXPTIME',
                    'imagetype': 'IMGTYP',
                    'juliandate': 'MJD-OBS',
                    'tstamp': 'TSTAMP'
                    }
        baseshape = [2048, 2048]
        channels = FULL

        if window is None:
            window = tuple((0, siz) for siz in baseshape)

        if store_intermediate:
            pass  # placeholder: storing intermediates is not implemented here

        # States
        sf_data = None
        state = self.BASIC
        step = 0

        # the recipe input may not define an iteration count
        # (attribute access raises AttributeError, not KeyError)
        niteration = getattr(ri, 'iterations', 1)

        while True:
            if state == self.BASIC:
                _logger.info('Basic processing')

                # Basic processing
                basicflow = self.init_filters(recipe_input)

                for frame in ri.obresult.frames:
                    with frame.open() as hdulist:
                        hdulist = basicflow(hdulist)

                if stop_after == state:
                    break
                else:
                    state = self.PRERED
            elif state == self.PRERED:
                # Shape of the window
                windowshape = tuple((i[1] - i[0]) for i in window)
                _logger.debug('Shape of window is %s', windowshape)
                # Shape of the scaled window
                subpixshape = tuple((side * subpix) for side in windowshape)

                # Scaled window region
                scalewindow = tuple(
                    slice(*(subpix * i for i in p)) for p in window)
                # Window region
                window = tuple(slice(*p) for p in window)

                scaled_chan = clip_slices(channels, window, scale=subpix)

                # Reference pixel in the center of the frame
                refpix = numpy.divide(
                    numpy.array([baseshape], dtype='int'), 2).astype('float')

                # lists of targets and sky frames
                targetframes = []
                skyframes = []

                for frame in ri.obresult.frames:

                    # Getting some metadata from FITS header
                    hdr = fits.getheader(frame.label)
                    try:
                        frame.exposure = hdr[str(keywords['exposure'])]
                        # frame.baseshape = get_image_shape(hdr)
                        frame.airmass = hdr[str(keywords['airmass'])]
                        frame.mjd = hdr[str(keywords['tstamp'])]
                    except KeyError as e:
                        raise KeyError("%s in frame %s" %
                                       (str(e), frame.label))

                    frame.baselabel = os.path.splitext(frame.label)[0]
                    frame.mask = ri.master_bpm
                    # Insert pixel offsets between frames
                    frame.objmask_data = None
                    frame.valid_target = False
                    frame.valid_sky = False
                    frame.valid_region = scalewindow
                    # FIXME: hardcode itype for the moment
                    frame.itype = 'TARGET'
                    if frame.itype == 'TARGET':
                        frame.valid_target = True
                        targetframes.append(frame)
                        if target_is_sky:
                            frame.valid_sky = True
                            skyframes.append(frame)
                    if frame.itype == 'SKY':
                        frame.valid_sky = True
                        skyframes.append(frame)

#                labels = [frame.label for frame in targetframes]

                if ri.offsets is not None:
                    _logger.info('Using offsets from parameters')
                    base_ref = numpy.asarray(ri.offsets)
                    list_of_offsets = -(base_ref - base_ref[0])
                else:
                    _logger.info('Computing offsets from WCS information')
                    list_of_offsets = offsets_from_wcs(targetframes, refpix)

                # FIXME: I'm using offsets as rows/columns, but the
                # values are provided in XY, so flip left-right
                list_of_offsets = numpy.fliplr(list_of_offsets)

                # Insert pixel offsets between frames
                for frame, off in zip(targetframes, list_of_offsets):

                    # Insert pixel offsets between frames
                    frame.pix_offset = off
                    frame.scaled_pix_offset = subpix * off

                    _logger.debug('Frame %s, offset=%s, scaled=%s',
                                  frame.label, off, subpix * off)

                _logger.info('Computing relative offsets')
                offsets = [frame.scaled_pix_offset
                           for frame in targetframes]
                offsets = numpy.round(offsets).astype('int')
                finalshape, offsetsp = combine_shape(subpixshape, offsets)
                _logger.info('Shape of resized array is %s', finalshape)

                # Resizing target frames
                self.resize(targetframes, subpixshape, offsetsp, finalshape,
                            window=window, scale=subpix)

                if not target_is_sky:
                    for frame in skyframes:
                        frame.resized_base = frame.label
                        frame.resized_mask = frame.mask

                # superflat
                _logger.info('Step %d, superflat correction (SF)', step)
                # Compute scale factors (median)
                self.update_scale_factors(ri.obresult.frames)

                # Create superflat
                superflat = self.compute_superflat(skyframes,
                                                   channels=scaled_chan,
                                                   step=step)

                # Apply superflat
                self.figure_init(subpixshape)
                self.apply_superflat(ri.obresult.frames, superflat)

                _logger.info('Simple sky correction')
                if target_is_sky:
                    # Each frame is the closest sky frame available

                    for frame in ri.obresult.frames:
                        self.compute_simple_sky_for_frame(frame, frame)
                else:
                    self.compute_simple_sky(targetframes, skyframes)

                # Combining the frames
                _logger.info("Step %d, Combining target frames", step)

                sf_data = self.combine_frames(
                    targetframes, extinction=ri.extinction)

                self.figures_after_combine(sf_data)

                _logger.info('Step %d, finished', step)

                if stop_after == state:
                    break
                else:
                    state = self.CHECKRED
            elif state == self.CHECKRED:

                seeing_fwhm = None

                # self.check_position(images_info, sf_data, seeing_fwhm)
                recompute = False
                if recompute:
                    _logger.info('Recentering is needed')
                    state = self.PRERED
                else:
                    _logger.info('Recentering is not needed')
                    _logger.info('Checking photometry')
                    check_photometry(targetframes, sf_data,
                                     seeing_fwhm, figure=self._figure)

                    if stop_after == state:
                        break
                    else:
                        state = self.FULLRED
            elif state == self.FULLRED:

                # Generating segmentation image
                _logger.info('Step %d, generating segmentation image', step)
                objmask, seeing_fwhm = self.create_mask(
                    sf_data, seeing_fwhm, step=step)
                step += 1
                # Update objects mask
                # For all images
                # FIXME:
                for frame in targetframes:
                    frame.objmask = name_object_mask(frame.baselabel, step)
                    _logger.info(
                        'Step %d, create object mask %s', step,  frame.objmask)
                    frame.objmask_data = objmask[frame.valid_region]
                    fits.writeto(
                        frame.objmask, frame.objmask_data, overwrite=True)

                if not target_is_sky:
                    # Empty object mask for sky frames
                    bogus_objmask = numpy.zeros(windowshape, dtype='int')

                    for frame in skyframes:
                        frame.objmask_data = bogus_objmask

                _logger.info('Step %d, superflat correction (SF)', step)

                # Compute scale factors (median)
                self.update_scale_factors(ri.obresult.frames, step)

                # Create superflat
                superflat = self.compute_superflat(skyframes, scaled_chan,
                                                   segmask=objmask, step=step)

                # Apply superflat
                self.figure_init(subpixshape)

                self.apply_superflat(
                    ri.obresult.frames, superflat, step=step, save=True)

                _logger.info('Step %d, advanced sky correction (SC)', step)
                self.compute_advanced_sky(targetframes, objmask,
                                          skyframes=skyframes,
                                          target_is_sky=target_is_sky,
                                          step=step)

                # Combining the images
                _logger.info("Step %d, Combining the images", step)
                # FIXME: only for science
                sf_data = self.combine_frames(
                    targetframes, ri.extinction, step=step)
                self.figures_after_combine(sf_data)

                if step >= niteration:
                    state = self.COMPLETE
            else:
                break

        if sf_data is None:
            raise RecipeError(
                'no combined image has been generated at step %d' % state)

        hdu = fits.PrimaryHDU(sf_data[0])
        hdr = hdu.header
        hdr['NUMXVER'] = (__version__, 'Numina package version')
        hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
        hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')

        hdr['FILENAME'] = 'result.fits'
        hdr['IMGTYP'] = ('TARGET', 'Image type')
        hdr['NUMTYP'] = ('TARGET', 'Data product type')

        varhdu = fits.ImageHDU(sf_data[1], name='VARIANCE')
        num = fits.ImageHDU(sf_data[2], name='MAP')

        result = fits.HDUList([hdu, varhdu, num])

        _logger.info("Final frame created")

        return DataFrame(result), SourcesCatalog()
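
One detail worth noting in the offsets handling above: the user-supplied offsets are XY pairs, while the resizing works in row/column order, hence the numpy.fliplr:

    import numpy

    xy_offsets = numpy.array([[10.0, 3.0],
                              [12.0, 5.0]])
    print(numpy.fliplr(xy_offsets))  # [[3. 10.], [5. 12.]] -> (row, column)
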