Ejemplo n.º 1
0
def get_corrector_b(rinput, meta):
    """Return a bias corrector, or an identity node when bias is not needed.

    The decision is driven by the 'readmode' of the first frame in
    meta['obresult']: bias subtraction is applied only for EMIR bias
    read modes. When no frame info is available, bias is skipped.
    """
    from numina.flow.processing import BiasCorrector

    datamodel = EmirDataModel()
    obs_info = meta['obresult']

    use_bias = False
    if obs_info:
        mode = obs_info[0]['readmode']
        use_bias = mode.lower() in EMIR_BIAS_MODES
        if use_bias:
            _logger.info('readmode is %s, bias required', mode)
        else:
            _logger.info('readmode is %s, bias not required', mode)

    if not use_bias:
        _logger.info('ignoring bias')
        return IdNode()

    # Loading calibrations
    bias_info = meta['master_bias']
    with rinput.master_bias.open() as hdul:
        _logger.info('loading bias')
        _logger.debug('bias info: %s', bias_info)
        return BiasCorrector(hdul[0].data,
                             datamodel=datamodel,
                             calibid=datamodel.get_imgid(hdul))
Ejemplo n.º 2
0
 def time_it(self, time1, time2):
     """Record the computation time span (time1..time2) in the image results.

     NOTE(review): this snippet appears truncated by the scrape -- the
     loop body may continue past what is visible here, and the opened
     HDU list is never closed in the visible code. Confirm against the
     original source.
     """
     values = self.attrs()
     for k, spec in self.stored().items():
         value = values[k]
         # Only image-like (DataFrame) results carry timing metadata
         if isinstance(value, DataFrame):
             d = EmirDataModel()
             hdul = value.open()
             # presumably writes timing keywords into the header -- TODO confirm
             d.add_computation_time(hdul, time1, time2)
Ejemplo n.º 3
0
def basic_processing_with_combination_frames(frames,
                                             flow,
                                             method=combine.mean,
                                             errors=True,
                                             prolog=None):
    """Reduce every frame with *flow* and stack the results into one image.

    Parameters
    ----------
    frames : sequence of frames, each with an ``open()`` method
    flow : callable applied to each opened HDU list
    method : combination function (default ``combine.mean``)
    errors : when True, append VARIANCE and MAP extensions
    prolog : optional text prepended to the result's history

    Returns
    -------
    fits.HDUList with the combined image and provenance history.
    All opened HDU lists are closed before returning.
    """
    datamodel = EmirDataModel()
    to_close = []
    processed = []
    try:
        _logger.info('processing input images')
        for frame in frames:
            raw = frame.open()
            _logger.info('input is %s', datamodel.get_imgid(raw))
            reduced = flow(raw)
            _logger.debug('output is input: %s', reduced is raw)
            processed.append(reduced)
            # Files to be closed at the end
            to_close.append(raw)
            if reduced is not raw:
                to_close.append(reduced)

        base_header = processed[0][0].header.copy()
        cnum = len(processed)
        _logger.info("stacking %d images using '%s'", cnum, method.__name__)
        data = method([img[0].data for img in processed], dtype='float32')
        hdu = fits.PrimaryHDU(data[0], header=base_header)
        _logger.debug('update result header')
        if prolog:
            _logger.debug('write prolog')
            hdu.header['history'] = prolog
        hdu.header['history'] = "Combined %d images using '%s'" % (
            cnum, method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(
            datetime.datetime.utcnow().isoformat())
        for img in processed:
            hdu.header['history'] = "Image {}".format(datamodel.get_imgid(img))
        # Accumulate the number of combined frames across runs
        hdu.header['NUM-NCOM'] = base_header.get('NUM-NCOM', 1) * cnum
        hdu.header['UUID'] = str(uuid.uuid1())
        # Headers of last image
        hdu.header['TSUTC2'] = processed[-1][0].header['TSUTC2']
        if errors:
            extensions = [hdu,
                          fits.ImageHDU(data[1], name='VARIANCE'),
                          fits.ImageHDU(data[2], name='MAP')]
        else:
            extensions = [hdu]
        result = fits.HDUList(extensions)
    finally:
        _logger.debug('closing images')
        for raw in to_close:
            raw.close()

    return result
Ejemplo n.º 4
0
def basic_processing_with_combination_frames(frames,
                                             flow,
                                             method=combine.mean,
                                             errors=True,
                                             prolog=None
                                             ):
    """Apply *flow* to each frame and combine the outputs with *method*.

    Returns a ``fits.HDUList``; when *errors* is True it also carries
    VARIANCE and MAP extensions. Provenance (inputs, combination method
    and time, accumulated NUM-NCOM, fresh UUID) is written to the
    primary header. Every opened HDU list is closed on exit.
    """
    datamodel = EmirDataModel()
    open_lists = []
    reduced = []
    try:
        _logger.info('processing input images')
        for frame in frames:
            hdulist = frame.open()
            _logger.info('input is %s', datamodel.get_imgid(hdulist))
            out = flow(hdulist)
            _logger.debug('output is input: %s', out is hdulist)
            reduced.append(out)
            # Files to be closed at the end
            open_lists.append(hdulist)
            if out is not hdulist:
                open_lists.append(out)

        base_header = reduced[0][0].header.copy()
        ncombine = len(reduced)
        _logger.info("stacking %d images using '%s'", ncombine, method.__name__)
        data = method([item[0].data for item in reduced], dtype='float32')
        hdu = fits.PrimaryHDU(data[0], header=base_header)
        _logger.debug('update result header')
        if prolog:
            _logger.debug('write prolog')
            hdu.header['history'] = prolog
        hdu.header['history'] = "Combined %d images using '%s'" % (ncombine, method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(datetime.datetime.utcnow().isoformat())
        for item in reduced:
            hdu.header['history'] = "Image {}".format(datamodel.get_imgid(item))
        prevnum = base_header.get('NUM-NCOM', 1)
        hdu.header['NUM-NCOM'] = prevnum * ncombine
        hdu.header['UUID'] = str(uuid.uuid1())
        # Headers of last image
        hdu.header['TSUTC2'] = reduced[-1][0].header['TSUTC2']
        if not errors:
            result = fits.HDUList([hdu])
        else:
            result = fits.HDUList([hdu,
                                   fits.ImageHDU(data[1], name='VARIANCE'),
                                   fits.ImageHDU(data[2], name='MAP')])
    finally:
        _logger.debug('closing images')
        for hdulist in open_lists:
            hdulist.close()

    return result
Ejemplo n.º 5
0
def get_corrector_s(rinput, meta):
    """Build a sky corrector from the master sky frame.

    Returns an identity node when meta carries no 'master_sky' entry.
    """
    from numina.flow.processing import SkyCorrector

    sky_info = meta.get('master_sky')
    datamodel = EmirDataModel()

    if sky_info is None:
        return IdNode()

    with rinput.master_sky.open() as hdul:
        _logger.info('loading sky')
        _logger.debug('sky info: %s', sky_info)
        return SkyCorrector(hdul[0].data,
                            datamodel=datamodel,
                            calibid=datamodel.get_imgid(hdul))
Ejemplo n.º 6
0
def basic_processing(rinput, flow):
    """Apply *flow* to every image of the observation result.

    Returns the list of processed HDU lists. Note the input files are
    left open; the caller owns their lifetime.
    """
    datamodel = EmirDataModel()

    _logger.info('processing input images')
    results = []
    for frame in rinput.obresult.images:
        hdulist = frame.open()
        _logger.info('input is %s', datamodel.get_imgid(hdulist))
        processed = flow(hdulist)
        _logger.debug('output is input: %s', processed is hdulist)
        results.append(processed)

    return results
Ejemplo n.º 7
0
def get_corrector_d(rinput, meta):
    """Build a dark-current corrector via the generic corrector factory."""
    from numina.flow.processing import DarkCorrector

    datamodel = EmirDataModel()
    return get_corrector_gen(rinput, datamodel, DarkCorrector, 'master_dark')
Ejemplo n.º 8
0
def get_corrector_p(rinput, meta):
    """Build a bad-pixel-mask corrector.

    Returns an identity node when no 'master_bpm' entry is present in
    *meta*.
    """
    key = 'master_bpm'
    bpm_info = meta.get(key)
    datamodel = EmirDataModel()

    if bpm_info is None:
        _logger.info('"%s" not provided, ignored', key)
        return IdNode()

    with getattr(rinput, key).open() as hdul:
        _logger.info('loading "%s"', key)
        _logger.debug('info: %s', bpm_info)
        return BadPixelCorrector(hdul[0].data,
                                 datamodel=datamodel,
                                 calibid=datamodel.get_imgid(hdul))
Ejemplo n.º 9
0
def get_corrector_f(rinput, meta):
    """Build a flat-field corrector from the master intensity flat.

    Logs a warning when the flat contains negative or non-finite
    values before building the corrector.
    """
    from emirdrp.processing.flatfield import FlatFieldCorrector

    flat_info = meta['master_flat']
    datamodel = EmirDataModel()
    with rinput.master_flat.open() as hdul:
        _logger.info('loading intensity flat')
        _logger.debug('flat info: %s', flat_info)
        mflat = hdul[0].data
        # Sanity checks: negative values and NaN/inf entries
        negative = mflat < 0
        nonfinite = ~numpy.isfinite(mflat)
        if negative.any():
            _logger.warning('flat has %d values below 0', negative.sum())
        if nonfinite.any():
            _logger.warning('flat has %d NaN', nonfinite.sum())
        corrector = FlatFieldCorrector(mflat,
                                       datamodel=datamodel,
                                       calibid=datamodel.get_imgid(hdul))

    return corrector
Ejemplo n.º 10
0
class EmirRecipe(BaseRecipe):
    """Base class for all EMIR Recipes


    Attributes
    ----------
    qc : QualityControl, result, QC.GOOD by default

    logger :
         recipe logger

    datamodel : EmirDataModel

    """
    RecipeResult = EmirRecipeResult

    qc = Product(QualityControlProduct, destination='qc', default=QC.GOOD)
    logger = logging.getLogger('numina.recipes.emir')
    datamodel = EmirDataModel()

    @classmethod
    def types_getter(cls):
        """Return parallel lists of calibration product types and getters.

        A getters entry may itself be a list when several correctors are
        derived from the same product type.
        """
        imgtypes = [
            prods.MasterBadPixelMask, prods.MasterBias, prods.MasterDark,
            prods.MasterIntensityFlat, prods.MasterSky, prods.SkySpectrum
        ]
        getters = [
            get_corrector_p, get_corrector_b, get_corrector_d,
            [get_corrector_f, get_checker], get_corrector_s, get_corrector_s
        ]

        return imgtypes, getters

    @classmethod
    def load_getters(cls):
        """Select the getters whose product type appears in RecipeInput."""
        # `collections.Iterable` was removed in Python 3.10; the abc
        # submodule is the supported home of the ABCs.
        import collections.abc

        imgtypes, getters = cls.types_getter()
        used_getters = []
        for rtype, getter in zip(imgtypes, getters):
            for key, val in cls.RecipeInput.stored().items():
                if isinstance(val.type, rtype):
                    # An entry may be one getter or an iterable of getters
                    if isinstance(getter, collections.abc.Iterable):
                        used_getters.extend(getter)
                    else:
                        used_getters.append(getter)
                    break
        return used_getters

    @classmethod
    def init_filters_generic(cls, rinput, getters):
        """Build a SerialFlow from the correctors produced by *getters*."""
        from numina.flow import SerialFlow
        # with BPM, bias, dark, flat and sky
        if numina.ext.gtc.check_gtc():
            cls.logger.debug('running in GTC environment')
        else:
            cls.logger.debug('running outside of GTC environment')

        meta = emirdrp.processing.info.gather_info(rinput)
        cls.logger.debug('obresult info')
        for entry in meta['obresult']:
            cls.logger.debug('frame info is %s', entry)
        correctors = [getter(rinput, meta) for getter in getters]
        flow = SerialFlow(correctors)
        return flow

    @classmethod
    def init_filters(cls, rinput):
        """Build the full reduction flow for *rinput*."""
        getters = cls.load_getters()
        return cls.init_filters_generic(rinput, getters)

    def aggregate_result(self, result, rinput):
        """Hook for subclasses; the default returns *result* unchanged."""
        return result
Ejemplo n.º 11
0
def basic_processing(rinput, flow):
    """Run *flow* over the observation images via ``basic_processing_``."""
    return basic_processing_(rinput.obresult.images, flow, EmirDataModel())
Ejemplo n.º 12
0
def basic_processing_with_segmentation(rinput, flow,
                                          method=combine.mean,
                                          errors=True, bpm=None):
    """Reduce, register and combine the observation images, masking objects.

    Each image is processed with *flow*, shifted by offsets derived from
    its WCS, stacked once to build a segmentation map of detected objects,
    and then combined a second time with those objects (and optionally the
    bad-pixel mask *bpm*) masked out. Returns a ``fits.HDUList``; when
    *errors* is True it carries VARIANCE and MAP extensions. All opened
    HDU lists are closed on exit.
    """

    odata = []
    cdata = []
    datamodel = EmirDataModel()
    try:
        _logger.info('processing input images')
        for frame in rinput.obresult.images:
            hdulist = frame.open()
            fname = datamodel.get_imgid(hdulist)
            _logger.info('input is %s', fname)
            final = flow(hdulist)
            _logger.debug('output is input: %s', final is hdulist)

            cdata.append(final)

            # Files to be closed at the end
            odata.append(hdulist)
            if final is not hdulist:
                odata.append(final)

        base_header = cdata[0][0].header.copy()

        baseshape = cdata[0][0].data.shape
        subpixshape = cdata[0][0].data.shape

        _logger.info('Computing offsets from WCS information')
        # Reference pixel at the image center
        refpix = numpy.divide(numpy.array([baseshape], dtype='int'), 2).astype('float')
        offsets_xy = offsets_from_wcs(rinput.obresult.frames, refpix)
        _logger.debug("offsets_xy %s", offsets_xy)
        # Offsets in numpy (row, column) order: swap the x/y axes
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        _logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
        _logger.debug("offsetsp %s", offsetsp)

        _logger.info('Shape of resized array is %s', finalshape)
        # Resizing target frames
        rhduls, regions = resize_hdulists(cdata, subpixshape, offsetsp, finalshape)

        # First pass: stack the registered images to detect objects
        _logger.info("stacking %d images, with offsets using '%s'", len(cdata), method.__name__)
        data1 = method([d[0].data for d in rhduls], dtype='float32')

        segmap = segmentation_combined(data1[0])
        # Per-image object masks cut from the segmentation map
        if bpm is None:
            masks = [(segmap[region] > 0) for region in regions]
        else:
            masks = [((segmap[region] > 0) & bpm) for region in regions]

        # Second pass: combine with the detected objects masked out
        _logger.info("stacking %d images, with objects mask using '%s'", len(cdata), method.__name__)
        data2 = method([d[0].data for d in cdata], masks=masks, dtype='float32')
        hdu = fits.PrimaryHDU(data2[0], header=base_header)
        # data2[2] is the per-pixel contribution count (MAP)
        points_no_data = (data2[2] == 0).sum()

        _logger.debug('update result header')
        hdu.header['TSUTC2'] = cdata[-1][0].header['TSUTC2']
        hdu.header['history'] = "Combined %d images using '%s'" % (len(cdata), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(datetime.datetime.utcnow().isoformat())
        hdu.header['UUID'] = str(uuid.uuid1())
        _logger.info("missing points, total: %d, fraction: %3.1f", points_no_data, points_no_data / data2[2].size)

        if errors:
            varhdu = fits.ImageHDU(data2[1], name='VARIANCE')
            num = fits.ImageHDU(data2[2], name='MAP')
            result = fits.HDUList([hdu, varhdu, num])
        else:
            result = fits.HDUList([hdu])
    finally:
        _logger.debug('closing images')
        for hdulist in odata:
            hdulist.close()

    return result
Ejemplo n.º 13
0
def basic_processing_with_segmentation(rinput, flow,
                                          method=combine.mean,
                                          errors=True, bpm=None):
    """Two-pass combination of the observation images with object masking.

    Pass 1 registers the flow-processed images using WCS-derived offsets
    and stacks them to build a segmentation map. Pass 2 combines the
    images again with the segmented objects (and optionally *bpm*)
    masked. Returns a ``fits.HDUList`` (plus VARIANCE and MAP extensions
    when *errors* is True); opened HDU lists are closed before returning.
    """

    odata = []
    cdata = []
    datamodel = EmirDataModel()
    try:
        _logger.info('processing input images')
        for frame in rinput.obresult.images:
            hdulist = frame.open()
            fname = datamodel.get_imgid(hdulist)
            _logger.info('input is %s', fname)
            final = flow(hdulist)
            _logger.debug('output is input: %s', final is hdulist)

            cdata.append(final)

            # Files to be closed at the end
            odata.append(hdulist)
            if final is not hdulist:
                odata.append(final)

        base_header = cdata[0][0].header.copy()

        baseshape = cdata[0][0].data.shape
        subpixshape = cdata[0][0].data.shape

        _logger.info('Computing offsets from WCS information')
        # Use the image center as the reference pixel
        refpix = numpy.divide(numpy.array([baseshape], dtype='int'), 2).astype('float')
        offsets_xy = offsets_from_wcs(rinput.obresult.frames, refpix)
        _logger.debug("offsets_xy %s", offsets_xy)
        # Convert to numpy (row, column) order by swapping axes
        offsets_fc = offsets_xy[:, ::-1]
        offsets_fc_t = numpy.round(offsets_fc).astype('int')

        _logger.info('Computing relative offsets')
        finalshape, offsetsp = combine_shape(subpixshape, offsets_fc_t)
        _logger.debug("offsetsp %s", offsetsp)

        _logger.info('Shape of resized array is %s', finalshape)
        # Resizing target frames
        rhduls, regions = resize_hdulists(cdata, subpixshape, offsetsp, finalshape)

        # First stacking: detect objects on the registered combination
        _logger.info("stacking %d images, with offsets using '%s'", len(cdata), method.__name__)
        data1 = method([d[0].data for d in rhduls], dtype='float32')

        segmap = segmentation_combined(data1[0])
        # Object masks per image, sliced from the segmentation map
        if bpm is None:
            masks = [(segmap[region] > 0) for region in regions]
        else:
            masks = [((segmap[region] > 0) & bpm) for region in regions]

        # Second stacking: objects masked out of each input
        _logger.info("stacking %d images, with objects mask using '%s'", len(cdata), method.__name__)
        data2 = method([d[0].data for d in cdata], masks=masks, dtype='float32')
        hdu = fits.PrimaryHDU(data2[0], header=base_header)
        # data2[2] counts contributing pixels; zero means no data there
        points_no_data = (data2[2] == 0).sum()

        _logger.debug('update result header')
        hdu.header['TSUTC2'] = cdata[-1][0].header['TSUTC2']
        hdu.header['history'] = "Combined %d images using '%s'" % (len(cdata), method.__name__)
        hdu.header['history'] = 'Combination time {}'.format(datetime.datetime.utcnow().isoformat())
        hdu.header['UUID'] = str(uuid.uuid1())
        _logger.info("missing points, total: %d, fraction: %3.1f", points_no_data, points_no_data / data2[2].size)

        if errors:
            varhdu = fits.ImageHDU(data2[1], name='VARIANCE')
            num = fits.ImageHDU(data2[2], name='MAP')
            result = fits.HDUList([hdu, varhdu, num])
        else:
            result = fits.HDUList([hdu])
    finally:
        _logger.debug('closing images')
        for hdulist in odata:
            hdulist.close()

    return result