Example #1
    def run(self, rinput):
        self.logger.info('applying existing rect.+wavecal. calibration of '
                         'stare spectra')

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=median)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # apply rectification and wavelength calibration
        reduced_mos = apply_rectwv_coeff(reduced_image, rinput.rectwv_coeff)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rinput.rectwv_coeff)
            save_spectral_lines_ds9(rinput.rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of stare spectra')
        result = self.create_result(reduced_mos=reduced_mos)
        return result
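A minimal stand-alone sketch of the same rectification and wavelength-calibration step applied outside the recipe framework. Only apply_rectwv_coeff(reduced_image, rectwv_coeff) is taken from the example above; the import path and file handling are assumptions and may differ between pyemir versions.

from astropy.io import fits
from emirdrp.processing.wavecal import apply_rectwv_coeff  # import path assumed

def rectify_stare_image(reduced_filename, rectwv_coeff, output_filename):
    """Apply an existing RectWaveCoeff calibration to a basic-reduced image."""
    with fits.open(reduced_filename) as reduced_image:
        # rectwv_coeff: RectWaveCoeff instance for the current CSU configuration
        reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)
        reduced_mos.writeto(output_filename, overwrite=True)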
Example #2
def refine_rectwv_coeff(input_image,
                        rectwv_coeff,
                        refine_wavecalib_mode,
                        minimum_slitlet_width_mm,
                        maximum_slitlet_width_mm,
                        save_intermediate_results=False,
                        debugplot=0):
    """Refine RectWaveCoeff object using a catalogue of lines

    The refinement is carried out using either arc lines or OH sky lines,
    depending on the value of refine_wavecalib_mode.

    Parameters
    ----------
    input_image : HDUList object
        Input 2D image.
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    refine_wavecalib_mode : int
        Integer, indicating the type of refinement:
        0 : no refinement
        1 : apply the same global offset to all the slitlets (using ARC lines)
        2 : apply individual offset to each slitlet (using ARC lines)
        11 : apply the same global offset to all the slitlets (using OH lines)
        12 : apply individual offset to each slitlet (using OH lines)
    minimum_slitlet_width_mm : float
        Minimum slitlet width (mm) for a valid slitlet.
    maximum_slitlet_width_mm : float
        Maximum slitlet width (mm) for a valid slitlet.
    save_intermediate_results : bool
        If True, save plots in PDF files
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    refined_rectwv_coeff : RectWaveCoeff instance
        Refined rectification and wavelength calibration coefficients
        for the particular CSU configuration.
    expected_cat_image : HDUList object
        Output 2D image with the expected catalogued lines.

    """

    logger = logging.getLogger(__name__)

    if save_intermediate_results:
        from matplotlib.backends.backend_pdf import PdfPages
        pdf = PdfPages('crosscorrelation.pdf')
    else:
        pdf = None

    # image header
    main_header = input_image[0].header
    filter_name = main_header['filter']
    grism_name = main_header['grism']

    # protections
    if refine_wavecalib_mode not in [1, 2, 11, 12]:
        logger.error('Wavelength calibration refinement mode={}'.format(
            refine_wavecalib_mode))
        raise ValueError("Invalid wavelength calibration refinement mode")

    # read tabulated lines
    if refine_wavecalib_mode in [1, 2]:  # ARC lines
        if grism_name == 'LR':
            catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat'
        else:
            catlines_file = 'lines_argon_neon_xenon_empirical.dat'
        dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file)
        arc_lines_tmpfile = StringIO(dumdata.decode('utf8'))
        catlines = np.genfromtxt(arc_lines_tmpfile)
        # define wavelength and flux as separate arrays
        catlines_all_wave = catlines[:, 0]
        catlines_all_flux = catlines[:, 1]
        mode = refine_wavecalib_mode
    elif refine_wavecalib_mode in [11, 12]:  # OH lines
        dumdata = pkgutil.get_data('emirdrp.instrument.configs',
                                   'Oliva_etal_2013.dat')
        oh_lines_tmpfile = StringIO(dumdata.decode('utf8'))
        catlines = np.genfromtxt(oh_lines_tmpfile)
        # define wavelength and flux as separate arrays
        catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0]))
        catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2]))
        mode = refine_wavecalib_mode - 10
    else:
        raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode))

    # initialize output
    refined_rectwv_coeff = deepcopy(rectwv_coeff)

    logger.info('Computing median spectrum')
    # compute median spectrum and normalize it
    sp_median = median_slitlets_rectified(
        input_image,
        mode=2,
        minimum_slitlet_width_mm=minimum_slitlet_width_mm,
        maximum_slitlet_width_mm=maximum_slitlet_width_mm)[0].data
    sp_median /= sp_median.max()

    # determine minimum and maximum useful wavelength
    jmin, jmax = find_pix_borders(sp_median, 0)
    naxis1 = main_header['naxis1']
    naxis2 = main_header['naxis2']
    crpix1 = main_header['crpix1']
    crval1 = main_header['crval1']
    cdelt1 = main_header['cdelt1']
    xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
    if grism_name == 'LR':
        wv_parameters = set_wv_parameters(filter_name, grism_name)
        wave_min = wv_parameters['wvmin_useful']
        wave_max = wv_parameters['wvmax_useful']
    else:
        wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
        wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
    logger.info('Setting wave_min to {}'.format(wave_min))
    logger.info('Setting wave_max to {}'.format(wave_max))

    # extract subset of catalogue lines within current wavelength range
    lok1 = catlines_all_wave >= wave_min
    lok2 = catlines_all_wave <= wave_max
    catlines_reference_wave = catlines_all_wave[lok1 * lok2]
    catlines_reference_flux = catlines_all_flux[lok1 * lok2]
    catlines_reference_flux /= catlines_reference_flux.max()

    # estimate sigma to broaden catalogue lines
    csu_config = CsuConfiguration.define_from_header(main_header)
    # segregate slitlets
    list_useful_slitlets = csu_config.widths_in_range_mm(
        minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm)
    list_not_useful_slitlets = [
        i for i in list(range(1, EMIR_NBARS + 1))
        if i not in list_useful_slitlets
    ]
    logger.info('list of useful slitlets: {}'.format(list_useful_slitlets))
    logger.info(
        'list of not useful slitlets: {}'.format(list_not_useful_slitlets))
    tempwidths = np.array([
        csu_config.csu_bar_slit_width(islitlet)
        for islitlet in list_useful_slitlets
    ])
    widths_summary = summary(tempwidths)
    logger.info('Statistics of useful slitlet widths (mm):')
    logger.info('- npoints....: {0:d}'.format(widths_summary['npoints']))
    logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean']))
    logger.info('- median.....: {0:7.3f}'.format(widths_summary['median']))
    logger.info('- std........: {0:7.3f}'.format(widths_summary['std']))
    logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std']))
    # empirical transformation of slit width (mm) to pixels
    sigma_broadening = cdelt1 * widths_summary['median']

    # convolve location of catalogue lines to generate expected spectrum
    xwave_reference, sp_reference = convolve_comb_lines(
        catlines_reference_wave, catlines_reference_flux, sigma_broadening,
        crpix1, crval1, cdelt1, naxis1)
    sp_reference /= sp_reference.max()

    # generate image2d with expected lines
    image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
    hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header)
    expected_cat_image = fits.HDUList([hdu])

    if (abs(debugplot) % 10 != 0) or (pdf is not None):
        ax = ximplotxy(xwave,
                       sp_median,
                       'C1-',
                       xlabel='Wavelength (Angstroms, in vacuum)',
                       ylabel='Normalized number of counts',
                       title='Median spectrum',
                       label='observed spectrum',
                       show=False)
        # overplot reference catalogue lines
        ax.stem(catlines_reference_wave,
                catlines_reference_flux,
                'C4-',
                markerfmt=' ',
                basefmt='C4-',
                label='tabulated lines')
        # overplot convolved reference lines
        ax.plot(xwave_reference,
                sp_reference,
                'C0-',
                label='expected spectrum')
        ax.legend()
        if pdf is not None:
            pdf.savefig()
        else:
            pause_debugplot(debugplot=debugplot, pltshow=True)

    # compute baseline signal in sp_median
    baseline = np.percentile(sp_median[sp_median > 0], q=10)
    if (abs(debugplot) % 10 != 0) or (pdf is not None):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(sp_median, bins=1000, log=True)
        ax.set_xlabel('Normalized number of counts')
        ax.set_ylabel('Number of pixels')
        ax.set_title('Median spectrum')
        ax.axvline(float(baseline), linestyle='--', color='grey')
        if pdf is not None:
            pdf.savefig()
        else:
            geometry = (0, 0, 640, 480)
            set_window_geometry(geometry)
            plt.show()
    # subtract baseline from sp_median (only pixels with signal above zero)
    lok = np.where(sp_median > 0)
    sp_median[lok] -= baseline

    # compute global offset through periodic correlation
    logger.info('Computing global offset')
    global_offset, fpeak = periodic_corr1d(
        sp_reference=sp_reference,
        sp_offset=sp_median,
        fminmax=None,
        naround_zero=50,
        plottitle='Median spectrum (cross-correlation)',
        pdf=pdf,
        debugplot=debugplot)
    logger.info('Global offset: {} pixels'.format(-global_offset))

    missing_slitlets = rectwv_coeff.missing_slitlets

    if mode == 1:
        # apply computed offset to obtain refined_rectwv_coeff_global
        for islitlet in range(1, EMIR_NBARS + 1):
            if islitlet not in missing_slitlets:
                i = islitlet - 1
                dumdict = refined_rectwv_coeff.contents[i]
                dumdict['wpoly_coeff'][0] -= global_offset * cdelt1

    elif mode == 2:
        # compute individual offset for each slitlet
        logger.info('Computing individual offsets')
        median_55sp = median_slitlets_rectified(input_image, mode=1)
        offset_array = np.zeros(EMIR_NBARS)
        xplot = []
        yplot = []
        xplot_skipped = []
        yplot_skipped = []
        cout = '0'
        for islitlet in range(1, EMIR_NBARS + 1):
            if islitlet in list_useful_slitlets:
                i = islitlet - 1
                sp_median = median_55sp[0].data[i, :]
                lok = np.where(sp_median > 0)
                baseline = np.percentile(sp_median[lok], q=10)
                sp_median[lok] -= baseline
                sp_median /= sp_median.max()
                offset_array[i], fpeak = periodic_corr1d(
                    sp_reference=sp_reference,
                    sp_offset=median_55sp[0].data[i, :],
                    fminmax=None,
                    naround_zero=50,
                    plottitle='slitlet #{0} (cross-correlation)'.format(
                        islitlet),
                    pdf=pdf,
                    debugplot=debugplot)
                dumdict = refined_rectwv_coeff.contents[i]
                dumdict['wpoly_coeff'][0] -= offset_array[i] * cdelt1
                xplot.append(islitlet)
                yplot.append(-offset_array[i])
                # second correction
                wpoly_coeff_refined = check_wlcalib_sp(
                    sp=median_55sp[0].data[i, :],
                    crpix1=crpix1,
                    crval1=crval1 - offset_array[i] * cdelt1,
                    cdelt1=cdelt1,
                    wv_master=catlines_reference_wave,
                    coeff_ini=dumdict['wpoly_coeff'],
                    naxis1_ini=EMIR_NAXIS1,
                    title='slitlet #{0} (after applying offset)'.format(
                        islitlet),
                    ylogscale=False,
                    pdf=pdf,
                    debugplot=debugplot)
                dumdict['wpoly_coeff'] = wpoly_coeff_refined
                cout += '.'

            else:
                xplot_skipped.append(islitlet)
                yplot_skipped.append(0)
                cout += 'i'

            if islitlet % 10 == 0:
                if cout != 'i':
                    cout = str(islitlet // 10)

            logger.info(cout)

        # show offsets with opposite sign
        stat_summary = summary(np.array(yplot))
        logger.info('Statistics of individual slitlet offsets (pixels):')
        logger.info('- npoints....: {0:d}'.format(stat_summary['npoints']))
        logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean']))
        logger.info('- median.....: {0:7.3f}'.format(stat_summary['median']))
        logger.info('- std........: {0:7.3f}'.format(stat_summary['std']))
        logger.info('- robust_std.: {0:7.3f}'.format(
            stat_summary['robust_std']))
        if (abs(debugplot) % 10 != 0) or (pdf is not None):
            ax = ximplotxy(xplot,
                           yplot,
                           linestyle='',
                           marker='o',
                           color='C0',
                           xlabel='slitlet number',
                           ylabel='-offset (pixels) = offset to be applied',
                           title='cross-correlation result',
                           show=False,
                           **{'label': 'individual slitlets'})
            if len(xplot_skipped) > 0:
                ax.plot(xplot_skipped, yplot_skipped, 'mx')
            ax.axhline(-global_offset,
                       linestyle='--',
                       color='C1',
                       label='global offset')
            ax.legend()
            if pdf is not None:
                pdf.savefig()
            else:
                pause_debugplot(debugplot=debugplot, pltshow=True)
    else:
        raise ValueError('Unexpected mode={}'.format(mode))

    # close output PDF file
    if pdf is not None:
        pdf.close()

    # return result
    return refined_rectwv_coeff, expected_cat_image
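A hypothetical call of the function defined above, refining the calibration with individual per-slitlet offsets derived from OH sky lines (mode 12). The file names and slit-width limits are placeholders, and rectwv_coeff is assumed to be a RectWaveCoeff instance obtained in a previous step.

from astropy.io import fits

with fits.open('reduced_mos.fits') as input_image:
    refined_rectwv_coeff, expected_cat_image = refine_rectwv_coeff(
        input_image,
        rectwv_coeff,                     # RectWaveCoeff from a previous step
        refine_wavecalib_mode=12,         # individual offsets, OH lines
        minimum_slitlet_width_mm=0.0,
        maximum_slitlet_width_mm=2.0,
        save_intermediate_results=True,   # writes crosscorrelation.pdf
    )
expected_cat_image.writeto('expected_catalogued_lines.fits', overwrite=True)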
Example #3
    def run(self, rinput):

        nimages = len(rinput.obresult.frames)
        pattern = rinput.pattern
        pattern_length = len(pattern)

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))

        # check pattern sequence matches number of images
        if nimages % pattern_length != 0:
            raise ValueError('Number of images is not a multiple of pattern '
                             'length: {}, {}'.format(nimages, pattern_length))
        nsequences = nimages // pattern_length

        rectwv_coeff = rinput.rectwv_coeff
        self.logger.info(rectwv_coeff)
        self.logger.info('observation pattern: {}'.format(pattern))
        self.logger.info('nsequences.........: {}'.format(nsequences))

        full_set = pattern * nsequences
        self.logger.info('full set of images.: {}'.format(full_set))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # available combination methods
        method = getattr(combine, rinput.method)
        method_kwargs = rinput.method_kwargs

        # basic reduction of A images
        list_a = [
            rinput.obresult.frames[i] for i, char in enumerate(full_set)
            if char == 'A'
        ]
        with contextlib.ExitStack() as stack:
            self.logger.info('starting basic reduction of A images')
            hduls = [stack.enter_context(fname.open()) for fname in list_a]
            reduced_image_a = combine_imgs(hduls,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        reduced_image_a = flow(reduced_image_a)
        hdr = reduced_image_a[0].header
        self.set_base_headers(hdr)

        # basic reduction of B images
        list_b = [
            rinput.obresult.frames[i] for i, char in enumerate(full_set)
            if char == 'B'
        ]
        with contextlib.ExitStack() as stack:
            self.logger.info('starting basic reduction of B images')
            hduls = [stack.enter_context(fname.open()) for fname in list_b]
            reduced_image_b = combine_imgs(hduls,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        reduced_image_b = flow(reduced_image_b)
        hdr = reduced_image_b[0].header
        self.set_base_headers(hdr)

        # save intermediate reduced_image_a and reduced_image_b
        self.save_intermediate_img(reduced_image_a, 'reduced_image_a.fits')
        self.save_intermediate_img(reduced_image_b, 'reduced_image_b.fits')

        # computation of A-B
        header_a = reduced_image_a[0].header
        header_b = reduced_image_b[0].header
        data_a = reduced_image_a[0].data.astype('float32')
        data_b = reduced_image_b[0].data.astype('float32')
        reduced_data = data_a - data_b

        # update reduced image header
        reduced_image = self.create_reduced_image(
            rinput,
            reduced_data,
            header_a,
            header_b,
            rinput.pattern,
            full_set,
            voffset_pix=0,
            header_mos_abba=None,
        )

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # apply rectification and wavelength calibration
        self.logger.info('begin rect.+wavecal. reduction of ABBA spectra')
        reduced_mos_abba = apply_rectwv_coeff(reduced_image, rectwv_coeff)
        header_mos_abba = reduced_mos_abba[0].header

        # combine A and B data by shifting B on top of A
        voffset_pix = rinput.voffset_pix

        if voffset_pix is not None and voffset_pix != 0:
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(voffset_pix))
            reduced_mos_abba_data = reduced_mos_abba[0].data.astype('float32')
            shifted_a_minus_b_data = shift_image2d(
                reduced_mos_abba_data,
                yoffset=-voffset_pix,
            ).astype('float32')
            reduced_mos_abba_combined_data = \
                reduced_mos_abba_data - shifted_a_minus_b_data
            # scale signal to exposure of a single image
            reduced_mos_abba_combined_data /= 2.0
        else:
            reduced_mos_abba_combined_data = None

        # update reduced combined image header
        reduced_mos_abba_combined = self.create_reduced_image(
            rinput,
            reduced_mos_abba_combined_data,
            header_a,
            header_b,
            rinput.pattern,
            full_set,
            voffset_pix,
            header_mos_abba=header_mos_abba)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos_abba,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of ABBA spectra')
        result = self.create_result(
            reduced_mos_abba=reduced_mos_abba,
            reduced_mos_abba_combined=reduced_mos_abba_combined)
        return result
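The core of the ABBA combination above (compute A-B, shift the negative B beam on top of the A beam, subtract and rescale) can be illustrated with plain NumPy, using scipy.ndimage.shift as a simplified stand-in for shift_image2d:

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
data_a = rng.normal(1000.0, 5.0, (2048, 2048)).astype('float32')  # reduced A
data_b = rng.normal(1000.0, 5.0, (2048, 2048)).astype('float32')  # reduced B
voffset_pix = 38.0  # vertical separation between the A and B positions

a_minus_b = data_a - data_b
# shift the image so that the (negative) B beam lands on the A beam
shifted_a_minus_b = ndimage.shift(a_minus_b, shift=(-voffset_pix, 0.0), order=1)
# subtract and scale the signal to the exposure of a single image
combined = (a_minus_b - shifted_a_minus_b) / 2.0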
Example #4
    def run(self, rinput):

        nimages = len(rinput.obresult.frames)
        pattern = rinput.pattern
        pattern_length = len(pattern)

        # check combination method
        if rinput.method != 'sigmaclip':
            if rinput.method_kwargs != {}:
                raise ValueError('Unexpected method_kwargs={}'.format(
                    rinput.method_kwargs))

        # check pattern sequence matches number of images
        if nimages % pattern_length != 0:
            raise ValueError('Number of images is not a multiple of pattern '
                             'length: {}, {}'.format(nimages, pattern_length))
        nsequences = nimages // pattern_length

        # check rectification and wavelength calibration information
        if rinput.rectwv_coeff is None and rinput.list_rectwv_coeff is None:
            raise ValueError('No rectwv_coeff nor list_rectwv_coeff data have '
                             'been provided')
        elif rinput.rectwv_coeff is not None and \
                rinput.list_rectwv_coeff is not None:
            raise ValueError("rectwv_coeff and list_rectwv_coeff cannot be "
                             "used simultaneously")
        elif rinput.rectwv_coeff is not None:
            list_rectwv_coeff = [rinput.rectwv_coeff] * nimages
        elif rinput.list_rectwv_coeff is not None:
            if len(rinput.list_rectwv_coeff) != nimages:
                raise ValueError("Unexpected number of rectwv_coeff files "
                                 "in list_rectwv_coeff")
            else:
                list_rectwv_coeff = rinput.list_rectwv_coeff
                # check filter and grism are the same in all JSON files
                for item in ['grism', 'filter']:
                    list_values = []
                    for calib in list_rectwv_coeff:
                        list_values.append(calib.tags[item])
                    if len(set(list_values)) != 1:
                        raise ValueError(
                            'list_rectwv_coeff contains coefficients for '
                            'different {}s'.format(item))
        else:
            raise ValueError("Unexpected error!")

        # grism and filter names
        grism_name = list_rectwv_coeff[0].tags['grism']
        filter_name = list_rectwv_coeff[0].tags['filter']

        # compute offsets from WCS info in image headers
        with contextlib.ExitStack() as stack:
            hduls = [
                stack.enter_context(fname.open())
                for fname in rinput.obresult.frames
            ]
            sep_arcsec, spatial_scales = compute_wcs_offsets(hduls)

        sep_pixel = np.round(sep_arcsec / spatial_scales, 6)

        for i in range(nimages):
            self.logger.info(list_rectwv_coeff[i])
        self.logger.info(
            'observation pattern..................: {}'.format(pattern))
        self.logger.info(
            'nsequences...........................: {}'.format(nsequences))
        self.logger.info(
            'offsets (arcsec, from WCS)...........: {}'.format(sep_arcsec))
        self.logger.info(
            'spatial scales (arcsec/pix, from WCS): {}'.format(spatial_scales))
        self.logger.info(
            'spatial scales (pixels, from WCS)....: {}'.format(sep_pixel))

        full_set = pattern * nsequences
        self.logger.info(
            'full set of images...................: {}'.format(full_set))

        # basic parameters to determine useful pixels in the wavelength
        # direction
        dict_rtas = rinput.refine_target_along_slitlet

        valid_keys = [
            'npix_removed_near_ohlines', 'nwidth_medfilt',
            'save_individual_images', 'ab_different_target',
            'vpix_region_a_target', 'vpix_region_a_sky',
            'vpix_region_b_target', 'vpix_region_b_sky',
            'list_valid_wvregions_a', 'list_valid_wvregions_b'
        ]
        for dumkey in dict_rtas.keys():
            if dumkey not in valid_keys:
                raise ValueError('Unexpected key={}'.format(dumkey))

        if 'vpix_region_a_target' in dict_rtas.keys():
            vpix_region_a_target = dict_rtas['vpix_region_a_target']
        else:
            vpix_region_a_target = None

        if 'vpix_region_a_sky' in dict_rtas.keys():
            vpix_region_a_sky = dict_rtas['vpix_region_a_sky']
        else:
            vpix_region_a_sky = None

        if 'vpix_region_b_target' in dict_rtas.keys():
            vpix_region_b_target = dict_rtas['vpix_region_b_target']
        else:
            vpix_region_b_target = None

        if 'vpix_region_b_sky' in dict_rtas.keys():
            vpix_region_b_sky = dict_rtas['vpix_region_b_sky']
        else:
            vpix_region_b_sky = None

        if 'ab_different_target' in dict_rtas.keys():
            ab_different_target = int(dict_rtas['ab_different_target'])
            if ab_different_target not in [-1, 0, 1, 9]:
                raise ValueError('Invalid ab_different_target={} value'.format(
                    ab_different_target))
        else:
            raise ValueError('Missing ab_different_target value')

        try:
            npix_removed_near_ohlines = \
                int(dict_rtas['npix_removed_near_ohlines'])
        except KeyError:
            npix_removed_near_ohlines = 0
        except ValueError:
            raise ValueError('wrong value: npix_removed_near_ohlines='
                             '{}'.format(
                                 dict_rtas['npix_removed_near_ohlines']))

        if 'list_valid_wvregions_a' in dict_rtas.keys():
            if vpix_region_a_target is None:
                raise ValueError('Unexpected list_valid_wvregions_a when '
                                 'vpix_region_a_target is not set')
            list_valid_wvregions_a = dict_rtas['list_valid_wvregions_a']
        else:
            list_valid_wvregions_a = None

        if 'list_valid_wvregions_b' in dict_rtas.keys():
            if vpix_region_b_target is None:
                raise ValueError('Unexpected list_valid_wvregions_b when '
                                 'vpix_region_b_target is not set')

            list_valid_wvregions_b = dict_rtas['list_valid_wvregions_b']
        else:
            list_valid_wvregions_b = None

        try:
            nwidth_medfilt = int(dict_rtas['nwidth_medfilt'])
        except KeyError:
            nwidth_medfilt = 0
        except ValueError:
            raise ValueError('wrong value: nwidth_medfilt={}'.format(
                dict_rtas['nwidth_medfilt']))

        try:
            save_individual_images = int(dict_rtas['save_individual_images'])
        except KeyError:
            save_individual_images = 0
        except ValueError:
            raise ValueError('wrong value: save_individual_images={}'.format(
                dict_rtas['save_individual_images']))

        self.logger.info(
            'npix_removed_near_ohlines: {}'.format(npix_removed_near_ohlines))
        self.logger.info('nwidth_medfilt: {}'.format(nwidth_medfilt))
        self.logger.info(
            'vpix_region_a_target: {}'.format(vpix_region_a_target))
        self.logger.info(
            'vpix_region_b_target: {}'.format(vpix_region_b_target))
        self.logger.info(
            'list_valid_wvregions_a: {}'.format(list_valid_wvregions_a))
        self.logger.info(
            'list_valid_wvregions_b: {}'.format(list_valid_wvregions_b))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # basic reduction, rectification and wavelength calibration of
        # all the individual images
        list_reduced_mos_images = []
        self.logger.info('starting reduction of individual images')
        for i, char in enumerate(full_set):
            frame = rinput.obresult.frames[i]
            self.logger.info('image {} ({} of {})'.format(
                char, i + 1, nimages))
            self.logger.info('image: {}'.format(frame.filename))
            with frame.open() as f:
                base_header = f[0].header.copy()
                data = f[0].data.astype('float32')
            grism_name_ = base_header['grism']
            if grism_name_ != grism_name:
                raise ValueError('Incompatible grism name in '
                                 'rectwv_coeff.json file and FITS image')
            filter_name_ = base_header['filter']
            if filter_name_ != filter_name:
                raise ValueError('Incompatible filter name in '
                                 'rectwv_coeff.json file and FITS image')
            hdu = fits.PrimaryHDU(data, header=base_header)
            hdu.header['UUID'] = str(uuid.uuid1())
            hdul = fits.HDUList([hdu])
            # basic reduction
            reduced_image = flow(hdul)
            hdr = reduced_image[0].header
            self.set_base_headers(hdr)
            # rectification and wavelength calibration
            reduced_mos_image = apply_rectwv_coeff(reduced_image,
                                                   list_rectwv_coeff[i])
            if save_individual_images != 0:
                self.save_intermediate_img(
                    reduced_mos_image, 'reduced_mos_image_' + char + '_' +
                    frame.filename[:10] + '.fits')
            list_reduced_mos_images.append(reduced_mos_image)

        # intermediate PDF file with crosscorrelation plots
        if self.intermediate_results:
            from matplotlib.backends.backend_pdf import PdfPages
            pdf = PdfPages('crosscorrelation_ab.pdf')
        else:
            pdf = None

        # compute offsets between images
        self.logger.info('computing offsets between individual images')
        first_a = True
        first_b = True
        xisok_a = None
        xisok_b = None
        reference_profile_a = None
        reference_profile_b = None
        refine_a = vpix_region_a_target is not None
        refine_b = vpix_region_b_target is not None
        list_offsets = []
        for i, char in enumerate(full_set):
            if char == 'A' and refine_a:
                refine_image = True
            elif char == 'B' and refine_b:
                refine_image = True
            else:
                refine_image = False
            if refine_image:
                frame = rinput.obresult.frames[i]
                isky = get_isky(i, pattern)
                frame_sky = rinput.obresult.frames[isky]
                self.logger.info('image {} ({} of {})'.format(
                    char, i + 1, nimages))
                self.logger.info('image: {}'.format(frame.filename))
                self.logger.info('(sky): {}'.format(frame_sky.filename))
                data = list_reduced_mos_images[i][0].data.copy()
                data_sky = list_reduced_mos_images[isky][0].data
                data -= data_sky
                base_header = list_reduced_mos_images[i][0].header

                # get useful pixels in the wavelength direction
                if char == 'A' and first_a:
                    xisok_a = useful_mos_xpixels(
                        data,
                        base_header,
                        vpix_region=vpix_region_a_target,
                        npix_removed_near_ohlines=npix_removed_near_ohlines,
                        list_valid_wvregions=list_valid_wvregions_a,
                        debugplot=0)
                elif char == 'B' and first_b:
                    xisok_b = useful_mos_xpixels(
                        data,
                        base_header,
                        vpix_region=vpix_region_b_target,
                        npix_removed_near_ohlines=npix_removed_near_ohlines,
                        list_valid_wvregions=list_valid_wvregions_b,
                        debugplot=0)

                if char == 'A':
                    nsmin = vpix_region_a_target[0]
                    nsmax = vpix_region_a_target[1]
                    xisok = xisok_a
                    if vpix_region_a_sky is not None:
                        nsmin_sky = vpix_region_a_sky[0]
                        nsmax_sky = vpix_region_a_sky[1]
                        skysubtraction = True
                    else:
                        nsmin_sky = None
                        nsmax_sky = None
                        skysubtraction = False
                elif char == 'B':
                    nsmin = vpix_region_b_target[0]
                    nsmax = vpix_region_b_target[1]
                    xisok = xisok_b
                    if vpix_region_b_sky is not None:
                        nsmin_sky = vpix_region_b_sky[0]
                        nsmax_sky = vpix_region_b_sky[1]
                        skysubtraction = True
                    else:
                        nsmin_sky = None
                        nsmax_sky = None
                        skysubtraction = False
                else:
                    raise ValueError('Unexpected char value: {}'.format(char))

                # initial slitlet region
                slitlet2d = data[(nsmin - 1):nsmax, :].copy()
                if skysubtraction:
                    slitlet2d_sky = data[(nsmin_sky - 1):nsmax_sky, :].copy()
                    median_sky = np.median(slitlet2d_sky, axis=0)
                    slitlet2d -= median_sky

                # selected wavelength regions after blocking OH lines
                slitlet2d_blocked = slitlet2d[:, xisok]

                # apply median filter in the X direction
                if nwidth_medfilt > 1:
                    slitlet2d_blocked_smoothed = ndimage.filters.median_filter(
                        slitlet2d_blocked, size=(1, nwidth_medfilt))
                else:
                    slitlet2d_blocked_smoothed = slitlet2d_blocked

                # ---
                # convert to 1D series of spatial profiles (note: this
                # option does not work very well when the signal is low;
                # a simple mean profile does a better job)
                # profile = slitlet2d_blocked_smoothed.transpose().ravel()
                # ---
                profile = np.mean(slitlet2d_blocked_smoothed, axis=1)
                if char == 'A' and first_a:
                    reference_profile_a = profile.copy()
                elif char == 'B' and first_b:
                    reference_profile_b = profile.copy()
                # ---
                # # To write pickle file
                # import pickle
                # with open(frame.filename[:10] + '_profile.pkl', 'wb') as f:
                #     pickle.dump(profile, f)
                # # To read pickle file
                # with open('test.pkl', 'rb') as f:
                #     x = pickle.load(f)
                # ---

                # crosscorrelation to find offset
                naround_zero = (nsmax - nsmin) // 3
                if char == 'A':
                    reference_profile = reference_profile_a
                elif char == 'B':
                    reference_profile = reference_profile_b
                else:
                    raise ValueError('Unexpected char value: {}'.format(char))
                offset, fpeak = periodic_corr1d(
                    sp_reference=reference_profile,
                    sp_offset=profile,
                    remove_mean=False,
                    frac_cosbell=0.10,
                    zero_padding=11,
                    fminmax=None,
                    nfit_peak=5,
                    naround_zero=naround_zero,
                    sp_label='spatial profile',
                    plottitle='Image #{} (type {}), {}'.format(
                        i + 1, char, frame.filename[:10]),
                    pdf=pdf)
                # round to 4 decimal places
                if abs(offset) < 1E-4:
                    offset = 0.0  # avoid -0.0
                else:
                    offset = round(offset, 4)
                list_offsets.append(offset)

                # end of loop
                if char == 'A' and first_a:
                    first_a = False
                elif char == 'B' and first_b:
                    first_b = False
            else:
                list_offsets.append(0.0)
        self.logger.info('computed offsets: {}'.format(list_offsets))

        self.logger.info('correcting vertical offsets between individual '
                         'images')
        list_a = []
        list_b = []
        for i, (char, offset) in enumerate(zip(full_set, list_offsets)):
            frame = rinput.obresult.frames[i]
            self.logger.info('image {} ({} of {})'.format(
                char, i + 1, nimages))
            self.logger.info('image: {}'.format(frame.filename))
            reduced_mos_image = list_reduced_mos_images[i]
            data = reduced_mos_image[0].data
            base_header = reduced_mos_image[0].header
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(offset))
            if offset != 0:
                reduced_mos_image[0].data = shift_image2d(
                    data, yoffset=-offset).astype('float32')
            base_header['HISTORY'] = 'Applying voffset_pix {}'.format(offset)
            if save_individual_images != 0:
                self.save_intermediate_img(
                    reduced_mos_image, 'reduced_mos_image_refined_' + char +
                    '_' + frame.filename[:10] + '.fits')

            # store reduced_mos_image
            if char == 'A':
                list_a.append(reduced_mos_image)
            elif char == 'B':
                list_b.append(reduced_mos_image)
            else:
                raise ValueError('Unexpected char value: {}'.format(char))

        # combination method
        method = getattr(combine, rinput.method)
        method_kwargs = rinput.method_kwargs

        # final combination of A images
        self.logger.info('combining individual A images')
        reduced_mos_image_a = combine_imgs(list_a,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        self.save_intermediate_img(reduced_mos_image_a,
                                   'reduced_mos_image_a.fits')

        # final combination of B images
        self.logger.info('combining individual B images')
        reduced_mos_image_b = combine_imgs(list_b,
                                           method=method,
                                           method_kwargs=method_kwargs,
                                           errors=False,
                                           prolog=None)
        self.save_intermediate_img(reduced_mos_image_b,
                                   'reduced_mos_image_b.fits')

        self.logger.info('mixing A and B spectra')
        header_a = reduced_mos_image_a[0].header
        header_b = reduced_mos_image_b[0].header
        data_a = reduced_mos_image_a[0].data.astype('float32')
        data_b = reduced_mos_image_b[0].data.astype('float32')

        reduced_mos_abba_data = data_a - data_b

        # update reduced mos image header
        reduced_mos_abba = self.create_mos_abba_image(rinput,
                                                      dict_rtas,
                                                      reduced_mos_abba_data,
                                                      header_a,
                                                      header_b,
                                                      pattern,
                                                      full_set,
                                                      list_offsets,
                                                      voffset_pix=0)

        # combine A and B data by shifting B on top of A
        if abs(ab_different_target) == 0:
            len_prof_a = len(reference_profile_a)
            len_prof_b = len(reference_profile_b)
            if len_prof_a == len_prof_b:
                reference_profile = reference_profile_a
                profile = reference_profile_b
                naround_zero = len_prof_a // 3
            elif len_prof_a > len_prof_b:
                ndiff = len_prof_a - len_prof_b
                reference_profile = reference_profile_a
                profile = np.concatenate(
                    (reference_profile_b, np.zeros(ndiff, dtype='float')))
                naround_zero = len_prof_a // 3
            else:
                ndiff = len_prof_b - len_prof_a
                reference_profile = np.concatenate(
                    (reference_profile_a, np.zeros(ndiff, dtype='float')))
                profile = reference_profile_b
                naround_zero = len_prof_b // 3
            offset, fpeak = periodic_corr1d(
                sp_reference=reference_profile,
                sp_offset=profile,
                remove_mean=False,
                frac_cosbell=0.10,
                zero_padding=11,
                fminmax=None,
                nfit_peak=5,
                naround_zero=naround_zero,
                sp_label='spatial profile',
                plottitle='Comparison of A and B profiles',
                pdf=pdf)
            voffset_pix = vpix_region_b_target[0] - vpix_region_a_target[0]
            voffset_pix += offset
        elif abs(ab_different_target) == 1:
            # apply nominal offset (from WCS info) between first A
            # and first B
            voffset_pix = sep_pixel[1] * ab_different_target
        elif ab_different_target == 9:
            voffset_pix = None
        else:
            raise ValueError(
                'Invalid ab_different_target={}'.format(ab_different_target))

        # close output PDF file
        if pdf is not None:
            pdf.close()

        if voffset_pix is not None:
            self.logger.info(
                'correcting vertical offset (pixels): {}'.format(voffset_pix))
            shifted_a_minus_b_data = shift_image2d(
                reduced_mos_abba_data,
                yoffset=-voffset_pix,
            ).astype('float32')
            reduced_mos_abba_combined_data = \
                reduced_mos_abba_data - shifted_a_minus_b_data
            # scale signal to exposure of a single image
            reduced_mos_abba_combined_data /= 2.0
        else:
            reduced_mos_abba_combined_data = None

        # update reduced mos combined image header
        reduced_mos_abba_combined = self.create_mos_abba_image(
            rinput, dict_rtas, reduced_mos_abba_combined_data, header_a,
            header_b, pattern, full_set, list_offsets, voffset_pix)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(list_rectwv_coeff[0])
            save_spectral_lines_ds9(list_rectwv_coeff[0])

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos_abba,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of ABBA spectra')
        result = self.create_result(
            reduced_mos_abba=reduced_mos_abba,
            reduced_mos_abba_combined=reduced_mos_abba_combined)
        return result
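A rough stand-in for the periodic_corr1d step used above to align the individual images: the vertical offset between two 1D spatial profiles can be estimated with a plain cross-correlation (the real function additionally apodizes, zero-pads and fits the correlation peak to obtain a sub-pixel offset).

import numpy as np

def integer_offset(reference_profile, profile):
    """Integer shift (pixels) that best aligns profile with reference_profile."""
    ref = reference_profile - reference_profile.mean()
    prof = profile - profile.mean()
    corr = np.correlate(prof, ref, mode='full')
    lags = np.arange(-len(ref) + 1, len(prof))
    # positive result: features in profile appear at larger pixel numbers
    return lags[np.argmax(corr)]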
Example #5
    def run(self, rinput):
        self.logger.info('starting reduction of stare spectra')

        self.logger.info(rinput.master_rectwv)

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=median)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # rectification and wavelength calibration (if a model has
        # been provided)
        if rinput.master_rectwv:
            # RectWaveCoeff object with rectification and wavelength
            # calibration coefficients for the particular CSU configuration
            rectwv_coeff = rectwv_coeff_from_mos_library(
                reduced_image, rinput.master_rectwv)

            # apply rectification and wavelength calibration
            stare_image = apply_rectwv_coeff(reduced_image, rectwv_coeff)

            # save as JSON file in work directory
            self.save_structured_as_json(rectwv_coeff, 'rectwv_coeff.json')

            # ds9 region files (to be saved in the work directory)
            if self.intermediate_results:
                save_four_ds9(rectwv_coeff)
                save_spectral_lines_ds9(rectwv_coeff)

            # compute median spectra employing the useful region of the
            # rectified image
            if self.intermediate_results:
                for imode, outfile in enumerate([
                        'median_spectra_full', 'median_spectra_slitlets',
                        'median_spectrum_slitlets'
                ]):
                    median_image = median_slitlets_rectified(stare_image,
                                                             mode=imode)
                    self.save_intermediate_img(median_image, outfile + '.fits')

            # image_wl_calibrated = True

        else:

            stare_image = reduced_image

            self.logger.info('No wavelength calibration provided')
            grism_value = hdr.get('GRISM', 'unknown')
            self.logger.debug('GRISM is %s', grism_value)
            if grism_value.lower() == 'open':
                self.logger.debug('GRISM is %s, so this seems OK', grism_value)

            # image_wl_calibrated = False

        if rinput.master_sky:
            # Sky subtraction after rectification
            msky = rinput.master_sky.open()
            # check that the sky and current images have the same shape;
            # if so, go ahead
            if msky[0].data.shape != stare_image[0].data.shape:
                self.logger.warning(
                    "sky and current image don't have the same shape")
            else:
                sky_corrector = proc.SkyCorrector(
                    msky[0].data,
                    datamodel=self.datamodel,
                    calibid=self.datamodel.get_imgid(msky))

                stare_image = sky_corrector(stare_image)
        else:
            self.logger.info('No sky image provided')

        # save results in results directory
        self.logger.info('end reduction of stare spectra')
        result = self.create_result(reduced_image=reduced_image,
                                    stare=stare_image)
        return result
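A simplified view of the sky-subtraction branch above, assuming that SkyCorrector essentially subtracts the master-sky frame once the shape check has passed (the real corrector also records the calibration id in the data model):

def subtract_master_sky(stare_image, msky):
    """Subtract a master-sky HDUList from a stare-image HDUList (in place)."""
    if msky[0].data.shape != stare_image[0].data.shape:
        # mirror the recipe: warn and leave the image untouched
        print("sky and current image don't have the same shape")
    else:
        stare_image[0].data = (stare_image[0].data
                               - msky[0].data).astype('float32')
    return stare_image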
Example #6
    def run(self, rinput):
        self.logger.info('starting rect.+wavecal. reduction of stare spectra')

        self.logger.info(rinput.master_rectwv)
        self.logger.info('Wavelength calibration refinement mode: {}'.format(
            rinput.refine_wavecalib_mode))
        self.logger.info('Minimum slitlet width (mm)............: {}'.format(
            rinput.minimum_slitlet_width_mm))
        self.logger.info('Maximum slitlet width (mm)............: {}'.format(
            rinput.maximum_slitlet_width_mm))
        self.logger.info('Global offset X direction (pixels)....: {}'.format(
            rinput.global_integer_offset_x_pix))
        self.logger.info('Global offset Y direction (pixels)....: {}'.format(
            rinput.global_integer_offset_y_pix))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=median)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # RectWaveCoeff object with rectification and wavelength
        # calibration coefficients for the particular CSU configuration
        rectwv_coeff = rectwv_coeff_from_mos_library(reduced_image,
                                                     rinput.master_rectwv)

        # set global offsets
        rectwv_coeff.global_integer_offset_x_pix = \
            rinput.global_integer_offset_x_pix
        rectwv_coeff.global_integer_offset_y_pix = \
            rinput.global_integer_offset_y_pix

        # apply rectification and wavelength calibration
        reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # wavelength calibration refinement
        # 0 -> no refinement
        # 1 -> apply global offset to all the slitlets (using ARC lines)
        # 2 -> apply individual offset to each slitlet (using ARC lines)
        # 11 -> apply global offset to all the slitlets (using OH lines)
        # 12 -> apply individual offset to each slitlet (using OH lines)
        if rinput.refine_wavecalib_mode != 0:
            self.logger.info(
                'Refining wavelength calibration (mode={})'.format(
                    rinput.refine_wavecalib_mode))
            # refine RectWaveCoeff object
            rectwv_coeff, expected_catalog_lines = refine_rectwv_coeff(
                reduced_mos,
                rectwv_coeff,
                rinput.refine_wavecalib_mode,
                rinput.minimum_slitlet_width_mm,
                rinput.maximum_slitlet_width_mm,
                save_intermediate_results=self.intermediate_results)
            self.save_intermediate_img(expected_catalog_lines,
                                       'expected_catalog_lines.fits')
            # re-apply rectification and wavelength calibration
            reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of stare spectra')
        result = self.create_result(reduced_mos=reduced_mos,
                                    rectwv_coeff=rectwv_coeff)
        return result
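For quick reference, a small helper (not present in the original code) that spells out the refinement-mode codes listed in the comment above:

REFINEMENT_MODES = {
    0: ('no refinement', None),
    1: ('global offset for all slitlets', 'ARC lines'),
    2: ('individual offset for each slitlet', 'ARC lines'),
    11: ('global offset for all slitlets', 'OH lines'),
    12: ('individual offset for each slitlet', 'OH lines'),
}

def describe_refine_mode(mode):
    """Return a human-readable description of a refine_wavecalib_mode value."""
    scope, lines = REFINEMENT_MODES[mode]
    return scope if lines is None else '{} (using {})'.format(scope, lines)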
Example #7
File: stare.py  Project: bxy8804/pyemir
    def run(self, rinput):
        self.logger.info('starting rect.+wavecal. reduction of stare spectra')

        self.logger.info(rinput.master_rectwv)
        self.logger.info(
            'Wavelength calibration refinement mode....: {}'.format(
                rinput.refine_wavecalib_mode))
        self.logger.info(
            'Minimum slitlet width (mm)................: {}'.format(
                rinput.minimum_slitlet_width_mm))
        self.logger.info(
            'Maximum slitlet width (mm)................: {}'.format(
                rinput.maximum_slitlet_width_mm))
        self.logger.info(
            'Global integer offsets mode...............: {}'.format(
                rinput.global_integer_offsets_mode))
        self.logger.info(
            'Global integer offset X direction (pixels): {}'.format(
                rinput.global_integer_offset_x_pix))
        self.logger.info(
            'Global integer offset Y direction (pixels): {}'.format(
                rinput.global_integer_offset_y_pix))

        # build object to proceed with bpm, bias, dark and flat
        flow = self.init_filters(rinput)

        # apply bpm, bias, dark and flat
        reduced_image = basic_processing_with_combination(rinput,
                                                          flow,
                                                          method=sigmaclip)
        # update header with additional info
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # RectWaveCoeff object with rectification and wavelength
        # calibration coefficients for the particular CSU configuration
        rectwv_coeff = rectwv_coeff_from_mos_library(reduced_image,
                                                     rinput.master_rectwv)

        # wavelength calibration refinement
        # 0 -> no refinement
        # 1 -> apply global offset to all the slitlets (using ARC lines)
        # 2 -> apply individual offset to each slitlet (using ARC lines)
        # 11 -> apply global offset to all the slitlets (using OH lines)
        # 12 -> apply individual offset to each slitlet (using OH lines)
        if rinput.refine_wavecalib_mode != 0:
            main_header = reduced_image[0].header

            # determine useful slitlets
            csu_config = CsuConfiguration.define_from_header(main_header)
            # segregate slitlets
            list_useful_slitlets = csu_config.widths_in_range_mm(
                minwidth=rinput.minimum_slitlet_width_mm,
                maxwidth=rinput.maximum_slitlet_width_mm)
            # remove missing slitlets
            if len(rectwv_coeff.missing_slitlets) > 0:
                for iremove in rectwv_coeff.missing_slitlets:
                    if iremove in list_useful_slitlets:
                        list_useful_slitlets.remove(iremove)

            list_not_useful_slitlets = [
                i for i in list(range(1, EMIR_NBARS + 1))
                if i not in list_useful_slitlets
            ]
            self.logger.info(
                'list of useful slitlets: {}'.format(list_useful_slitlets))
            self.logger.info('list of unusable slitlets: {}'.format(
                list_not_useful_slitlets))

            # retrieve arc/OH lines
            catlines_all_wave, catlines_all_flux = retrieve_catlines(
                rinput.refine_wavecalib_mode, main_header['grism'])

            # global integer offsets
            if rinput.global_integer_offsets_mode == 'auto':
                if (rinput.global_integer_offset_x_pix != 0) or \
                        (rinput.global_integer_offset_y_pix != 0):
                    raise ValueError('Global integer offsets must be zero when'
                                     ' mode=auto')

                # ToDo: include additional airglow emission lines

                self.logger.info('computing synthetic image')
                # generate synthetic image
                synthetic_raw_data = synthetic_lines_rawdata(
                    catlines_all_wave, catlines_all_flux, list_useful_slitlets,
                    rectwv_coeff)
                synthetic_raw_header = main_header.copy()
                synthetic_raw_header['DATE-OBS'] = \
                    datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
                chistory = 'Synthetic image'
                synthetic_raw_header.add_history(chistory)
                hdu = fits.PrimaryHDU(synthetic_raw_data.astype('float32'),
                                      header=synthetic_raw_header)
                synthetic_raw_image = fits.HDUList([hdu])
                if self.intermediate_results:
                    self.save_intermediate_img(synthetic_raw_image,
                                               'synthetic_raw_image.fits')

                # cross-correlation to determine global integer offsets
                # (rescaling data arrays to [0, 1] before using the skimage
                # function; see the standalone sketch after this example)
                data1_rs, coef1_rs = rescale_array_to_z1z2(
                    reduced_image[0].data, (0, 1))
                data2_rs, coef2_rs = rescale_array_to_z1z2(
                    synthetic_raw_data, (0, 1))
                shifts, error, diffphase = register_translation(
                    data1_rs, data2_rs, 100)
                self.logger.info(
                    'global_float_offset_x_pix..: {}'.format(-shifts[1]))
                self.logger.info(
                    'global_float_offset_y_pix..: {}'.format(-shifts[0]))
                rectwv_coeff.global_integer_offset_x_pix = \
                    -int(round(shifts[1]))
                rectwv_coeff.global_integer_offset_y_pix = \
                    -int(round(shifts[0]))
                self.logger.info('global_integer_offset_x_pix: {}'.format(
                    rectwv_coeff.global_integer_offset_x_pix))
                self.logger.info('global_integer_offset_y_pix: {}'.format(
                    rectwv_coeff.global_integer_offset_y_pix))
                if self.intermediate_results:
                    data_product = np.fft.fft2(data1_rs) * \
                                   np.fft.fft2(data2_rs).conj()
                    cc_image = np.fft.fftshift(np.fft.ifft2(data_product))
                    power = np.log10(cc_image.real)
                    hdu_power = fits.PrimaryHDU(power)
                    hdul_power = fits.HDUList([hdu_power])
                    hdul_power.writeto('power.fits', overwrite=True)
            else:
                rectwv_coeff.global_integer_offset_x_pix = \
                    rinput.global_integer_offset_x_pix
                rectwv_coeff.global_integer_offset_y_pix = \
                    rinput.global_integer_offset_y_pix

            # apply initial rectification and wavelength calibration
            reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

            self.logger.info(
                'Refining wavelength calibration (mode={})'.format(
                    rinput.refine_wavecalib_mode))
            # refine RectWaveCoeff object
            rectwv_coeff, expected_catalog_lines = refine_rectwv_coeff(
                reduced_mos,
                rectwv_coeff,
                catlines_all_wave,
                catlines_all_flux,
                rinput.refine_wavecalib_mode,
                list_useful_slitlets,
                save_intermediate_results=self.intermediate_results)
            self.save_intermediate_img(expected_catalog_lines,
                                       'expected_catalog_lines.fits')

        # apply rectification and wavelength calibration
        reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

        # ds9 region files (to be saved in the work directory)
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)
            save_spectral_lines_ds9(rectwv_coeff)

        # compute median spectra employing the useful region of the
        # rectified image
        if self.intermediate_results:
            for imode, outfile in enumerate([
                    'median_spectra_full', 'median_spectra_slitlets',
                    'median_spectrum_slitlets'
            ]):
                median_image = median_slitlets_rectified(reduced_mos,
                                                         mode=imode)
                self.save_intermediate_img(median_image, outfile + '.fits')

        # save results in results directory
        self.logger.info('end rect.+wavecal. reduction of stare spectra')
        result = self.create_result(reduced_mos=reduced_mos,
                                    rectwv_coeff=rectwv_coeff)
        return result
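
In the recipe above, the global integer offsets are estimated by rescaling the observed and synthetic frames to [0, 1], cross-correlating them with register_translation (upsample_factor=100), and rounding the resulting shifts with the opposite sign. register_translation has been renamed phase_cross_correlation in recent scikit-image releases; the sketch below illustrates the same idea with the newer name. It is a minimal sketch, not part of the pipeline: the arrays are placeholders and the helper name estimate_global_integer_offsets is made up for illustration.

# Minimal standalone sketch of the global-offset step, assuming a recent
# scikit-image where register_translation has been renamed to
# phase_cross_correlation; array contents below are placeholders.
import numpy as np
from skimage.registration import phase_cross_correlation


def estimate_global_integer_offsets(observed, synthetic, upsample_factor=100):
    """Return (offset_x, offset_y) in pixels, with the sign convention
    used in the recipe above (offset to be applied to the calibration)."""
    # rescale both arrays to [0, 1], mirroring rescale_array_to_z1z2
    def rescale01(data):
        dmin, dmax = data.min(), data.max()
        return (data - dmin) / (dmax - dmin)

    shifts, error, diffphase = phase_cross_correlation(
        rescale01(observed), rescale01(synthetic),
        upsample_factor=upsample_factor)
    # shifts is (row, col); the recipe stores the offsets with opposite sign
    offset_x = -int(round(shifts[1]))
    offset_y = -int(round(shifts[0]))
    return offset_x, offset_y


# hypothetical usage with synthetic random data, just to show the call
rng = np.random.default_rng(0)
img_obs = rng.normal(size=(64, 64))
img_syn = np.roll(img_obs, shift=(2, -3), axis=(0, 1))
print(estimate_global_integer_offsets(img_obs, img_syn))
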
Example #8
def refine_rectwv_coeff(input_image, rectwv_coeff,
                        refine_wavecalib_mode,
                        minimum_slitlet_width_mm,
                        maximum_slitlet_width_mm,
                        save_intermediate_results=False,
                        debugplot=0):
    """Refine RectWaveCoeff object using a catalogue of lines

    The refinement uses either ARC lines or OH sky lines, as selected
    through the refine_wavecalib_mode parameter (see below).

    Parameters
    ----------
    input_image : HDUList object
        Input 2D image.
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    refine_wavecalib_mode : int
        Integer, indicating the type of refinement:
        0 : no refinement
        1 : apply the same global offset to all the slitlets (using ARC lines)
        2 : apply individual offset to each slitlet (using ARC lines)
        11 : apply the same global offset to all the slitlets (using OH lines)
        12 : apply individual offset to each slitlet (using OH lines)
    minimum_slitlet_width_mm : float
        Minimum slitlet width (mm) for a valid slitlet.
    maximum_slitlet_width_mm : float
        Maximum slitlet width (mm) for a valid slitlet.
    save_intermediate_results : bool
        If True, save plots in PDF files
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    refined_rectwv_coeff : RectWaveCoeff instance
        Refined rectification and wavelength calibration coefficients
        for the particular CSU configuration.
    expected_cat_image : HDUList object
        Output 2D image with the expected catalogued lines.

    """

    logger = logging.getLogger(__name__)

    if save_intermediate_results:
        from matplotlib.backends.backend_pdf import PdfPages
        pdf = PdfPages('crosscorrelation.pdf')
    else:
        pdf = None

    # image header
    main_header = input_image[0].header
    filter_name = main_header['filter']
    grism_name = main_header['grism']

    # protections
    if refine_wavecalib_mode not in [1, 2, 11, 12]:
        logger.error('Wavelength calibration refinement mode={}'.format(
            refine_wavecalib_mode
        ))
        raise ValueError("Invalid wavelength calibration refinement mode")

    # read tabulated lines
    if refine_wavecalib_mode in [1, 2]:        # ARC lines
        if grism_name == 'LR':
            catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat'
        else:
            catlines_file = 'lines_argon_neon_xenon_empirical.dat'
        dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file)
        arc_lines_tmpfile = StringIO(dumdata.decode('utf8'))
        catlines = np.genfromtxt(arc_lines_tmpfile)
        # define wavelength and flux as separate arrays
        catlines_all_wave = catlines[:, 0]
        catlines_all_flux = catlines[:, 1]
        mode = refine_wavecalib_mode
    elif refine_wavecalib_mode in [11, 12]:    # OH lines
        dumdata = pkgutil.get_data(
            'emirdrp.instrument.configs',
            'Oliva_etal_2013.dat'
        )
        oh_lines_tmpfile = StringIO(dumdata.decode('utf8'))
        catlines = np.genfromtxt(oh_lines_tmpfile)
        # define wavelength and flux as separate arrays
        catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0]))
        catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2]))
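        # (the catalogue provides two wavelength columns per entry, the OH
        # doublet components; both are stacked and the single flux column is
        # duplicated so that each component keeps the same flux)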
        mode = refine_wavecalib_mode - 10
    else:
        raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode))

    # initialize output
    refined_rectwv_coeff = deepcopy(rectwv_coeff)

    logger.info('Computing median spectrum')
    # compute median spectrum and normalize it
    sp_median = median_slitlets_rectified(
        input_image,
        mode=2,
        minimum_slitlet_width_mm=minimum_slitlet_width_mm,
        maximum_slitlet_width_mm=maximum_slitlet_width_mm
    )[0].data
    sp_median /= sp_median.max()

    # determine minimum and maximum useful wavelength
    jmin, jmax = find_pix_borders(sp_median, 0)
    naxis1 = main_header['naxis1']
    naxis2 = main_header['naxis2']
    crpix1 = main_header['crpix1']
    crval1 = main_header['crval1']
    cdelt1 = main_header['cdelt1']
    xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
    if grism_name == 'LR':
        wv_parameters = set_wv_parameters(filter_name, grism_name)
        wave_min = wv_parameters['wvmin_useful']
        wave_max = wv_parameters['wvmax_useful']
    else:
        wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
        wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
    logger.info('Setting wave_min to {}'.format(wave_min))
    logger.info('Setting wave_max to {}'.format(wave_max))

    # extract subset of catalogue lines within current wavelength range
    lok1 = catlines_all_wave >= wave_min
    lok2 = catlines_all_wave <= wave_max
    catlines_reference_wave = catlines_all_wave[lok1 & lok2]
    catlines_reference_flux = catlines_all_flux[lok1 & lok2]
    catlines_reference_flux /= catlines_reference_flux.max()

    # estimate sigma to broaden catalogue lines
    csu_config = CsuConfiguration.define_from_header(main_header)
    # segregate slitlets
    list_useful_slitlets = csu_config.widths_in_range_mm(
        minwidth=minimum_slitlet_width_mm,
        maxwidth=maximum_slitlet_width_mm
    )
    # remove missing slitlets
    if len(refined_rectwv_coeff.missing_slitlets) > 0:
        for iremove in refined_rectwv_coeff.missing_slitlets:
            if iremove in list_useful_slitlets:
                list_useful_slitlets.remove(iremove)

    list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1))
                                if i not in list_useful_slitlets]
    logger.info('list of useful slitlets: {}'.format(
        list_useful_slitlets))
    logger.info('list of unusable slitlets: {}'.format(
        list_not_useful_slitlets))
    tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet)
                           for islitlet in list_useful_slitlets])
    widths_summary = summary(tempwidths)
    logger.info('Statistics of useful slitlet widths (mm):')
    logger.info('- npoints....: {0:d}'.format(widths_summary['npoints']))
    logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean']))
    logger.info('- median.....: {0:7.3f}'.format(widths_summary['median']))
    logger.info('- std........: {0:7.3f}'.format(widths_summary['std']))
    logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std']))
    # empirical transformation of slit width (mm) to pixels
    sigma_broadening = cdelt1 * widths_summary['median']

    # convolve location of catalogue lines to generate expected spectrum
    xwave_reference, sp_reference = convolve_comb_lines(
        catlines_reference_wave, catlines_reference_flux, sigma_broadening,
        crpix1, crval1, cdelt1, naxis1
    )
    sp_reference /= sp_reference.max()

    # generate image2d with expected lines
    image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
    hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header)
    expected_cat_image = fits.HDUList([hdu])

    if (abs(debugplot) % 10 != 0) or (pdf is not None):
        ax = ximplotxy(xwave, sp_median, 'C1-',
                       xlabel='Wavelength (Angstroms, in vacuum)',
                       ylabel='Normalized number of counts',
                       title='Median spectrum',
                       label='observed spectrum', show=False)
        # overplot reference catalogue lines
        ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-',
                markerfmt=' ', basefmt='C4-', label='tabulated lines')
        # overplot convolved reference lines
        ax.plot(xwave_reference, sp_reference, 'C0-',
                label='expected spectrum')
        ax.legend()
        if pdf is not None:
            pdf.savefig()
        else:
            pause_debugplot(debugplot=debugplot, pltshow=True)

    # compute baseline signal in sp_median
    baseline = np.percentile(sp_median[sp_median > 0], q=10)
    if (abs(debugplot) % 10 != 0) or (pdf is not None):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(sp_median, bins=1000, log=True)
        ax.set_xlabel('Normalized number of counts')
        ax.set_ylabel('Number of pixels')
        ax.set_title('Median spectrum')
        ax.axvline(float(baseline), linestyle='--', color='grey')
        if pdf is not None:
            pdf.savefig()
        else:
            geometry = (0, 0, 640, 480)
            set_window_geometry(geometry)
            plt.show()
    # subtract baseline from sp_median (only pixels with signal above zero)
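    # (the baseline is the 10th percentile of the pixels with signal,
    # computed above as an estimate of the continuum/background level, so
    # that the cross-correlation below is driven mainly by the line features)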
    lok = np.where(sp_median > 0)
    sp_median[lok] -= baseline

    # compute global offset through periodic correlation
    logger.info('Computing global offset')
    global_offset, fpeak = periodic_corr1d(
        sp_reference=sp_reference,
        sp_offset=sp_median,
        fminmax=None,
        naround_zero=50,
        plottitle='Median spectrum (cross-correlation)',
        pdf=pdf,
        debugplot=debugplot
    )
    logger.info('Global offset: {} pixels'.format(-global_offset))

    missing_slitlets = rectwv_coeff.missing_slitlets

    if mode == 1:
        # apply computed offset to obtain refined_rectwv_coeff_global
        for islitlet in range(1, EMIR_NBARS + 1):
            if islitlet not in missing_slitlets:
                i = islitlet - 1
                dumdict = refined_rectwv_coeff.contents[i]
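                # subtracting global_offset*cdelt1 from the constant term of
                # the wavelength-calibration polynomial shifts the whole
                # wavelength solution by the measured offset (pixels converted
                # to wavelength units through cdelt1)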
                dumdict['wpoly_coeff'][0] -= global_offset*cdelt1

    elif mode == 2:
        # compute individual offset for each slitlet
        logger.info('Computing individual offsets')
        median_55sp = median_slitlets_rectified(input_image, mode=1)
        offset_array = np.zeros(EMIR_NBARS)
        xplot = []
        yplot = []
        xplot_skipped = []
        yplot_skipped = []
        cout = '0'
        for islitlet in range(1, EMIR_NBARS + 1):
            if islitlet in list_useful_slitlets:
                i = islitlet - 1
                sp_median = median_55sp[0].data[i, :]
                lok = np.where(sp_median > 0)
                if np.any(lok):
                    baseline = np.percentile(sp_median[lok], q=10)
                    sp_median[lok] -= baseline
                    sp_median /= sp_median.max()
                    offset_array[i], fpeak = periodic_corr1d(
                        sp_reference=sp_reference,
                        sp_offset=median_55sp[0].data[i, :],
                        fminmax=None,
                        naround_zero=50,
                        plottitle='slitlet #{0} (cross-correlation)'.format(
                            islitlet),
                        pdf=pdf,
                        debugplot=debugplot
                    )
                else:
                    offset_array[i] = 0.0
                dumdict = refined_rectwv_coeff.contents[i]
                dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1
                xplot.append(islitlet)
                yplot.append(-offset_array[i])
                # second correction
                wpoly_coeff_refined = check_wlcalib_sp(
                    sp=median_55sp[0].data[i, :],
                    crpix1=crpix1,
                    crval1=crval1-offset_array[i]*cdelt1,
                    cdelt1=cdelt1,
                    wv_master=catlines_reference_wave,
                    coeff_ini=dumdict['wpoly_coeff'],
                    naxis1_ini=EMIR_NAXIS1,
                    title='slitlet #{0} (after applying offset)'.format(
                        islitlet),
                    ylogscale=False,
                    pdf=pdf,
                    debugplot=debugplot
                )
                dumdict['wpoly_coeff'] = wpoly_coeff_refined
                cout += '.'

            else:
                xplot_skipped.append(islitlet)
                yplot_skipped.append(0)
                cout += 'i'

            if islitlet % 10 == 0:
                if cout != 'i':
                    cout = str(islitlet // 10)

            logger.info(cout)

        # show offsets with opposite sign
        stat_summary = summary(np.array(yplot))
        logger.info('Statistics of individual slitlet offsets (pixels):')
        logger.info('- npoints....: {0:d}'.format(stat_summary['npoints']))
        logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean']))
        logger.info('- median.....: {0:7.3f}'.format(stat_summary['median']))
        logger.info('- std........: {0:7.3f}'.format(stat_summary['std']))
        logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[
                                                        'robust_std']))
        if (abs(debugplot) % 10 != 0) or (pdf is not None):
            ax = ximplotxy(xplot, yplot,
                           linestyle='', marker='o', color='C0',
                           xlabel='slitlet number',
                           ylabel='-offset (pixels) = offset to be applied',
                           title='cross-correlation result',
                           show=False, **{'label': 'individual slitlets'})
            if len(xplot_skipped) > 0:
                ax.plot(xplot_skipped, yplot_skipped, 'mx')
            ax.axhline(-global_offset, linestyle='--', color='C1',
                       label='global offset')
            ax.legend()
            if pdf is not None:
                pdf.savefig()
            else:
                pause_debugplot(debugplot=debugplot, pltshow=True)
    else:
        raise ValueError('Unexpected mode={}'.format(mode))

    # close output PDF file
    if pdf is not None:
        pdf.close()

    # return result
    return refined_rectwv_coeff, expected_cat_image
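
For context, a minimal usage sketch of the function above: the file name is hypothetical, master_rectwv stands for a MasterRectWave calibration obtained elsewhere, and rectwv_coeff_from_mos_library / apply_rectwv_coeff are the emirdrp helpers already used in the earlier examples (imports omitted). This is a sketch under those assumptions, not a definitive recipe.

# Minimal usage sketch (hypothetical inputs; rectwv_coeff_from_mos_library,
# apply_rectwv_coeff and refine_rectwv_coeff are assumed to be in scope, as
# in the examples above).
from astropy.io import fits

reduced_image = fits.open('reduced_image.fits')   # hypothetical reduced frame

# initial calibration from the MOS library (master_rectwv obtained elsewhere)
rectwv_coeff = rectwv_coeff_from_mos_library(reduced_image, master_rectwv)

# rectify + wavelength-calibrate with the initial coefficients
reduced_mos = apply_rectwv_coeff(reduced_image, rectwv_coeff)

# refine using OH sky lines, one offset per slitlet (mode 12)
refined_rectwv_coeff, expected_cat_image = refine_rectwv_coeff(
    reduced_mos,
    rectwv_coeff,
    refine_wavecalib_mode=12,
    minimum_slitlet_width_mm=0.0,
    maximum_slitlet_width_mm=2.0,
    save_intermediate_results=False,
    debugplot=0,
)

# re-apply the refined calibration to the reduced frame
reduced_mos_refined = apply_rectwv_coeff(reduced_image, refined_rectwv_coeff)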