# Imports assumed by the test snippets below (following astropy's own
# test_logger.py / nddata test conventions; adjust paths to your layout).
import sys
import warnings

import pytest

from astropy import log
from astropy.logger import conf
from astropy.nddata import CCDData
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning


def test_log_to_list_origin2():
    with log.log_to_list(filter_origin='astropy.wcs') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 0
def test_log_to_list_level():
    with log.log_to_list(filter_level='ERROR') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir):
    tmpfile = tmpdir.join('temp.fits')
    ccd_data.write(tmpfile.strpath)
    log.setLevel('INFO')
    explicit_unit_name = "photon"
    with log.log_to_list() as log_list:
        ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
        assert explicit_unit_name in log_list[0].message
def test_warnings_logging():

    # Without warnings logging
    with catch_warnings() as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1
    assert warn_list[0].message.args[0] == "This is a warning"

    # With warnings logging
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'

    # With warnings logging (differentiate between Astropy and non-Astropy)
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
            warnings.warn("This is another warning, not from Astropy")
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 1
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
    assert warn_list[0].message.args[0] == "This is another warning, not from Astropy"

    # Without warnings logging
    with catch_warnings() as warn_list:
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", AstropyUserWarning)
    assert len(log_list) == 0
    assert len(warn_list) == 1
    assert warn_list[0].message.args[0] == "This is a warning"
def test_exception_logging():

    # Without exception logging
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0

    # With exception logging
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith('Exception: This is an Exception')
    assert log_list[0].origin == 'astropy.tests.test_logger'

    # Without exception logging
    log.disable_exception_logging()
    try:
        with log.log_to_list() as log_list:
            raise Exception("This is an Exception")
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0] == "This is an Exception"
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 0
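# Minimal usage sketch (not part of the test module above) of the API the
# previous test exercises: log.enable_exception_logging() installs an
# excepthook that turns uncaught exceptions into ERROR log records, and
# log.log_to_list() collects emitted records for inspection.
def exception_logging_sketch():
    import sys

    from astropy import log

    log.enable_exception_logging()
    try:
        with log.log_to_list() as records:
            raise ValueError("something went wrong")
    except ValueError:
        # Route the traceback through the installed hook, as the test does.
        sys.excepthook(*sys.exc_info())
    finally:
        log.disable_exception_logging()
    return records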
# Parametrized over the levels the body checks (None falls back to the
# configured default).
@pytest.mark.parametrize('level', [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_list(level):

    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_list() as log_list:
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")
    finally:
        log.setLevel(orig_level)

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_list) == 4
    elif level == 'INFO':
        assert len(log_list) == 3
    elif level == 'WARN':
        assert len(log_list) == 2
    elif level == 'ERROR':
        assert len(log_list) == 1

    # Check list content
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith('Error message')
    assert log_list[0].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 2:
        assert log_list[1].levelname == 'WARNING'
        assert log_list[1].message.startswith('Warning message')
        assert log_list[1].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 3:
        assert log_list[2].levelname == 'INFO'
        assert log_list[2].message.startswith('Information message')
        assert log_list[2].origin == 'astropy.tests.test_logger'

    if len(log_list) >= 4:
        assert log_list[3].levelname == 'DEBUG'
        assert log_list[3].message.startswith('Debug message')
        assert log_list[3].origin == 'astropy.tests.test_logger'
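# Minimal capture sketch (assumed usage, not from the test module): the
# log_to_list() context manager used throughout these tests yields a plain
# list of records exposing .levelname, .message and .origin, optionally
# pre-filtered by level and/or origin.
def collect_wcs_errors():
    from astropy import log

    with log.log_to_list(filter_level='ERROR',
                         filter_origin='astropy.wcs') as records:
        log.error("Error message")      # kept only if emitted from astropy.wcs
        log.warning("Warning message")  # dropped by the level filter
    return [(rec.levelname, rec.message) for rec in records]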
def test_warnings_logging_with_custom_class():
    class CustomAstropyWarningClass(AstropyWarning):
        pass

    # With warnings logging
    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", CustomAstropyWarningClass)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warning_logging_with_io_votable_warning():
    from astropy.io.votable.exceptions import W02, vo_warn

    with pytest.warns(None) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            vo_warn(W02, ('a', 'b'))
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    x = log_list[0].message.startswith("W02: ?:?:?: W02: a attribute 'b' is "
                                       "invalid. Must be a standard XML id")
    assert x
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warnings_logging_with_custom_class():
    class CustomAstropyWarningClass(AstropyWarning):
        pass

    # With warnings logging
    with pytest.warns(None) as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            warnings.warn("This is a warning", CustomAstropyWarningClass)
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning')
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warning_logging_with_io_votable_warning():
    from astropy.io.votable.exceptions import W02, vo_warn

    with catch_warnings() as warn_list:
        log.enable_warnings_logging()
        with log.log_to_list() as log_list:
            vo_warn(W02, ('a', 'b'))
        log.disable_warnings_logging()
    assert len(log_list) == 1
    assert len(warn_list) == 0
    assert log_list[0].levelname == 'WARNING'
    x = log_list[0].message.startswith("W02: ?:?:?: W02: a attribute 'b' is "
                                       "invalid. Must be a standard XML id")
    assert x
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_exception_logging_argless_exception():
    """
    Regression test for a crash that occurred on Python 3 when logging an
    exception that was instantiated with no arguments (no message, etc.)

    Regression test for https://github.com/astropy/astropy/pull/4056
    """
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception()
    except Exception:
        sys.excepthook(*sys.exc_info())
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message == 'Exception [astropy.tests.test_logger]'
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_exception_logging_argless_exception():
    """
    Regression test for a crash that occurred on Python 3 when logging an
    exception that was instantiated with no arguments (no message, etc.)

    Regression test for https://github.com/astropy/astropy/pull/4056
    """
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            raise Exception()
    except Exception as exc:
        sys.excepthook(*sys.exc_info())
    else:
        assert False  # exception should have been raised
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message == 'Exception [astropy.tests.test_logger]'
    assert log_list[0].origin == 'astropy.tests.test_logger'
def test_exception_logging_origin():
    # The point here is to get an exception raised from another location
    # and make sure the error's origin is reported correctly

    from astropy.utils.collections import HomogeneousList

    l = HomogeneousList(int)
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            l.append('foo')
    except TypeError as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0].startswith(
            "homogeneous list must contain only objects of type ")
    else:
        assert False
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith(
        "TypeError: homogeneous list must contain only objects of type ")
    assert log_list[0].origin == 'astropy.utils.collections'
def test_exception_logging_origin():
    # The point here is to get an exception raised from another location
    # and make sure the error's origin is reported correctly

    from astropy.utils.collections import HomogeneousList

    l = HomogeneousList(int)  # noqa
    try:
        log.enable_exception_logging()
        with log.log_to_list() as log_list:
            l.append('foo')
    except TypeError as exc:
        sys.excepthook(*sys.exc_info())
        assert exc.args[0].startswith(
            "homogeneous list must contain only objects of type ")
    else:
        assert False
    assert len(log_list) == 1
    assert log_list[0].levelname == 'ERROR'
    assert log_list[0].message.startswith(
        "TypeError: homogeneous list must contain only objects of type ")
    assert log_list[0].origin == 'astropy.utils.collections'
# Module-level imports assumed by this excerpt: numpy as np, pyspeckit,
# matplotlib.pyplot as plt, matplotlib.lines as mlines, astropy.units as u
# and the astropy `log` object; initial_guesses, update_parinfo, voigt_funct,
# spec_res_downgrade, baseline_extract, continuum_deviation and plotcolor are
# helpers from the surrounding module.
def line_fitter(self, linecat, line_idx, niter, input_resid_level,
                max_contorder, max_ladjust, adjust_preference,
                input_continuum_deviation, llimits, max_exclusion_level,
                blends, autoadjust, fwhm_block):
    r'''
    This module fits the spectral lines using pyspeckit. It automatically
    determines the goodness of fit and decides on the best solution for the
    line and continuum fit. It handles blended lines by enforcing a maximum
    amplitude ratio so that the weaker blend cannot become the dominant line.
    Additionally, the wavelength limits used in iteration :math:`n` can be
    adjusted automatically based on the fit from iteration :math:`n-1` to
    accommodate larger wavelength shifts. In this way it is possible to fit
    spectral lines using only a line catalog as input.

    Args:
        linecat : :func:`numpy.array`
            Array with the spectral lines and their wavelengths
        line_idx : :obj:`str`
            Name of the primary line
        niter : :obj:`int`
            Number of iterations
        input_resid_level : :obj:`float`
            The maximum MAD of the fit residuals for a successful fit
        max_contorder : :obj:`int`
            The maximum polynomial order of the continuum
        max_ladjust : :obj:`int`
            The maximum number of wavelength range adjustments in steps of
            5 Angstrom
        adjust_preference : :obj:`str`
            contorder: continuum order is adjusted first
            wavelength: wavelength range is adjusted first
        input_continuum_deviation : :obj:`float`
            By how much the continuum is allowed to deviate from a running
            median estimate. This is set to prevent lines mimicking a
            continuum.
        llimits : :obj:`list`
            The limits for the wavelength fit as set in ``ppxf``
        max_exclusion_level : :obj:`float`
            The exclusion level for lines to be excluded from the next
            baseline estimate as set in ``pyspeckit``
        blends : :obj:`ascii`-file or :obj:`None`
            A file with primary lines that contain blends, providing a
            maximum amplitude ratio of the primary line and the blend to
            prevent the blend from becoming the dominant line in the fit.
        autoadjust : :obj:`bool`
            :obj:`True`: the wavelength limits ``llimits`` are adjusted to
            the fit of the previous iteration. All other wavelength ranges
            are adjusted accordingly, taking into account the proper
            velocity-corrected shift :math:`\Delta \lambda / \lambda`. This
            is especially important to detect hyper-velocity stars.
            :obj:`False`: no adjustment of the limits
        fwhm_block : :obj:`bool`
            :obj:`True`: the minimum FWHM of the Voigt profiles of the
            fitted lines is the instrument's dispersion
            :obj:`False`: the minimum FWHM of the Voigt profiles of the
            fitted lines is zero

    Returns:
        line_idx : :obj:`str`
            Name of the primary line
        temp_l : :obj:`float`
            Fitted wavelength of the primary line
        temp_a : :obj:`float`
            Fitted amplitude of the primary line
        temp_sl : :obj:`float`
            Fitted Lorentzian gamma of the primary line
        temp_sg : :obj:`float`
            Fitted Gaussian sigma of the primary line
        spec_select_idx : :func:`numpy.array`
            Indices of the used part of the spectrum
        template_f : :func:`numpy.array`
            Template spectrum shifted to the rest-frame
        continuum : :func:`numpy.array`
            The fitted continuum
        lstart : :obj:`float`
            First used wavelength bin (might differ from the input if it was
            adjusted during fitting)
        lend : :obj:`float`
            Last used wavelength bin (might differ from the input if it was
            adjusted during fitting)
        contorder : :obj:`int`
            The order of the polynomial used for the continuum
        fit_f : :func:`numpy.array`
            The fitted spectrum
        significance : :obj:`float`
            The line strength over the continuum
        fit_failed : :obj:`bool`
            :obj:`True` if the fit failed for some reason. This line will be
            excluded from further analyses.
        fit_f_highres : :func:`numpy.array`
            The fitted spectrum at the oversampled resolution
        spec_select_idx_highres : :func:`numpy.array`
            Indices of the used part of the oversampled spectrum
        template_f_highres : :func:`numpy.array`
            Oversampled template spectrum shifted to the rest-frame
        continuum_highres : :func:`numpy.array`
            The fitted oversampled continuum
    '''
    self.logger.info('Started line ' + line_idx)

    resid_level = 5.
    std_resid = resid_level + 1.
    continuum_dev = input_continuum_deviation[line_idx] + 1.
    lstart = float(self.cat.loc[line_idx, 'l_start'])
    lend = float(self.cat.loc[line_idx, 'l_end'])
    contorder = self.cat.loc[line_idx, 'cont_order']
    n_ladjust = 0
    max_cont_reached = False
    max_n_ladjust_reached = False
    significant = True
    fit_failed = False
    niter += 1  # the first iteration only sets the initial guesses

    # Repeat the fit until the residual scatter and the continuum deviation
    # meet their targets, adjusting the continuum order and/or wavelength
    # range if they do not, or until the fit is declared failed.
    while (std_resid > resid_level or np.isnan(std_resid)
           or continuum_dev > input_continuum_deviation[line_idx]
           or np.isnan(continuum_dev)) and not fit_failed:

        lines_select = linecat[np.where((linecat >= lstart)
                                        & (linecat < lend))]
        spec_select_idx = np.where((self.spec_lambda >= lstart)
                                   & (self.spec_lambda < lend))
        spec_select_idx_highres = np.where((self.spec_lambda_highres >= lstart)
                                           & (self.spec_lambda_highres < lend))

        linefit_guess, linefit_limits, linefit_limited = \
            initial_guesses(self, lines_select, blends, llimits=llimits)

        # Rescale the flux so that the fitted amplitudes are of order unity
        exponent = int(np.log10(np.nanmedian(self.spec_f[spec_select_idx])) - 4.)
        factor = float(10**(exponent * (-1)))

        template_f = np.zeros_like(self.spec_lambda, dtype=float)
        fit_f = np.zeros_like(self.spec_lambda, dtype=float)
        temp_f = np.array(self.spec_f[spec_select_idx] * factor, dtype=float)
        temp_err = np.array(self.spec_err[spec_select_idx] * factor, dtype=float)
        temp_lambda = np.array(self.spec_lambda[spec_select_idx], dtype=float)
        continuum = np.zeros_like(self.spec_lambda, dtype=float)

        template_f_highres = np.zeros_like(self.spec_lambda_highres, dtype=float)
        fit_f_highres = np.zeros_like(self.spec_lambda_highres, dtype=float)
        temp_lambda_highres = np.array(
            self.spec_lambda_highres[spec_select_idx_highres], dtype=float)
        continuum_highres = np.zeros_like(self.spec_lambda_highres, dtype=float)

        if input_resid_level is None:
            resid_level = np.std(temp_err / np.max(temp_err))
        else:
            resid_level = input_resid_level

        sp = pyspeckit.Spectrum(data=temp_f, error=temp_err, xarr=temp_lambda,
                                unit=r'flux [$10^{' + str(exponent)
                                + r'}$ erg$^{-1}$ s$^{-1}$ cm$^{-2}$ '
                                + r'$\AA^{-1}$]', header={})
        sp.xarr.set_unit(u.AA)
        sp.xarr.xtype = 'wavelength'

        if self.loglevel == "DEBUG":
            plt.ion()
            sp.plotter(linewidth=2, title=line_idx)

        exclusion_level = 0.01
        iterations = 0
        pat = []

        # Iterative baseline + multi-component Voigt fit with pyspeckit
        while iterations < niter:
            if iterations == 1:
                newparinfo = update_parinfo(self, linefit_guess, llimits,
                                            line_idx, blends,
                                            sp.specfit.parinfo, False, False)
            if iterations > 1:
                newparinfo = update_parinfo(self, linefit_guess, llimits,
                                            line_idx, blends,
                                            sp.specfit.parinfo, autoadjust,
                                            fwhm_block)

            # Capture any message the fitter emits through the astropy
            # logger, e.g. the 'gnorm=0.' report of a degenerate fit.
            with log.log_to_list() as log_list:
                sp.baseline(order=int(contorder), plot_baseline=False,
                            subtract=False, annotate=False,
                            highlight_fitregion=False, excludefit=True,
                            save=True, exclude=None, fit_plotted_area=False,
                            exclusionlevel=float(exclusion_level))
                if iterations == 0:
                    sp.specfit.multifit(fittype='voigt', renormalize='auto',
                                        annotate=False, show_components=False,
                                        verbose=False, color=None,
                                        guesses=list(linefit_guess),
                                        parinfo=None, reset_fitspec=True,
                                        plot=False, limits=linefit_limits,
                                        limited=linefit_limited)
                if iterations > 0:
                    sp.specfit.multifit(fittype='voigt', renormalize='auto',
                                        annotate=False, show_components=False,
                                        verbose=False, color=None,
                                        parinfo=newparinfo,
                                        reset_fitspec=True, plot=False)

            if exclusion_level >= max_exclusion_level:
                self.logger.error(line_idx + ': LINE FIT FAILED !!!!!: '
                                  + 'Exclusion level reached max of '
                                  + str(max_exclusion_level))
                fit_failed = True
                break

            if len(log_list) > 0 and log_list[0].message[:8] == 'gnorm=0.' \
                    and exclusion_level < max_exclusion_level:
                # Degenerate fit: raise the exclusion level and restart the
                # iteration with a fresh spectrum object.
                exclusion_level += 0.01
                self.logger.info(line_idx + ': Adjusting exclusion level'
                                 + ' to: '
                                 + str('{:2.2f}'.format(exclusion_level)))
                iterations = 0
                sp = pyspeckit.Spectrum(data=temp_f, error=temp_err,
                                        xarr=temp_lambda,
                                        unit=r'flux [$10^{' + str(exponent)
                                        + r'}$ erg$^{-1}$ s$^{-1}$ cm$^{-2}$ '
                                        + r'$\AA^{-1}$]', header={})
                sp.xarr.set_unit(u.AA)
                sp.xarr.xtype = 'wavelength'
                if self.loglevel == "DEBUG":
                    sp.plotter()

            if len(log_list) == 0 or log_list[0].message[:8] != 'gnorm=0.':
                if iterations > 0:
                    chi2_change = (chi2 - sp.specfit.optimal_chi2(
                        reduced=True, threshold='auto')) \
                        / sp.specfit.optimal_chi2(reduced=True,
                                                  threshold='auto')
                    if sp.specfit.optimal_chi2(reduced=True,
                                               threshold='auto') == chi2 \
                            or np.abs(chi2_change) < 0.05:
                        break
                    self.logger.debug(line_idx + ': Iteration: '
                                      + str(iterations) + ' delta Chi2: '
                                      + str('{:2.3f}'.format(chi2_change)))
                chi2 = sp.specfit.optimal_chi2(reduced=True, threshold='auto')
                self.logger.debug(line_idx + ': Iteration: ' + str(iterations)
                                  + ' Chi2: ' + str('{:3.3f}'.format(chi2)))
                if self.loglevel == "DEBUG":
                    sp.specfit.plot_components(
                        axis=sp.plotter.axis, add_baseline=True,
                        component_fit_color=plotcolor(iterations))
                    ax = sp.plotter.axis
                    pat.append(mlines.Line2D([], [],
                                             color=plotcolor(iterations),
                                             label='# iter: '
                                             + str(iterations)))
                    ax.legend(handles=pat)
                iterations += 1

            if self.loglevel == "DEBUG":
                input("Press ENTER to continue")

        if self.loglevel == "DEBUG":
            sp.specfit.plotresiduals(axis=sp.plotter.axis, clear=False,
                                     yoffset=0.9 * np.min(temp_f),
                                     label=False, linewidth=2, color='g')
            sp.specfit.plot_fit(annotate=False, lw=2, composite_fit_color='r')
            sp.baseline.plot_baseline(annotate=False,
                                      baseline_fit_color='orange',
                                      linewidth=2)
            print('Parinfo:')
            print(sp.specfit.parinfo)
            print(' ')
            input("Press ENTER to close the plot window")

        if not fit_failed:
            if iterations < niter:
                self.logger.info(line_idx + ': Converged after '
                                 + str(iterations) + ' iterations; Chi2: '
                                 + str('{:3.3f}'.format(chi2))
                                 + ' delta Chi2 [%]: '
                                 + str('{:2.3f}'.format(chi2_change)))
            if iterations == niter:
                self.logger.info(line_idx
                                 + ': Maximum number of iterations ('
                                 + str(iterations) + ') reached; Chi2: '
                                 + str(chi2))

            # Indices of the primary (laboratory) line within the selection
            par_extract_idx = []
            for lab_line_idx, lab_lines in enumerate(
                    np.array([self.cat.loc[line_idx, 'l_lab']])):
                par_extract_idx = np.concatenate(
                    (par_extract_idx, np.where(lines_select == lab_lines)[0]))

            # Rebuild the template and the fitted spectrum from the Voigt
            # parameters, at native and oversampled resolution.
            for i in range(len(lines_select)):
                xcen = lines_select[i]
                gamma = sp.specfit.parinfo[int(4 * i + 3)]['value']
                sigma = sp.specfit.parinfo[int(4 * i + 2)]['value']
                amp = sp.specfit.parinfo[int(4 * i + 0)]['value']

                highres_f = voigt_funct(self.spec_lambda_highres, xcen, 1,
                                        sigma, gamma)
                lowres_f = spec_res_downgrade(self.spec_lambda_highres,
                                              highres_f, self.spec_lambda)
                lineflux = amp * lowres_f
                lineflux_highres = amp * highres_f
                template_f += lineflux / factor
                template_f_highres += lineflux_highres / factor

                highres_f = voigt_funct(self.spec_lambda_highres,
                                        sp.specfit.parinfo[int(4 * i + 1)]['value'],
                                        1, sigma, gamma)
                lowres_f = spec_res_downgrade(self.spec_lambda_highres,
                                              highres_f, self.spec_lambda)
                lineflux_fit = amp * lowres_f
                lineflux_fit_highres = amp * highres_f
                fit_f += lineflux_fit / factor
                fit_f_highres += lineflux_fit_highres / factor

                if (i == par_extract_idx).any():
                    temp_a = sp.specfit.parinfo[int(4 * i + 0)]['value'] / factor
                    temp_l = sp.specfit.parinfo[int(4 * i + 1)]['value']
                    temp_sg = sp.specfit.parinfo[int(4 * i + 2)]['value']
                    temp_sl = sp.specfit.parinfo[int(4 * i + 3)]['value']

            baseline_temp = baseline_extract(self, sp, contorder)
            baseline_sub_spec = sp.data / baseline_temp(temp_lambda
                                                        - temp_lambda[0])
            continuum = baseline_temp(temp_lambda - temp_lambda[0]) / factor
            continuum_highres = baseline_temp(temp_lambda_highres
                                              - temp_lambda_highres[0]) / factor

            resid = (self.spec_f[spec_select_idx]
                     - fit_f[spec_select_idx]) / continuum
            std_resid = np.std(resid)
            continuum_dev = continuum_deviation(self, temp_lambda, temp_f,
                                                continuum, contorder)

            if std_resid > resid_level or np.isnan(std_resid) \
                    or continuum_dev > input_continuum_deviation[line_idx] \
                    or np.isnan(continuum_dev):
                if std_resid > resid_level or np.isnan(std_resid):
                    self.logger.info(line_idx + ': STD of residuals: '
                                     + str('{:2.4f}'.format(std_resid))
                                     + ' targeted: '
                                     + str('{:2.4f}'.format(resid_level)))
                if continuum_dev > input_continuum_deviation[line_idx]:
                    self.logger.info(line_idx + ': Continuum deviation: '
                                     + str('{:2.4f}'.format(continuum_dev))
                                     + ' targeted: '
                                     + str('{:2.4f}'.format(
                                         input_continuum_deviation[line_idx])))

                if contorder == max_contorder:
                    max_cont_reached = True
                if n_ladjust == max_ladjust:
                    max_n_ladjust_reached = True
                if max_cont_reached and max_n_ladjust_reached:
                    self.logger.warning(line_idx + ": Maximum adjustments"
                                        + " reached: Check the output")
                    break

                if adjust_preference == 'contorder':
                    if not max_cont_reached:
                        contorder += 1
                        self.logger.info(line_idx
                                         + ': Adjusting continuum order to: '
                                         + str(contorder))
                    if max_cont_reached and not max_n_ladjust_reached:
                        lstart -= 5.
                        lend += 5.
                        self.logger.info(line_idx
                                         + ': maximum continuum order reached'
                                         + ' => Adjusting wavelength range to: ['
                                         + str(lstart) + ',' + str(lend) + ']')
                        n_ladjust += 1
                        max_cont_reached = False
                        contorder = self.cat.loc[line_idx, 'cont_order']

                if adjust_preference != 'contorder':
                    contorder = self.cat.loc[line_idx, 'cont_order']
                    if not max_n_ladjust_reached:
                        if adjust_preference == 'min_lambda' \
                                or adjust_preference == 'minmax_lambda':
                            lstart -= 5.
                            self.logger.info(line_idx
                                             + ': Adjusting lower wavelength'
                                             + ' range to: ' + str(lstart))
                        if adjust_preference == 'max_lambda' \
                                or adjust_preference == 'minmax_lambda':
                            lend += 5.
                            self.logger.info(line_idx
                                             + ': Adjusting upper wavelength'
                                             + ' range to: ' + str(lend))
                        n_ladjust += 1
                    if max_n_ladjust_reached and not max_cont_reached:
                        contorder += 1
                        self.logger.info(line_idx
                                         + ': maximum wavelength adjustment'
                                         + ' reached => Adjusting continuum'
                                         + ' order to: ' + str(contorder))
                        lstart = self.cat.loc[line_idx, 'l_start']
                        lend = self.cat.loc[line_idx, 'l_end']

        if fit_failed:
            temp_l = 0.
            temp_a = 0.
            temp_sl = 0.
            temp_sg = 0.

    # Line significance relative to the (rescaled) noise level
    significance = np.abs(temp_a) / np.median(temp_err / factor)

    if not fit_failed:
        self.logger.info(line_idx + ': STD of residuals: '
                         + str('{:2.4f}'.format(std_resid)) + ' targeted: '
                         + str('{:2.4f}'.format(resid_level)))
        self.logger.info(line_idx + ': Continuum deviation: '
                         + str('{:2.4f}'.format(continuum_dev))
                         + ' targeted: '
                         + str('{:2.4f}'.format(
                             input_continuum_deviation[line_idx])))
        self.logger.info(line_idx + ' Line significance: '
                         + str('{:3.2f}'.format(significance)))
        if (temp_l - self.cat.loc[line_idx, 'l_lab'] < 0.8 * llimits[0]
                or temp_l - self.cat.loc[line_idx, 'l_lab']
                > 0.8 * llimits[1]) and not autoadjust:
            self.logger.warning(line_idx
                                + ' exceeds 0.8 of lambda limits; lfit: '
                                + str(temp_l) + ' llab: '
                                + str(self.cat.loc[line_idx, 'l_lab'])
                                + ' llimits = [' + str(llimits[0]) + ' '
                                + str(llimits[1]) + ']')

    self.logger.info('Finished line ' + line_idx)

    return (line_idx, temp_l, temp_a, temp_sl, temp_sg, spec_select_idx,
            template_f, continuum, lstart, lend, contorder, fit_f,
            significance, fit_failed, fit_f_highres, spec_select_idx_highres,
            template_f_highres, continuum_highres)
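# Hypothetical call sketch (not from the source): line_fitter returns the
# 18-element tuple documented above, so a driver loop over a line catalogue
# might unpack it like this. `spec` stands for an instance of the class that
# owns line_fitter, `linecat` for the catalogue wavelength array, and the
# keyword arguments mirror the remaining parameters of the method.
def fit_all_lines(spec, linecat, line_ids, **fit_kwargs):
    results = {}
    for line_idx in line_ids:
        (name, l_fit, amp, gamma_l, sigma_g, sel_idx, template_f, continuum,
         lstart, lend, contorder, fit_f, significance, fit_failed,
         fit_f_highres, sel_idx_highres, template_f_highres,
         continuum_highres) = spec.line_fitter(linecat, line_idx, **fit_kwargs)
        if not fit_failed:
            results[name] = dict(wavelength=l_fit, amplitude=amp,
                                 significance=significance)
    return results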
# Module-level imports assumed by this excerpt; get_cutout_limits,
# get_cutout_wcs and InvalidQueryError are helpers from the surrounding
# cutout package.
import numpy as np

from astropy import log, wcs
from astropy.io import fits


def _hducut(img_hdu, center_coord, cutout_size, correct_wcs=False, verbose=False):
    """
    Takes an ImageHDU (image and associated metadata in the FITS format), as well as
    a center coordinate and size, and makes a cutout of that image, which is returned
    as another ImageHDU, including updated WCS information.

    Parameters
    ----------
    img_hdu : `~astropy.io.fits.hdu.image.ImageHDU`
        The image and associated metadata that is being cut out.
    center_coord : `~astropy.coordinates.sky_coordinate.SkyCoord`
        The coordinate to cut out around.
    cutout_size : array
        The size of the cutout as [nx, ny], where nx/ny can be integers (assumed to
        be pixels) or `~astropy.units.Quantity` values, either pixels or angular
        quantities.
    correct_wcs : bool
        Default False. If True a new WCS will be created for the cutout that is
        tangent projected and does not include distortions.
    verbose : bool
        Default False. If True intermediate information is printed.

    Returns
    -------
    response : `~astropy.io.fits.hdu.image.ImageHDU`
        The cutout image and associated metadata.
    """

    hdu_header = fits.Header(img_hdu.header, copy=True)

    # We are going to reroute the logging to a string stream temporarily so we can
    # intercept any message from astropy, chiefly the "Inconsistent SIP distortion
    # information" INFO message which will indicate that we need to remove existing
    # SIP keywords from a WCS whose CTYPE does not include SIP. In this we are taking
    # the CTYPE to be correct and adjusting the header keywords to match.
    hdlrs = log.handlers
    log.handlers = []
    with log.log_to_list() as log_list:
        img_wcs = wcs.WCS(hdu_header, relax=True)
    for hd in hdlrs:
        log.addHandler(hd)

    no_sip = False
    if (len(log_list) > 0):
        if ("Inconsistent SIP distortion information" in log_list[0].msg):
            # Remove SIP coefficients
            img_wcs.sip = None
            no_sip = True
        else:
            # Message(s) we didn't prepare for, so we want to go ahead and display them
            for log_rec in log_list:
                log.log(log_rec.levelno, log_rec.msg, extra={"origin": log_rec.name})

    img_data = img_hdu.data

    if verbose:
        print("Original image shape: {}".format(img_data.shape))

    # Get cutout limits
    cutout_lims = get_cutout_limits(img_wcs, center_coord, cutout_size)

    if verbose:
        print("xmin,xmax: {}".format(cutout_lims[0]))
        print("ymin,ymax: {}".format(cutout_lims[1]))

    # These limits are not guaranteed to be within the image footprint
    xmin, xmax = cutout_lims[0]
    ymin, ymax = cutout_lims[1]

    ymax_img, xmax_img = img_data.shape

    # Check the cutout is on the image
    if (xmax <= 0) or (xmin >= xmax_img) or (ymax <= 0) or (ymin >= ymax_img):
        raise InvalidQueryError("Cutout location is not in image footprint!")

    # Adjust the limits and figure out the padding
    padding = np.zeros((2, 2), dtype=int)
    if xmin < 0:
        padding[1, 0] = -xmin
        xmin = 0
    if ymin < 0:
        padding[0, 0] = -ymin
        ymin = 0
    if xmax > xmax_img:
        padding[1, 1] = xmax - xmax_img
        xmax = xmax_img
    if ymax > ymax_img:
        padding[0, 1] = ymax - ymax_img
        ymax = ymax_img

    img_cutout = img_hdu.data[ymin:ymax, xmin:xmax]

    # Adding padding to the cutout so that it's the expected size
    if padding.any():  # only do if we need to pad
        img_cutout = np.pad(img_cutout, padding, 'constant', constant_values=np.nan)

    if verbose:
        print("Image cutout shape: {}".format(img_cutout.shape))

    # Getting the cutout wcs
    cutout_wcs = get_cutout_wcs(img_wcs, cutout_lims)

    # Updating the header with the new wcs info
    if no_sip:
        hdu_header.update(cutout_wcs.to_header(relax=False))
    else:
        hdu_header.update(cutout_wcs.to_header(relax=True))  # relax arg is for sip distortions if they exist

    # Naming the extension and preserving the original name
    hdu_header["O_EXT_NM"] = (hdu_header.get("EXTNAME"), "Original extension name.")
    hdu_header["EXTNAME"] = "CUTOUT"

    # Moving the filename, if present, into the ORIG_FLE keyword
    hdu_header["ORIG_FLE"] = (hdu_header.get("FILENAME"), "Original image filename.")
    hdu_header.remove("FILENAME", ignore_missing=True)

    hdu = fits.ImageHDU(header=hdu_header, data=img_cutout)

    return hdu