def modifyData(self, data_model: MapModel) -> MapModel:
    old_norm = data_model.norm
    clip = old_norm.clip
    vmin = old_norm.vmin
    vmax = old_norm.vmax
    norm = mpl_normalize.ImageNormalize(vmin=vmin, vmax=vmax, clip=clip,
                                        stretch=self._getStretch())
    data_model.norm = norm
    return data_model

def _getNorm(self, clip, vmax, vmin):
    index = self._ui.norm_combo.currentIndex()
    norm = list(self.norms.keys())[index]
    args = [input_w.value() for _, input_w in self.settings_widgets]
    if issubclass(norm, mpl_normalize.BaseStretch):
        # the selected entry is a stretch class: wrap it in an ImageNormalize
        stretch = norm(*args)
        return mpl_normalize.ImageNormalize(vmin=vmin, vmax=vmax, clip=clip,
                                            stretch=stretch)
    else:
        # the selected entry is itself a normalization class
        return norm(*args, vmin=vmin, vmax=vmax, clip=clip)

def onDataChanged(self, viewer_ctrl: ViewerController):
    norm = viewer_ctrl.model.norm
    if not isinstance(norm, mpl_normalize.ImageNormalize):
        # only image normalizations are supported: wrap plain matplotlib
        # norms in an ImageNormalize with a linear stretch
        clip = norm.clip
        vmin = norm.vmin
        vmax = norm.vmax
        norm = mpl_normalize.ImageNormalize(vmin=vmin, vmax=vmax, clip=clip,
                                            stretch=LinearStretch())
    stretch = norm.stretch
    self._ui.norm_combo.setCurrentText(type(stretch).__name__)
    for label, input_w in self.settings_widgets:
        input_w.setValue(getattr(stretch, label.text()))

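# --- Usage sketch (illustrative, not from the source): how the pieces above
# combine. An ImageNormalize pairs vmin/vmax limits with a stretch object and
# can be swapped on an existing matplotlib image, which is what modifyData()
# does for the viewer model. Synthetic data; names below are local to the demo.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import SqrtStretch, LogStretch, mpl_normalize

data = np.random.default_rng(0).gamma(2.0, size=(64, 64))
norm = mpl_normalize.ImageNormalize(vmin=0, vmax=data.max(),
                                    stretch=SqrtStretch(), clip=True)
im = plt.imshow(data, norm=norm, origin='lower')
# swap the normalization in place, keeping the limits but changing the stretch
im.set_norm(mpl_normalize.ImageNormalize(vmin=0, vmax=data.max(),
                                         stretch=LogStretch(), clip=True))
plt.colorbar(im)
plt.show()
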
def image_frame(flux, wcs=None, slices=('y', 'x', 1), fig=None, ax=None,
                fig_conf=None, axes_conf=None, output_file=None):
    # Plot configuration (None-defaults avoid shared mutable default args)
    fig_conf = {} if fig_conf is None else fig_conf
    axes_conf = {} if axes_conf is None else axes_conf
    defaultConf = STANDARD_PLOT.copy()
    defaultConf.update(fig_conf)
    rcParams.update(defaultConf)

    frame_size = flux.shape
    x, y = np.arange(0, frame_size[1]), np.arange(0, frame_size[0])
    X, Y = np.meshgrid(x, y)

    norm = mpl_normalize.ImageNormalize(stretch=SqrtStretch())  # currently unused

    # Axis set-up, with a WCS projection if one is provided.
    # Note: the `slices`, `fig` and `ax` arguments are currently unused;
    # a fresh figure and axes are always created.
    if wcs is None:
        fig, ax = plt.subplots(figsize=(12, 8))
    else:
        fig = plt.figure()
        ax = fig.add_subplot(projection=wcs, slices=('x', 'y', 1))

    # replace pixels below the 90th-percentile level with 1, so the
    # log-scaled display emphasises the bright structure
    lower_limit = np.percentile(flux, 90)
    idcs_negative = flux < lower_limit
    flux_contours = np.ones(flux.shape)
    flux_contours[~idcs_negative] = flux[~idcs_negative]

    # Plot the data
    ax.imshow(np.log10(flux_contours), vmin=np.log10(lower_limit))
    # ax.contour(X, Y, np.log10(flux_contours), vmin=np.log10(lower_limit),
    #            cmap=plt.cm.inferno)
    ax.update(axes_conf)

    if output_file is None:
        plt.show()
    else:
        plt.savefig(output_file, bbox_inches='tight')

    return

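# --- Usage sketch (illustrative): calling image_frame() on a synthetic flux
# map. Assumes the module-level STANDARD_PLOT dict and imports used by the
# function above; `flux_demo` and the output filename are hypothetical.
flux_demo = np.abs(np.random.default_rng(1).normal(10.0, 2.0, size=(120, 160)))
image_frame(flux_demo)                            # interactive display
image_frame(flux_demo, output_file='frame.png')   # or save to disk
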
# assumes module-level imports: os, glob, pickle, numpy as np, scipy.ndimage,
# matplotlib.pyplot as plt, PdfPages, astropy.io (fits, ascii),
# astropy.table (vstack, Column), sigma_clipped_stats, LogStretch,
# mpl_normalize, and photutils (aperture_photometry, detect_sources)
def aper_photometry(path_image_abs, apertures, logger=None, bkg_sigma=1.5,
                    dump_pickle=True, clobber=False):

    def msg(string, msgtype=None):
        if logger is None:
            print(string)
        else:
            print(string)
            if msgtype == 'info':
                logger.info(string)
            if msgtype == 'error':
                logger.error(string)
            if msgtype == 'warning':
                logger.warning(string)

    filename = os.path.basename(path_image_abs)
    dir_save = os.path.dirname(path_image_abs)
    filenames_combined = '\t'.join(os.listdir(dir_save))

    # skip the work if all output files already exist and clobber is off
    expected_suffixes = ['-aper_phot.png', '-aper_phot.pdf', '-aper_phot.csv',
                         '-aper_phot.ecsv', '-aper_phot_table.obj']
    if not clobber and all(filename[0:-5] + suffix in filenames_combined
                           for suffix in expected_suffixes):
        msg('Aperture photometry table already exists. Reading pickle...',
            msgtype='info')
        try:
            phot_table = pickle.load(open(glob.glob(os.path.join(
                dir_save, filename[0:-5] + '-aper_phot_table.obj'))[0], 'rb'))
            return phot_table
        except Exception:
            # pickle file corrupt or empty; redo the photometry
            pass

    # image type notifications
    if 'master' in path_image_abs:
        if 'normalised' in path_image_abs:
            msg('Performing aperture photometry on '
                'normalised master object image {}...'.format(path_image_abs),
                msgtype='info')
        else:
            msg('Performing aperture photometry on '
                'un-normalised master image {}...'.format(path_image_abs),
                msgtype='info')
    elif 'reduced' in path_image_abs:
        msg('Performing aperture photometry on '
            'reduced image frame {}...'.format(path_image_abs),
            msgtype='info')
    else:
        msg('Warning: aperture photometry being performed on '
            'a single exposure {}...'.format(path_image_abs),
            msgtype='warning')

    # read in data and header, including exposure time
    try:
        hdu = fits.open(path_image_abs)['FPC']
    except Exception:
        hdu = fits.open(path_image_abs)[0]
    data = hdu.data
    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        msg('Exposure time not found in header. '
            'Cannot determine magnitude.', msgtype='error')
        exptime = np.nan

    # === Iteratively determine background level ===
    # Assuming the background is homogeneous, estimate it by sigma clipping.
    # If background noise varies across the image, generate a 2D background
    # instead.
    msg('Determining background noise level...', msgtype='info')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=3)
    threshold = median + (std * 4)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(np.bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma-clipped stats of the background, with detected sources masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters=3)

    # perform aperture photometry and create a table
    msg('Extracting aperture photometry data...', msgtype='info')
    phot_tables = []
    for aperture in apertures:
        # aperture_photometry only supports one unique aperture at a time
        phot_table = aperture_photometry(data - bkg_median, aperture)
        phot_tables.append(phot_table)
    # vertically stack tables into one, given that they share the same columns
    phot_table = vstack(phot_tables)

    # clean up the table: some entries may be a 1x1 array instead of a number,
    # so the datatype of the column has to be changed
    for column in ['xcenter', 'ycenter']:
        if phot_table[0][column].shape != ():
            msg('Replacing arrays in column {} by float64...'.format(column))
            # get column data as a list of float64 numbers
            column_data = [phot_table[row][column][0]
                           for row in range(len(phot_table))]
            phot_table.replace_column(column, column_data)

    # add columns for flux, instrumental magnitude and background stats
    flux_data = [phot_table[row]['aperture_sum'] / exptime
                 for row in range(len(phot_table))]
    column_flux = Column(name='flux', data=flux_data)
    column_mag = Column(name='mag_instr',
                        data=[-2.5 * np.log10(flux) for flux in flux_data])
    column_bkg_mean = Column(name='background_mean',
                             data=[bkg_mean] * len(phot_table))
    column_bkg_median = Column(name='background_median',
                               data=[bkg_median] * len(phot_table))
    phot_table.add_columns([column_flux, column_mag,
                            column_bkg_mean, column_bkg_median])

    # plot and save
    msg('Saving aperture photometry results to {}...'.format(dir_save),
        msgtype='info')
    # if the filename ends with .fits, strip the extension
    if filename[-5:] == '.fits':
        path_save_prefix = os.path.join(dir_save, filename[0:-5])
    else:
        path_save_prefix = os.path.join(dir_save, filename)

    norm_log = mpl_normalize.ImageNormalize(vmin=0, vmax=2000,
                                            stretch=LogStretch())
    [fig, ax] = plt.subplots(figsize=(4, 3))
    ax.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
    for aperture in apertures:
        aperture.plot(ax=ax, lw=0.1, alpha=1, color='lime')
    ax.axis('off')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # save all files
    try:
        ascii.write(phot_table, path_save_prefix + '-aper_phot.ecsv',
                    format='ecsv')
        # csv for readability in MS Excel
        ascii.write(phot_table, path_save_prefix + '-aper_phot.csv',
                    format='csv')
        # save figures
        fig.savefig(path_save_prefix + '-aper_phot.png',
                    bbox_inches='tight', pad_inches=0, dpi=1200)
        pp = PdfPages(path_save_prefix + '-aper_phot.pdf')
        pp.savefig(fig, dpi=1200)
        pp.close()
        if dump_pickle:
            file_phot = open(path_save_prefix + '-aper_phot_table.obj', 'wb')
            pickle.dump(phot_table, file_phot)
        msg('Aperture photometry object, tables, and images '
            'saved to {}'.format(dir_save), msgtype='info')
    except Exception:
        msg('Unable to write to disk, check permissions.', msgtype='error')

    # free references to large objects to limit memory growth
    try:
        plt.close('all')
        del (hdu, data, header, mask, mask_dilated, phot_tables, flux_data,
             column_flux, column_mag, column_bkg_mean, column_bkg_median,
             norm_log, fig, ax, apertures, pp, file_phot)
    except Exception:
        pass

    return phot_table

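# --- Usage sketch (illustrative): aper_photometry() expects a list of
# photutils aperture objects; the FITS path and positions below are
# hypothetical.
from photutils import CircularAperture

positions = [(245.0, 312.5), (512.0, 480.0)]
apertures = [CircularAperture(positions, r=r) for r in (3.0, 5.0, 8.0)]
phot_table = aper_photometry('/data/fpc/master_object.fits', apertures,
                             bkg_sigma=1.5, dump_pickle=True)
print(phot_table['xcenter', 'ycenter', 'flux', 'mag_instr'])
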
# assumes module-level imports: os, glob, pickle, numpy as np, scipy.ndimage,
# matplotlib.pyplot as plt, PdfPages, astropy.io (fits, ascii),
# astropy.stats (sigma_clipped_stats, gaussian_fwhm_to_sigma,
# gaussian_sigma_to_fwhm), astropy.convolution.Gaussian2DKernel,
# astropy.visualization (LogStretch, mpl_normalize), and photutils
# (detect_threshold, detect_sources, source_properties, properties_table,
# calc_total_error, EllipticalAperture, random_cmap)
def segmentation_photometry(path_image_abs, path_error_abs=None, logger=None,
                            bkg_sigma=1.5, source_snr=1.05, fwhm_kernel=25,
                            x_size_kernel=100, y_size_kernel=80,
                            dump_pickle=False, clobber=True):
    """
    Given a FITS file (master image), calculate photometry by source
    segmentation.

    make_source_mask is not yet available in photutils v0.2.2, so this
    version manually creates a source mask for determining the background.
    """

    def msg(string, msgtype=None):
        if logger is None:
            print(string)
        else:
            print(string)
            if msgtype == 'info':
                logger.info(string)
            if msgtype == 'error':
                logger.error(string)
            if msgtype == 'warning':
                logger.warning(string)

    filename = os.path.basename(path_image_abs)
    dir_save = os.path.dirname(path_image_abs)
    filenames_combined = '\t'.join(os.listdir(dir_save))

    # skip the work if all output files already exist and clobber is off
    expected_suffixes = ['-segm.obj', '-props.obj',
                         '-centroid_outline.png', '-centroid_outline.pdf',
                         '-segmentation.png', '-segmentation.pdf']
    if not clobber and all(filename[0:-5] + suffix in filenames_combined
                           for suffix in expected_suffixes):
        msg('Photometry properties table already exists. Reading pickles...',
            msgtype='info')
        try:
            segm = pickle.load(open(glob.glob(os.path.join(
                dir_save, filename[0:-5] + '-segm.obj*'))[0], 'rb'))
            props_list = pickle.load(open(glob.glob(os.path.join(
                dir_save, filename[0:-5] + '-props.obj*'))[0], 'rb'))
            return [segm, props_list]
        except Exception:
            # pickle file corrupt or empty; redo the photometry
            pass
    elif not clobber \
            and filename[0:-5] + '-logstretch.png' in filenames_combined \
            and filename[0:-5] + '-logstretch.pdf' in filenames_combined:
        msg('Non-detection from previous results.', msgtype='info')
        return [None, []]

    # image type notifications
    if 'master' in path_image_abs:
        if 'normalised' in path_image_abs:
            msg('Performing photometry on '
                'normalised master object image {}...'.format(path_image_abs),
                msgtype='info')
        else:
            msg('Performing photometry on '
                'un-normalised master image {}...'.format(path_image_abs),
                msgtype='info')
    elif 'reduced' in path_image_abs:
        msg('Performing photometry on '
            'reduced image frame {}...'.format(path_image_abs),
            msgtype='info')
    else:
        msg('Warning: photometry being performed on '
            'a single exposure {}...'.format(path_image_abs),
            msgtype='warning')

    # read in data
    try:
        hdu = fits.open(path_image_abs)['FPC']
        data = hdu.data
    except Exception:
        hdu = fits.open(path_image_abs)[0]
        data = hdu.data

    # read in the error in the data
    msg('Reading master error image {}...'.format(path_error_abs))
    try:
        hdu_error = fits.open(path_error_abs)[0]
        data_error = hdu_error.data
    except Exception:
        data_error = np.zeros(data.shape)
        msg('No master error image available for {}'.format(path_image_abs))

    header = hdu.header
    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        msg('Exposure time not found in header. '
            'Cannot determine magnitude.', msgtype='error')
        exptime = np.nan

    # === Iteratively determine background level ===
    # Assuming the background is homogeneous, estimate it by sigma clipping.
    # If background noise varies across the image, generate a 2D background
    # instead, using the Background function.
    msg('Determining background noise level...', msgtype='info')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=3)
    threshold = median + (std * 4)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(np.bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma-clipped stats of the background, with detected sources masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters=3)

    # === Detect sources by segmentation ===
    msg('Determining threshold for source detection...', msgtype='info')
    # determine the threshold for source detection;
    # in the current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data, background=bkg_median,
                                 error=data_error + bkg_std, snr=source_snr)

    # calculate the total error, including Poisson statistics
    try:
        # this is for photutils v0.3 and above
        msg('Calculating total errors including background and Poisson...',
            msgtype='info')
        err_tot = calc_total_error(data, bkg_error=data_error + bkg_std,
                                   effective_gain=0.37)
        gain = None
    except Exception:
        # in versions earlier than 0.3 this function is not available;
        # the error must be of the same shape as the data array (v0.2.2)
        err_tot = data_error + bkg_std
        gain = 0.37

    msg('Preparing 2D Gaussian kernel...', msgtype='info')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel, x_size=x_size_kernel,
                              y_size=y_size_kernel)
    # Normalise the kernel. Kernel models are normalised by default
    # (integral of f(x) dx = 1), but because of the limited kernel array
    # size, the normalisation for kernels with an infinite response can
    # differ from one.
    kernel.normalize()

    # Obtain a SegmentationImage object with the same shape as the data,
    # where sources are labeled by different positive integer values;
    # a value of zero is always reserved for the background.
    # If the threshold includes the background level, as above, then the
    # image input into detect_sources() should NOT be background-subtracted.
    msg('Segmentation processing...', msgtype='info')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    msg('Segmentation labels are: ' + repr(segm.labels), msgtype='info')

    # === Measure regional source properties ===
    # source_properties() assumes that the data have been
    # background-subtracted; `background` is the background level that was
    # previously present in the input data. It does not get subtracted from
    # the input data, which should already be background-subtracted.
    msg('Extracting source properties...', msgtype='info')
    if gain is None:
        # gain is no longer supported in v0.3; it is folded into the total
        # error array instead
        props_list = source_properties(data - bkg_median, segm,
                                       background=bkg_median, error=err_tot)
    else:
        # still on v0.2.2
        props_list = source_properties(data - bkg_median, segm,
                                       background=bkg_median, error=err_tot,
                                       effective_gain=gain)

    # add derived properties that are not automatically calculated
    for i in range(len(props_list)):
        # source_sum is by definition background-subtracted already
        props_list[i].flux = props_list[i].source_sum / exptime
        props_list[i].flux_err = props_list[i].source_sum_err / exptime
        # flux = source_sum / exptime
        # instrumental magnitude = -2.5 * log10(flux)
        props_list[i].mag_instr = -2.5 * np.log10(props_list[i].flux)
        # (note: this carries the sign of dm/dF; take the absolute value
        #  for a positive uncertainty)
        props_list[i].mag_instr_err = -2.5 / props_list[i].flux \
            / np.log(10) * props_list[i].flux_err
        # FWHM assuming a circular Gaussian of the same cross-section area
        props_list[i].fwhm = gaussian_sigma_to_fwhm * np.sqrt(
            props_list[i].semimajor_axis_sigma.value *
            props_list[i].semiminor_axis_sigma.value)

    # make plots and save to images:
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 5  # approximate isophotal extent
    for props in props_list:
        position = (props.xcentroid.value, props.ycentroid.value)
        a = props.semimajor_axis_sigma.value * r
        b = props.semiminor_axis_sigma.value * r
        theta = props.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))

    # === plot and save ===
    # if the filename ends with .fits, strip the extension
    if filename[-5:] == '.fits':
        path_save_prefix = os.path.join(dir_save, filename[0:-5])
    else:
        path_save_prefix = os.path.join(dir_save, filename)

    norm_log = mpl_normalize.ImageNormalize(vmin=0, vmax=2000,
                                            stretch=LogStretch())

    if len(props_list) > 0:
        # at least one source was detected: save segm and props to object
        # files, and also save props to table files
        msg('Saving segmentation and source properties to {}...'
            .format(dir_save), msgtype='info')
        # create a table of properties
        props_table = properties_table(props_list)
        # add custom columns to the table: flux and instrumental magnitude
        props_table['flux'] = [props_list[i].flux
                               for i in range(len(props_list))]
        props_table['flux_err'] = [props_list[i].flux_err
                                   for i in range(len(props_list))]
        props_table['mag_instr'] = [props_list[i].mag_instr
                                    for i in range(len(props_list))]
        props_table['mag_instr_err'] = [props_list[i].mag_instr_err
                                        for i in range(len(props_list))]
        props_table['fwhm'] = [props_list[i].fwhm
                               for i in range(len(props_list))]

        # plot centroids and segmentation outlines
        [fig1, ax1] = plt.subplots(figsize=(4, 3))
        ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax1.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red', marker='+',
                 markersize=2, markeredgewidth=0.1, alpha=1)
        segm_outline = np.array(segm.outline_segments(), dtype=float)
        segm_outline[segm_outline < 1] = np.nan
        # use the 'winter' colour map for the outlines and make 'bad' (NaN)
        # pixels fully transparent
        segm_outline_cmap = plt.cm.winter
        segm_outline_cmap.set_bad(alpha=0)
        ax1.imshow(segm_outline, origin='lower',
                   cmap=segm_outline_cmap, alpha=1)
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
        fig1.tight_layout()

        # segmentation image and approximate elliptical apertures
        [fig2, ax2] = plt.subplots(figsize=(4, 3))
        rand_cmap = random_cmap(segm.max + 1, random_state=8)
        ax2.imshow(segm, origin='lower', cmap=rand_cmap)
        ax2.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red', marker='+',
                 markersize=2, markeredgewidth=0.1, alpha=1)
        for aperture in apertures:
            aperture.plot(ax=ax2, lw=0.1, alpha=1, color='lime')
        ax2.axis('off')
        ax2.get_xaxis().set_visible(False)
        ax2.get_yaxis().set_visible(False)
        fig2.tight_layout()

        try:
            # Enhanced CSV preserves table meta-data such as column data
            # types and units, so the table can be stored and read back as
            # ASCII with no loss of information.
            ascii.write(props_table, path_save_prefix + '-props.ecsv',
                        format='ecsv')
            # csv for readability in MS Excel
            ascii.write(props_table, path_save_prefix + '-props.csv',
                        format='csv')
            # save figures
            fig1.savefig(path_save_prefix + '-centroid_outline.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            fig2.savefig(path_save_prefix + '-segmentation.png',
                         bbox_inches='tight', pad_inches=0, dpi=2000)
            pp1 = PdfPages(path_save_prefix + '-centroid_outline.pdf')
            pp1.savefig(fig1, dpi=1200)
            pp1.close()
            pp2 = PdfPages(path_save_prefix + '-segmentation.pdf')
            pp2.savefig(fig2, dpi=2000)
            pp2.close()
            if dump_pickle:
                # dump segmentation and properties to objects in binary mode
                file_segm = open(path_save_prefix + '-segm.obj', 'wb')
                pickle.dump(segm, file_segm)
                file_props = open(path_save_prefix + '-props.obj', 'wb')
                pickle.dump(props_list, file_props)
            msg('Segmentation, properties objects, tables, and images '
                'saved to {}'.format(dir_save), msgtype='info')
        except Exception:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')

        # free references to large objects to limit memory growth
        try:
            plt.close('all')
            del (hdu, hdu_error, data, data_error, header, mask,
                 mask_dilated, err_tot, kernel, apertures, norm_log,
                 props_table, segm_outline, segm_outline_cmap, rand_cmap,
                 fig1, ax1, fig2, ax2, pp1, pp2, file_segm, file_props)
        except Exception:
            pass

        return [segm, props_list]

    else:
        msg('No source detected in {}'.format(path_image_abs),
            msgtype='warning')
        # save a log-stretched image if no source was detected
        [fig0, ax0] = plt.subplots(figsize=(4, 3))
        ax0.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax0.get_xaxis().set_visible(False)
        ax0.get_yaxis().set_visible(False)
        try:
            fig0.savefig(path_save_prefix + '-logstretch.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            pp0 = PdfPages(path_save_prefix + '-logstretch.pdf')
            pp0.savefig(fig0, dpi=1200)
            pp0.close()
        except Exception:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')
        return [None, []]

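# --- Minimal sketch (illustrative) of the background-estimation technique
# both photometry functions above share: detect bright sources, dilate the
# resulting mask so source wings are excluded, then sigma-clip the remaining
# pixels. Written against the same photutils/astropy era this module targets
# (iters= keyword, detect_sources in photutils.detection).
import numpy as np
from scipy import ndimage
from astropy.stats import sigma_clipped_stats
from photutils.detection import detect_sources

def masked_background_stats(data, bkg_sigma=1.5, nsigma=4, npixels=5):
    # first pass: clipped stats on the whole frame set a detection threshold
    _, median, std = sigma_clipped_stats(data, sigma=bkg_sigma, iters=3)
    segm = detect_sources(data, median + nsigma * std, npixels=npixels)
    mask = segm.data.astype(bool)
    # grow the mask to cover the faint outskirts of each detection
    mask = ndimage.binary_dilation(mask, structure=np.ones((5, 5)))
    # second pass: clipped stats of the source-free background
    return sigma_clipped_stats(data, sigma=bkg_sigma, mask=mask, iters=3)
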
def segmentation_photometry(path_file_abs, bkg_sigma=3.0, source_snr=3.0,
                            fwhm_kernel=2.0, x_size_kernel=3, y_size_kernel=3,
                            clobber=False):
    """
    Aperture photometry from source segmentation.

    make_source_mask is not yet available in photutils v0.2.2, so this
    version manually creates a source mask for determining the background.
    """

    import os
    import copy  # used by the commented-out outline plotting below
    import glob
    import pickle
    import numpy as np
    from scipy import ndimage
    import matplotlib
    # matplotlib.rcParams['text.usetex'] = True
    # matplotlib.rcParams['text.latex.unicode'] = True
    # from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    from astropy.io import fits, ascii
    from astropy.convolution import Gaussian2DKernel
    from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
    # from astropy.table import Table
    from astropy.visualization import (LogStretch, mpl_normalize)
    # from astropy.extern.six.moves import StringIO
    from photutils import (detect_threshold, EllipticalAperture,
                           source_properties, properties_table)
    from photutils.detection import detect_sources
    from photutils.utils import random_cmap

    # create preliminary mask
    # from photutils import make_source_mask
    # masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

    # if LEDoff was used, get the threshold from LEDoff/background
    # path_dataset = os.path.dirname(path_file_abs) + os.path.sep
    # filenameCombined = '\t'.join(
    #     os.listdir(os.path.join(datasetDirLocal, 'master')))
    # if 'master_ledoff_subtracted' in filename:
    #     print('Using master_ledoff')
    #     path_file_abs = os.path.join(datasetDir, 'master', filename)
    #     hdu = fits.open(path_file_abs)[0]
    #     data_subtracted = hdu.data
    #     # calculate threshold
    #     ledoff_pred = np.mean(data_subtracted) * \
    #         np.ones(data_subtracted.shape)
    #     mse = mean_squared_error(data_subtracted, ledoff_pred)
    #     rmse = np.sqrt(mse)
    #     threshold = 7.0 * rmse
    #     threshold_value = threshold

    # if no LEDoff was used, background subtraction is needed;
    # there should exist no file named "subtracted"
    # if 'master.fit' in filenameCombined \
    #         or 'master_normalised.fit' in filenameCombined:

    # filenamedir = os.path.basename(path_file_abs)
    # print(filenamedir)

    # New stuff made by Parker:
    # create an output directory named after the input file
    f_dir, filename = os.path.split(path_file_abs)
    ff = os.path.splitext(filename)[0]
    new_dir = os.path.join(f_dir, ff)
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    dir_save = new_dir
    print('The photometry files will be saved in', dir_save)

    filenames_combined = '\t'.join(os.listdir(dir_save))

    if not clobber \
            and 'segm.obj' in filenames_combined \
            and 'props.obj' in filenames_combined \
            and 'props.csv' in filenames_combined \
            and 'props.ecsv' in filenames_combined:
        print('Photometry properties table already exists. '
              'Reading objects...')
        segm = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*segm.obj*'))[0], 'rb'))
        props = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*props.obj*'))[0], 'rb'))
        return [segm, props]

    if 'master' in path_file_abs:
        if 'normalised' in path_file_abs:
            print('Performing photometry on normalised master object '
                  'image {}...'.format(path_file_abs))
        else:
            print('Performing photometry on un-normalised master '
                  'image {}...'.format(path_file_abs))
    else:
        print('Warning: photometry being performed on a single '
              'exposure {}...'.format(path_file_abs))

    hdu = fits.open(path_file_abs)[0]
    data = hdu.data
    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        print('Exposure time not found in header. '
              'Cannot determine magnitude.')
        exptime = np.nan

    # === Iteratively determine background level ===
    # Assuming the background is homogeneous, estimate it by sigma clipping.
    # If background noise varies across the image, generate a 2D background
    # instead.
    print('Determining background noise level...')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=5)
    threshold = median + (std * 2.0)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(np.bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma-clipped stats of the background, with detected sources masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters=3)

    # === Detect sources by segmentation ===
    print('Determining threshold for source detection...')
    # determine the threshold for source detection;
    # in the current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data, background=bkg_median,
                                 error=bkg_std, snr=source_snr)

    print('Preparing 2D Gaussian kernel...')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel, x_size=x_size_kernel,
                              y_size=y_size_kernel)
    # Normalise the kernel. Kernel models are normalised by default
    # (integral of f(x) dx = 1), but because of the limited kernel array
    # size, the normalisation for kernels with an infinite response can
    # differ from one.
    kernel.normalize()

    # Obtain a SegmentationImage object with the same shape as the data,
    # where sources are labeled by different positive integer values;
    # a value of zero is always reserved for the background.
    # If the threshold includes the background level, as above, then the
    # image input into detect_sources() should NOT be background-subtracted.
    print('Segmentation processing...')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    print('Segmentation labels are:', repr(segm.labels))

    # === Measure regional source properties ===
    # source_properties() assumes that the data have been
    # background-subtracted; `background` is the background level that was
    # previously present in the input data. It does not get subtracted from
    # the input data, which should already be background-subtracted.
    print('Extracting source properties...')
    props = source_properties(data - bkg_median, segm, background=bkg_median)

    # add flux and instrumental magnitude to the properties:
    # flux = source_sum / exptime
    # instrumental magnitude = -2.5 * log10(flux)
    for i in range(len(props)):
        # source_sum is by definition background-subtracted already
        props[i].flux = props[i].source_sum / exptime
        props[i].mag_instr = -2.5 * np.log10(props[i].flux)

    # make plots and save to images:
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 2.8  # approximate isophotal extent
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))

    # create a table of properties
    try:
        props_table = properties_table(props)
    except Exception:
        print('No source detected in {}'.format(path_file_abs))
        return [None, None]

    # add custom columns to the table: flux and instrumental magnitude
    props_table['flux'] = [props[i].flux for i in range(len(props))]
    props_table['mag_instr'] = [props[i].mag_instr for i in range(len(props))]

    # plot centroid and segmentation using approximate elliptical apertures
    # (norm and rand_cmap are used only by the commented-out plotting below)
    norm = mpl_normalize.ImageNormalize(stretch=LogStretch())
    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    # [fig1, (ax1, ax2)] = plt.subplots(1, 2, figsize=(12, 6))
    # ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    # ax1.plot(props_table['xcentroid'], props_table['ycentroid'],
    #          ls='none', color='blue', marker='+', ms=10, lw=1.5)
    # ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    # for aperture in apertures:
    #     aperture.plot(ax=ax1, lw=1.0, alpha=1.0, color='red')
    #     aperture.plot(ax=ax2, lw=1.0, alpha=1.0, color='red')

    # plot using actual segmentation outlines (to be improved)
    # [fig2, ax3] = plt.subplots(figsize=(6, 6))
    # ax3.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    # segm_outline = np.array(segm.outline_segments(), dtype=float)
    # segm_outline[segm_outline < 1] = np.nan
    # # get a copy of the colour map and make 'bad' (NaN) pixels transparent
    # segm_outline_cmap = copy.copy(plt.cm.get_cmap('autumn'))
    # segm_outline_cmap.set_bad(alpha=0)
    # ax3.imshow(segm_outline, origin='lower', cmap=segm_outline_cmap)

    # === save ===
    # Save segm and props to object files, and also save props to table files.
    print('Saving segmentation and source properties to {}...'
          .format(dir_save))
    try:
        # if the filename ends with .fits, strip the extension
        if filename[-5:] == '.fits':
            dir_save_prefix = os.path.join(dir_save, filename[0:-5])
        else:
            dir_save_prefix = os.path.join(dir_save, filename)
        # Enhanced CSV preserves table meta-data such as column data types
        # and units, so the table can be stored and read back as ASCII with
        # no loss of information.
        ascii.write(props_table, dir_save_prefix + '-phot_props.ecsv',
                    format='ecsv')
        # csv for readability in MS Excel
        ascii.write(props_table, dir_save_prefix + '-phot_props.csv',
                    format='csv')
        # dump segmentation and properties to object files in binary mode
        file_segm = open(dir_save_prefix + '-phot_segm.obj', 'wb')
        pickle.dump(segm, file_segm)
        file_props = open(dir_save_prefix + '-phot_props.obj', 'wb')
        pickle.dump(props, file_props)
        # save figures
        # fig1.savefig(dir_save_prefix + '-phot_segm_fig1.png', dpi=600)
        # pp1 = PdfPages(dir_save_prefix + '-phot_segm_fig1.pdf')
        # pp1.savefig(fig1)
        # pp1.close()
        # fig2.savefig(dir_save_prefix + '-phot_segm_fig2.png', dpi=600)
        # pp2 = PdfPages(dir_save_prefix + '-phot_segm_fig2.pdf')
        # pp2.savefig(fig2)
        # pp2.close()
        print('Segmentation, properties objects, tables, and images '
              'saved to', dir_save)
    except Exception:
        print('Unable to write to disk, check permissions.')

    return [segm, props]

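# --- Usage sketch (illustrative): the path below is hypothetical. The
# function returns the SegmentationImage and the measured source properties,
# and writes its tables and pickled objects into a directory named after the
# input file.
segm, props = segmentation_photometry('/data/master_normalised.fits',
                                      bkg_sigma=3.0, source_snr=3.0,
                                      clobber=False)
if props is not None:
    for prop in props:
        print(prop.id, prop.xcentroid, prop.ycentroid, prop.mag_instr)
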
def generate(image, wcs, title, flip_ra=False, flip_dec=False,
             log_stretch=False, cutout=None,
             primary_coord=None, secondary_coord=None, third_coord=None,
             slit=None, vmnx=None, extra_text=None, outfile=None):
    """ Basic method to generate a finder chart figure

    Args:
        image (np.ndarray):
            Image for the finder
        wcs (astropy.wcs.WCS):
            WCS solution
        title (str):
            Title; typically the name of the primary source
        flip_ra (bool, default False):
            Flip the RA (x-axis). Useful for southern hemisphere finders.
        flip_dec (bool, default False):
            Flip the Dec (y-axis). Useful for southern hemisphere finders.
        log_stretch (bool, optional):
            Use a log stretch for the image display
        cutout (tuple, optional):
            SkyCoord (center coordinate) and Quantity (image angular size)
            for a cutout from the input image.
        primary_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in red at this coordinate
        secondary_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in cyan at this coordinate.
            Assume it is an offset star (i.e. calculate offsets).
        third_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in yellow at this coordinate
        slit (tuple, optional):
            If provided, place a rectangular slit with the specified
            coordinates, width, length, and position angle (from North
            to East) on the image, e.g.
            (SkyCoord('21h44m25.255s', '-40d54m00.1s', frame='icrs'),
             1*u.arcsec, 10*u.arcsec, 20*u.deg)
        vmnx (tuple, optional):
            Used for scaling the image. Otherwise, the image is analyzed
            for these values.
        extra_text (str, optional):
            Extra text to be added at the bottom of the figure,
            e.g. `DSS r-filter`
        outfile (str, optional):
            Filename for the figure. File type is set by the extension.

    Returns:
        matplotlib.pyplot.figure, matplotlib.pyplot.Axes
    """
    utils.set_mplrc()
    plt.clf()
    fig = plt.figure(figsize=(7, 8.5))
    # fig.set_size_inches(7.5, 10.5)

    # Cutout?
    if cutout is not None:
        cutout_img = Cutout2D(image, cutout[0], cutout[1], wcs=wcs)
        # Overwrite the input image and WCS with the cutout versions
        wcs = cutout_img.wcs
        image = cutout_img.data

    # Axis
    ax = fig.add_axes([0.10, 0.20, 0.75, 0.5], projection=wcs)

    # Show
    if log_stretch:
        norm = mplnorm.ImageNormalize(stretch=LogStretch())
    else:
        norm = None
    cimg = ax.imshow(image, cmap='Greys', norm=norm)

    # Flip so RA increases to the left
    if flip_ra is True:
        ax.invert_xaxis()
    if flip_dec is True:
        ax.invert_yaxis()

    # N/E
    overlay = ax.get_coords_overlay('icrs')
    overlay['ra'].set_ticks(color='white')
    overlay['dec'].set_ticks(color='white')
    overlay['ra'].set_axislabel('Right Ascension')
    overlay['dec'].set_axislabel('Declination')
    overlay.grid(color='green', linestyle='solid', alpha=0.5)

    # Contrast
    if vmnx is None:
        # adjust the clipping level and number of iterations here if needed
        mean, median, stddev = sigma_clipped_stats(image)
        # sky level - 1 sigma to +2 sigma above sky level
        # (this assignment was commented out in the source, which left vmnx
        #  as None and broke set_clim below; restored per the comment)
        vmnx = (median - stddev, median + 2 * stddev)
        print("Using vmnx = {} based on the image stats".format(vmnx))
    cimg.set_clim(vmnx[0], vmnx[1])

    # Add primary
    if primary_coord is not None:
        c = SphericalCircle((primary_coord.ra, primary_coord.dec),
                            2 * units.arcsec,
                            transform=ax.get_transform('icrs'),
                            edgecolor='red', facecolor='none')
        ax.add_patch(c)
        # Text
        jname = ltu.name_from_coord(primary_coord)
        ax.text(0.5, 1.34, jname, fontsize=28,
                horizontalalignment='center', transform=ax.transAxes)

    # Secondary
    if secondary_coord is not None:
        c_S1 = SphericalCircle((secondary_coord.ra, secondary_coord.dec),
                               2 * units.arcsec,
                               transform=ax.get_transform('icrs'),
                               edgecolor='cyan', facecolor='none')
        ax.add_patch(c_S1)
        # Text
        jname = ltu.name_from_coord(secondary_coord)
        ax.text(0.5, 1.24, jname, fontsize=22, color='blue',
                horizontalalignment='center', transform=ax.transAxes)
        # Print offsets
        if primary_coord is not None:
            sep = primary_coord.separation(secondary_coord).to('arcsec')
            PA = primary_coord.position_angle(secondary_coord)
            # RA/DEC
            dec_off = np.cos(PA) * sep  # arcsec
            ra_off = np.sin(PA) * sep   # arcsec (East is *higher* RA)
            ax.text(0.5, 1.22,
                    'Offset from Ref. Star (cyan) to Target (red):\n'
                    'RA(to targ) = {:.2f} DEC(to targ) = {:.2f}'.format(
                        -1 * ra_off.to('arcsec'),
                        -1 * dec_off.to('arcsec')),
                    fontsize=15, horizontalalignment='center',
                    transform=ax.transAxes, color='blue', va='top')

    # Add tertiary
    if third_coord is not None:
        c = SphericalCircle((third_coord.ra, third_coord.dec),
                            2 * units.arcsec,
                            transform=ax.get_transform('icrs'),
                            edgecolor='yellow', facecolor='none')
        ax.add_patch(c)

    # Slit
    # (`flag_photu` is assumed to be a module-level flag recording whether
    #  photutils could be imported)
    if (slit is not None) and (flag_photu is True):
        # List of values: [coordinates, width, length, PA], e.g.
        # (SkyCoord('21h44m25.255s', '-40d54m00.1s', frame='icrs'),
        #  1*u.arcsec, 10*u.arcsec, 20*u.deg)
        slit_coords, width, length, pa = slit
        pa_deg = pa.to('deg').value
        # For theta=0, the width is oriented North-South, which is the
        # slit length
        aper = SkyRectangularAperture(positions=slit_coords,
                                      w=length, h=width, theta=pa)
        apermap = aper.to_pixel(wcs)
        apermap.plot(color='purple', lw=1)
        plt.text(0.5, -0.1, 'Slit PA={} deg'.format(pa_deg),
                 color='purple', fontsize=15, ha='center', va='top',
                 transform=ax.transAxes)
    if (slit is not None) and (flag_photu is False):
        raise IOError('Slit cannot be placed without the photutils package')

    # Title
    ax.text(0.5, 1.44, title, fontsize=32,
            horizontalalignment='center', transform=ax.transAxes)

    # Extra text
    if extra_text is not None:
        ax.text(-0.1, -0.25, extra_text, fontsize=20,
                horizontalalignment='left', transform=ax.transAxes)

    # Sources
    # Labels
    # ax.set_xlabel(r'\textbf{DEC (EAST direction)}')
    # ax.set_ylabel(r'\textbf{RA (SOUTH direction)}')

    if outfile is not None:
        plt.savefig(outfile)
        plt.close()
    else:
        plt.show()

    # Return
    return fig, ax

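# --- Usage sketch (illustrative): building a finder chart for a target with
# one offset star. `image` and `wcs` are assumed already loaded (e.g. from a
# survey cutout), and the slit is only drawn when the module's flag_photu
# flag indicates photutils is available; all coordinates are made up.
from astropy import units as u
from astropy.coordinates import SkyCoord

target = SkyCoord('21h44m25.255s', '-40d54m00.1s', frame='icrs')
offset_star = target.directional_offset_by(45 * u.deg, 30 * u.arcsec)
fig, ax = generate(image, wcs, 'J2144-4054',
                   primary_coord=target, secondary_coord=offset_star,
                   slit=(target, 1 * u.arcsec, 10 * u.arcsec, 20 * u.deg),
                   outfile='finder_J2144-4054.png')
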
def plotDirectCutouts(self, savePath=None, colourMap='viridis',
                      gridSpecs=None):
    if self.directCutouts is not None:
        if gridSpecs is None:
            mplplot.figure(figsize=(10, 5))
        for stampIndex, (grism, cutoutData) in enumerate(
                self.directCutouts.items()):
            if gridSpecs is None:
                subplotAxes = mplplot.subplot(2, 1, stampIndex + 1)
            else:
                subplotAxes = mplplot.subplot(gridSpecs[stampIndex])
            if cutoutData is None:
                subplotAxes.text(
                    0.5, 0.5,
                    'Field {}, Object {}:\nNO DATA AVAILABLE.'.format(
                        self.targetPar, self.targetObject),
                    horizontalalignment='center', fontsize='large',
                    transform=subplotAxes.transAxes)
                continue
            if np.all(cutoutData < 0):
                subplotAxes.text(
                    0.5, 0.5,
                    'Field {}, Object {}:\nNO NONZERO DATA AVAILABLE.'.format(
                        self.targetPar, self.targetObject),
                    horizontalalignment='center', fontsize='large',
                    transform=subplotAxes.transAxes)
                continue
            norm = astromplnorm.ImageNormalize(
                cutoutData,
                interval=astrovis.AsymmetricPercentileInterval(0, 99.5),
                stretch=astrovis.LinearStretch(),
                clip=True)
            mplplot.imshow(cutoutData, origin='lower',
                           interpolation='nearest', cmap=colourMap,
                           norm=norm)
            mplplot.xlabel('X (pixels)')
            mplplot.ylabel('Y (pixels)')
            mplplot.title(
                'Field {}, Object {}:\nDirect cutout for F{} (G{})'.format(
                    self.targetPar, self.targetObject,
                    self.getDirectFilterForGrism(grism), grism))
            # secondary y-axis in arcseconds, centred on the cutout and
            # scaled by the instrument pixel scale (IDCSCALE)
            arcsecYAxis = subplotAxes.twinx()
            arcsecYAxis.set_ylim(
                *(np.array(subplotAxes.get_ylim()) -
                  0.5 * np.sum(subplotAxes.get_ylim())) *
                self.directHdus[grism][1]['IDCSCALE'])
            # debug output:
            # print(subplotAxes.get_ylim(),
            #       np.array(subplotAxes.get_ylim()),
            #       np.array(subplotAxes.get_ylim()) *
            #       self.directHdus[grism][1]['IDCSCALE'],
            #       *np.array(subplotAxes.get_ylim()) *
            #       self.directHdus[grism][1]['IDCSCALE'])
            arcsecYAxis.set_ylabel(r'$\Delta Y$ (arcsec)')
            mplplot.grid(color='white', ls='solid')
        try:
            mplplot.tight_layout()
        except ValueError as e:
            print('Error attempting tight_layout for: Field {}, Object {} '
                  '({})'.format(self.targetPar, self.targetObject, e))
            return
        if savePath is not None:
            mplplot.savefig(savePath, dpi=300, bbox_inches='tight')
            mplplot.close()
    else:
        print('The loadDirectCutouts(...) method must be called before '
              'direct cutouts can be plotted.')

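# --- Minimal sketch (illustrative) of the secondary-axis trick used above:
# mirror the pixel y-limits onto a twin axis, re-centred on zero and scaled
# by the pixel scale so the axis reads in arcseconds. `pixel_scale` is a
# hypothetical stand-in for the IDCSCALE header keyword.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.imshow(np.random.rand(40, 40), origin='lower')
pixel_scale = 0.13  # arcsec / pixel (hypothetical value)
ylim = np.array(ax.get_ylim())
arcsec_axis = ax.twinx()
# shift limits so zero sits at the cutout centre, then convert to arcsec
arcsec_axis.set_ylim(*(ylim - 0.5 * ylim.sum()) * pixel_scale)
arcsec_axis.set_ylabel(r'$\Delta Y$ (arcsec)')
plt.show()
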
def plotDrizzledStamps(self, extName='SCI', colourMap='viridis',
                       applyWCS=True, savePath=None, gridSpecs=None,
                       zerothOrderData=None):
    if self.stampHdus[extName] is not None:
        if gridSpecs is None:
            mplplot.figure(figsize=(10, 5))
        for stampIndex, (grism, hduData) in enumerate(
                self.stampHdus[extName].items()):
            if hduData is None:
                if gridSpecs is None:
                    subplotAxes = mplplot.subplot(2, 1, stampIndex + 1)
                else:
                    subplotAxes = mplplot.subplot(gridSpecs[stampIndex])
                subplotAxes.text(
                    0.5, 0.5,
                    'Field {}, Object {}:\nNO DATA AVAILABLE.'.format(
                        self.targetPar, self.targetObject),
                    horizontalalignment='center', fontsize='large',
                    transform=subplotAxes.transAxes)
                continue
            if np.all(hduData[0] < 0):
                if gridSpecs is None:
                    subplotAxes = mplplot.subplot(2, 1, stampIndex + 1)
                else:
                    subplotAxes = mplplot.subplot(gridSpecs[stampIndex])
                subplotAxes.text(
                    0.5, 0.5,
                    'Field {}, Object {}:\nNO NONZERO DATA AVAILABLE.'.format(
                        self.targetPar, self.targetObject),
                    horizontalalignment='center', fontsize='large',
                    transform=subplotAxes.transAxes)
                continue

            stampHeader = hduData[1]
            stampData = hduData[0]

            wcsObject = None
            if applyWCS:
                wcsObject = self.buildWCSObject(stampHeader)
                wavelengthUnit = r'${\rm {\AA}}$'
                # wavelengthUnit = 'm'
                # leave cross-dispersion in arcsecond units
                xDispUnit = wcsObject.wcs.cunit[1]
            else:
                wavelengthUnit = xDispUnit = 'Pixel'

            # construct a normalization handler for the image
            cutoutData = stampData
            modelData = self.stampHdus['MOD'][grism][0]
            cutoutModel = modelData
            modelRect = None
            if self.doTrimBorderPixels:
                cutoutData, cutoutModel, modelRect = \
                    self.getTrimmedStampData(stampData, modelData, grism,
                                             wcs=wcsObject)
                stampData = cutoutData.data
                wcsObject = cutoutData.wcs
            norm = astromplnorm.ImageNormalize(
                stampData,
                interval=self.stretchInterval,
                stretch=self.stretchModel(self, None,
                                          stampData[cutoutModel > 0]))

            if gridSpecs is None:
                subplotAxes = mplplot.subplot(2, 1, stampIndex + 1,
                                              projection=wcsObject)
            else:
                subplotAxes = mplplot.subplot(gridSpecs[stampIndex],
                                              projection=wcsObject)

            # Force stamps to plot within the full wavelength range.
            subplotAxes.set_xlim(
                wcsObject.wcs_world2pix(
                    StampPlotter.plottedWavelengthRange[0].value, 0, 1)[0],
                wcsObject.wcs_world2pix(
                    StampPlotter.plottedWavelengthRange[1].value, 0, 1)[0])

            # aspect='auto' forces the subplots to stretch to fill the
            # available space
            mplplot.imshow(stampData, origin='lower',
                           interpolation='nearest', norm=norm,
                           cmap=colourMap, aspect='auto')
            mplplot.xlabel('Wavelength ({})'.format(wavelengthUnit))
            mplplot.ylabel('Cross-dispersion ({})'.format(xDispUnit))
            mplplot.title(
                'Field {}, Object {}: \nDrizzled stamp for G{}'.format(
                    self.targetPar, self.targetObject, grism))
            mplplot.grid(color='white', ls='solid')

            if zerothOrderData is not None \
                    and zerothOrderData[grism] is not None:
                # print('Setting stamp path to {}'.format(
                #     self.stampPaths[grism]))
                # zerothOrderData[grism].setDrizzledStampFilePath(
                #     self.stampPaths[grism])
                # midpointWavelength = 0.5*(
                #     StampPlotter.grismRanges[grism][0] +
                #     StampPlotter.grismRanges[grism][1]).to(
                #         astrounits.angstrom).value
                # zerothOrderData[grism].getWavelengthZeroOrderFlag(
                #     midpointWavelength)
                self.plotZerothOrders(zerothOrderData, subplotAxes, grism,
                                      wcsObject)

            # if modelRect is not None:
            #     mplplot.gca().add_patch(modelRect)

        mplplot.tight_layout(h_pad=5.0)
        if savePath is not None:
            mplplot.savefig(savePath, dpi=300, bbox_inches='tight')
            mplplot.close()
    else:
        print('The loadDrizzledStamps(...) method must be called before '
              'drizzled stamps can be plotted.')

def generate(image, wcs, title, log_stretch=False, cutout=None,
             primary_coord=None, secondary_coord=None, third_coord=None,
             vmnx=None, outfile=None):
    """ Basic method to generate a finder chart figure

    Args:
        image (np.ndarray):
            Image for the finder
        wcs (astropy.wcs.WCS):
            WCS solution
        title (str):
            Title; typically the name of the primary source
        log_stretch (bool, optional):
            Use a log stretch for the image display
        cutout (tuple, optional):
            SkyCoord (center coordinate) and Quantity (image angular size)
            for a cutout from the input image.
        primary_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in red at this coordinate
        secondary_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in cyan at this coordinate.
            Assume it is an offset star (i.e. calculate offsets).
        third_coord (astropy.coordinates.SkyCoord, optional):
            If provided, place a mark in yellow at this coordinate
        vmnx (tuple, optional):
            Used for scaling the image. Otherwise, the image is analyzed
            for these values.
        outfile (str, optional):
            Filename for the figure. File type is set by the extension.

    Returns:
        matplotlib.pyplot.figure, matplotlib.pyplot.Axes
    """
    utils.set_mplrc()
    plt.clf()
    fig = plt.figure(dpi=600)
    fig.set_size_inches(7.5, 10.5)

    # Cutout?
    if cutout is not None:
        cutout_img = Cutout2D(image, cutout[0], cutout[1], wcs=wcs)
        # Overwrite the input image and WCS with the cutout versions
        wcs = cutout_img.wcs
        image = cutout_img.data

    # Axis
    ax = fig.add_axes([0.10, 0.20, 0.80, 0.5], projection=wcs)

    # Show
    if log_stretch:
        norm = mplnorm.ImageNormalize(stretch=LogStretch())
    else:
        norm = None
    cimg = ax.imshow(image, cmap='Greys', norm=norm)

    # Flip so RA increases to the left
    ax.invert_xaxis()

    # N/E
    overlay = ax.get_coords_overlay('icrs')
    overlay['ra'].set_ticks(color='white')
    overlay['dec'].set_ticks(color='white')
    overlay['ra'].set_axislabel('Right Ascension')
    overlay['dec'].set_axislabel('Declination')
    overlay.grid(color='green', linestyle='solid', alpha=0.5)

    # Contrast
    if vmnx is None:
        # adjust the clipping level and number of iterations here if needed
        mean, median, stddev = sigma_clipped_stats(image)
        # sky level - 1 sigma to +2 sigma above sky level
        # (this assignment was commented out in the source, which left vmnx
        #  as None and broke set_clim below; restored per the comment)
        vmnx = (median - stddev, median + 2 * stddev)
        print("Using vmnx = {} based on the image stats".format(vmnx))
    cimg.set_clim(vmnx[0], vmnx[1])

    # Add primary
    if primary_coord is not None:
        c = SphericalCircle((primary_coord.ra, primary_coord.dec),
                            2 * units.arcsec,
                            transform=ax.get_transform('icrs'),
                            edgecolor='red', facecolor='none')
        ax.add_patch(c)
        # Text
        jname = ltu.name_from_coord(primary_coord)
        ax.text(0.5, 1.34, jname, fontsize=28,
                horizontalalignment='center', transform=ax.transAxes)

    # Secondary
    if secondary_coord is not None:
        c_S1 = SphericalCircle((secondary_coord.ra, secondary_coord.dec),
                               2 * units.arcsec,
                               transform=ax.get_transform('icrs'),
                               edgecolor='cyan', facecolor='none')
        ax.add_patch(c_S1)
        # Text
        jname = ltu.name_from_coord(secondary_coord)
        ax.text(0.5, 1.24, jname, fontsize=22, color='blue',
                horizontalalignment='center', transform=ax.transAxes)
        # Print offsets
        if primary_coord is not None:
            sep = primary_coord.separation(secondary_coord).to('arcsec')
            PA = primary_coord.position_angle(secondary_coord)
            # RA/DEC
            dec_off = np.cos(PA) * sep  # arcsec
            ra_off = np.sin(PA) * sep   # arcsec (East is *higher* RA)
            ax.text(0.5, 1.14,
                    'RA(to targ) = {:.2f} DEC(to targ) = {:.2f}'.format(
                        -1 * ra_off.to('arcsec'),
                        -1 * dec_off.to('arcsec')),
                    fontsize=18, horizontalalignment='center',
                    transform=ax.transAxes)

    # Add tertiary
    if third_coord is not None:
        c = SphericalCircle((third_coord.ra, third_coord.dec),
                            2 * units.arcsec,
                            transform=ax.get_transform('icrs'),
                            edgecolor='yellow', facecolor='none')
        ax.add_patch(c)

    # Slit?
    '''
    if slit is not None:
        r = Rectangle((primary_coord.ra.value, primary_coord.dec.value),
                      slit[0]/3600., slit[1]/3600., angle=360-slit[2],
                      transform=ax.get_transform('icrs'),
                      facecolor='none', edgecolor='red')
        ax.add_patch(r)
    '''

    # Title
    ax.text(0.5, 1.44, title, fontsize=32,
            horizontalalignment='center', transform=ax.transAxes)

    # Sources
    # Labels
    # ax.set_xlabel(r'\textbf{DEC (EAST direction)}')
    # ax.set_ylabel(r'\textbf{RA (SOUTH direction)}')

    if outfile is not None:
        plt.savefig(outfile)
        plt.close()
    else:
        plt.show()

    # Return
    return fig, ax

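# --- Minimal sketch (illustrative) of the offset arithmetic both generate()
# variants print: decompose the separation between two coordinates into
# RA/Dec components via the position angle (East = increasing RA). The two
# coordinates below are made up.
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np

primary = SkyCoord(150.1 * u.deg, 2.2 * u.deg)
secondary = SkyCoord(150.102 * u.deg, 2.1985 * u.deg)
sep = primary.separation(secondary).to('arcsec')
pa = primary.position_angle(secondary)
ra_off = np.sin(pa) * sep   # arcsec of true angular offset toward East
dec_off = np.cos(pa) * sep  # arcsec toward North
# negated to give the move *from* the reference star *to* the target
print('RA(to targ) = {:.2f}, DEC(to targ) = {:.2f}'.format(-ra_off, -dec_off))
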
# NOTE: the opening of this loop is truncated in the source. The unpacking
# below implies a `for stuff in zip(...)` over six sequences; the image and
# WCS sequences, and the first entry of the label tuple, are missing and
# have been left as a placeholder comment rather than reconstructed.
for stuff in zip(
        # ... (truncated: the images, their WCS solutions, and the first
        #      label entry) ...
        ('Temperature', 'Temperature', 'Temperature'),
        ('Column', 'Temperature', 'Temperature_FullRange', 'GasTemperature'),
        (asinh_norm.AsinhNorm, matplotlib.colors.Normalize,
         matplotlib.colors.Normalize, matplotlib.colors.Normalize),
        ((0, 100), (15, 45), (10, 200), (10, 200))):
    im, wcs, label, fn, stretch, (vmin, vmax) = stuff
    fig1 = pl.figure(1, figsize=figsize)
    fig1.clf()
    ax = wcsaxes.WCSAxesSubplot(fig1, 1, 1, 1, wcs=wcs)
    fig1.add_axes(ax)
    # note: the zipped `stretch` entries are Normalize subclasses,
    # instantiated here and passed as the stretch of the ImageNormalize
    norm = mpl_normalize.ImageNormalize(vmin=vmin, vmax=vmax,
                                        stretch=stretch())
    ims = ax.imshow(im, cmap=cm, vmin=vmin, vmax=vmax, norm=norm)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05,
                              axes_class=matplotlib.axes.Axes)
    cb = pl.colorbar(mappable=ims, cax=cax)
    cax.set_ylabel(label)
    cb.ax.yaxis.set_label_position('right')
    ax.set_xlabel("Galactic Longitude")
    ax.set_ylabel("Galactic Latitude")