# Imports assumed by the two segmentation_photometry() functions below
# (photutils ~v0.2/0.3-era API); a best-guess reconstruction, since the
# original module header is not shown. The GUI methods further down
# additionally rely on their host class/module context (Qt widgets, Figure,
# Background, etc.).
import os
import glob
import pickle

import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

from astropy.io import fits, ascii
from astropy.convolution import Gaussian2DKernel
from astropy.stats import (sigma_clipped_stats, gaussian_fwhm_to_sigma,
                           gaussian_sigma_to_fwhm)
from astropy.visualization import LogStretch, mpl_normalize
from photutils import (detect_threshold, EllipticalAperture,
                       source_properties, properties_table)
from photutils.detection import detect_sources
from photutils.utils import random_cmap, calc_total_error


def segmentation_photometry(path_image_abs,
                            path_error_abs=None,
                            logger=None,
                            bkg_sigma=1.5,
                            source_snr=1.05,
                            fwhm_kernel=25,
                            x_size_kernel=100,
                            y_size_kernel=80,
                            dump_pickle=False,
                            clobber=True):

    """
    Given a FITS file (master image), calculate photometry by source
    segmentation.

    make_source_mask is not yet available in photutils v0.2.2, so this
    version manually creates a source mask for determining the background.
    """

    def msg(string, msgtype=None):
        if logger is None:
            print(string)
        else:
            print(string)
            if msgtype == 'info':
                logger.info(string)
            if msgtype == 'error':
                logger.error(string)
            if msgtype == 'warning':
                logger.warning(string)

    filename = os.path.basename(path_image_abs)
    dir_save = os.path.dirname(path_image_abs)
    filenames_combined = '\t'.join(os.listdir(dir_save))

    if clobber == False \
        and filename[0:-5]+'-segm.obj' in filenames_combined \
        and filename[0:-5]+'-props.obj' in filenames_combined \
        and filename[0:-5]+'-centroid_outline.png' in filenames_combined \
        and filename[0:-5]+'-centroid_outline.pdf' in filenames_combined \
        and filename[0:-5]+'-segmentation.png' in filenames_combined \
        and filename[0:-5]+'-segmentation.pdf' in filenames_combined:

        msg('Photometry properties table already exists. '
            + 'Reading pickles...', msgtype='info')
        try:
            segm = pickle.load(open(glob.glob(os.path.join(
                dir_save, filename[0:-5]+'-segm.obj*'))[0], 'rb'))
            props_list = pickle.load(open(glob.glob(os.path.join(
                dir_save, filename[0:-5]+'-props.obj*'))[0], 'rb'))
            return [segm, props_list]
        except:
            # pickle file corrupt or empty, proceed
            pass

    elif clobber == False \
        and filename[0:-5]+'-logstretch.png' in filenames_combined \
        and filename[0:-5]+'-logstretch.pdf' in filenames_combined:

        msg('Non-detection from previous results.', msgtype='info')
        return [None, []]

    # image type notifications
    if 'master' in path_image_abs:
        if 'normalised' in path_image_abs:
            msg('Performing photometry on normalised master object image {}...'
                .format(path_image_abs), msgtype='info')
        else:
            msg('Performing photometry on un-normalised master image {}...'
                .format(path_image_abs), msgtype='info')
    elif 'reduced' in path_image_abs:
        msg('Performing photometry on reduced image frame {}...'
            .format(path_image_abs), msgtype='info')
    else:
        msg('Warning: photometry is being performed on a single exposure {}...'
            .format(path_image_abs), msgtype='warning')

    # read in data
    try:
        hdu = fits.open(path_image_abs)['FPC']
        data = hdu.data
    except:
        hdu = fits.open(path_image_abs)[0]
        data = hdu.data

    # read in error in data
    msg('Reading master error image {}...'
        .format(path_error_abs))
    try:
        hdu_error = fits.open(path_error_abs)[0]
        data_error = hdu_error.data
    except:
        data_error = np.zeros(data.shape)
        msg('No master error image available for {}'
            .format(path_image_abs))

    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        msg('Exposure time not found in header. '
            + 'Cannot determine magnitude.', msgtype='error')
        exptime = np.nan

    # === Iteratively determine background level ===

    # assuming the background is homogeneous, estimate it by sigma clipping;
    # if background noise varies across the image, generate a 2D background
    # instead, using the Background function
    msg('Determining background noise level...', msgtype='info')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=3)
    threshold = median + (std * 4)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma-clipping stats of the background, with detected sources masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters=3)

    # === Detect sources by segmentation ===

    msg('Determining threshold for source detection...', msgtype='info')
    # determine threshold for source detection
    # in the current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data,
                                 background=bkg_median,
                                 error=data_error+bkg_std,
                                 snr=source_snr)

    # calculate total error including Poisson statistics
    try:
        # this is for v0.3 and above
        msg('Calculating total errors including background and Poisson...',
            msgtype='info')
        err_tot = calc_total_error(data,
                                   bkg_error=data_error+bkg_std,
                                   effective_gain=0.37)
        gain = None
    # in versions earlier than 0.3, this function is not available
    except:
        # error must be of the same shape as the data array
        # this is for v0.2.2
        err_tot = data_error + bkg_std
        gain = 0.37

    msg('Preparing 2D Gaussian kernel...', msgtype='info')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel,
                              x_size=x_size_kernel,
                              y_size=y_size_kernel)
    # normalise kernel
    # The kernel models are normalised by default, i.e. the integral of f(x)
    # over all x is 1. But because of the limited kernel array size, the
    # normalisation for kernels with an infinite response can differ from one.
    kernel.normalize()

    # obtain a SegmentationImage object with the same shape as the data,
    # where sources are labelled by different positive integer values.
    # A value of zero is always reserved for the background.
    # If the threshold includes the background level as above, then the image
    # input into detect_sources() should NOT be background-subtracted.
    msg('Segmentation processing...', msgtype='info')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    msg('Segmentation labels are: ' + repr(segm.labels), msgtype='info')

    # === Measure regional source properties ===

    # source_properties() assumes that the data have been background-subtracted.
    # `background` is the background level that was previously present
    # in the input data.
    # The input background does not get subtracted from the input data,
    # which should already be background-subtracted.
    msg('Extracting source properties...', msgtype='info')
    if gain is None:
        # gain is no longer supported in v0.3; it is folded into the total
        # error array instead
        props_list = source_properties(data-bkg_median, segm,
                                       background=bkg_median,
                                       error=err_tot)
    else:
        # still in v0.2.2
        props_list = source_properties(data-bkg_median, segm,
                                       background=bkg_median,
                                       error=err_tot,
                                       effective_gain=gain)

    # add more properties that are not automatically calculated
    for i in range(len(props_list)):
        # source_sum is by definition background-subtracted already
        props_list[i].flux = props_list[i].source_sum/exptime
        props_list[i].flux_err = props_list[i].source_sum_err/exptime
        # flux = source_sum / exptime
        # instrumental magnitude = -2.5 * log10(flux)
        props_list[i].mag_instr = -2.5 * np.log10(props_list[i].flux)
        # error propagation: sigma_mag = |dm/dF| * sigma_F
        #                              = 2.5 / (F * ln(10)) * sigma_F
        props_list[i].mag_instr_err = 2.5 / np.log(10) \
            * props_list[i].flux_err / props_list[i].flux
        # assuming the FWHM of a circular Gaussian with the same
        # cross-section area
        props_list[i].fwhm = gaussian_sigma_to_fwhm * np.sqrt(
            props_list[i].semimajor_axis_sigma.value
            * props_list[i].semiminor_axis_sigma.value)

    # make plots and save to images
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 5    # approximate isophotal extent
    for props in props_list:
        position = (props.xcentroid.value, props.ycentroid.value)
        a = props.semimajor_axis_sigma.value * r
        b = props.semiminor_axis_sigma.value * r
        theta = props.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))

    # === plot and save ===

    # if the filename ends with .fits, strip the extension
    if filename[-5:] == '.fits':
        path_save_prefix = os.path.join(dir_save, filename[0:-5])
    else:
        path_save_prefix = os.path.join(dir_save, filename)

    norm_log = mpl_normalize.ImageNormalize(vmin=0, vmax=2000,
                                            stretch=LogStretch())

    if len(props_list) > 0:
        # Save segm and props to object files, and also save props to a table.
        msg('Saving segmentation and source properties to {}...'
            .format(dir_save), msgtype='info')
        # at least one source was detected
        # create a table of properties
        props_table = properties_table(props_list)
        # add custom columns to the table: flux, mag_instr and their errors
        props_table['flux'] = [props_list[i].flux
                               for i in range(len(props_list))]
        props_table['flux_err'] = [props_list[i].flux_err
                                   for i in range(len(props_list))]
        props_table['mag_instr'] = [props_list[i].mag_instr
                                    for i in range(len(props_list))]
        props_table['mag_instr_err'] = [props_list[i].mag_instr_err
                                        for i in range(len(props_list))]
        props_table['fwhm'] = [props_list[i].fwhm
                               for i in range(len(props_list))]

        # plot centroid and segmentation outline
        [fig1, ax1] = plt.subplots(figsize=(4, 3))
        ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax1.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red', marker='+',
                 markersize=2, markeredgewidth=0.1, alpha=1)
        segm_outline = np.array(segm.outline_segments(), dtype=float)
        segm_outline[segm_outline < 1] = np.nan
        # use the winter colormap for the outlines and
        # set how the colormap handles 'bad' (NaN) values
        segm_outline_cmap = plt.cm.winter
        segm_outline_cmap.set_bad(alpha=0)
        ax1.imshow(segm_outline, origin='lower',
                   cmap=segm_outline_cmap, alpha=1)
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
        fig1.tight_layout()

        # segmentation image with approximate elliptical apertures
        [fig2, ax2] = plt.subplots(figsize=(4, 3))
        rand_cmap = random_cmap(segm.max + 1, random_state=8)
        ax2.imshow(segm, origin='lower', cmap=rand_cmap)
        ax2.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red', marker='+',
                 markersize=2, markeredgewidth=0.1, alpha=1)
        for aperture in apertures:
            aperture.plot(ax=ax2, lw=0.1, alpha=1, color='lime')
        ax2.axis('off')
        ax2.get_xaxis().set_visible(False)
        ax2.get_yaxis().set_visible(False)
        fig2.tight_layout()

        try:
            # Enhanced CSV allows preserving table meta-data such as
            # column data types and units.
            # In this way a data table can be stored and read back as ASCII
            # with no loss of information.
            ascii.write(props_table, path_save_prefix + '-props.ecsv',
                        format='ecsv')
            # csv for readability in MS excel
            ascii.write(props_table, path_save_prefix + '-props.csv',
                        format='csv')

            # save figures
            fig1.savefig(path_save_prefix + '-centroid_outline.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            fig2.savefig(path_save_prefix + '-segmentation.png',
                         bbox_inches='tight', pad_inches=0, dpi=2000)

            pp1 = PdfPages(path_save_prefix + '-centroid_outline.pdf')
            pp1.savefig(fig1, dpi=1200)
            pp1.close()

            pp2 = PdfPages(path_save_prefix + '-segmentation.pdf')
            pp2.savefig(fig2, dpi=2000)
            pp2.close()

            if dump_pickle:
                # dump segmentation and properties to objects in binary mode
                file_segm = open(path_save_prefix + '-segm.obj', 'wb')
                pickle.dump(segm, file_segm)
                file_props = open(path_save_prefix + '-props.obj', 'wb')
                pickle.dump(props_list, file_props)
                # close the pickle files after writing
                file_segm.close()
                file_props.close()

            msg('Segmentation, properties objects, tables, and images '
                + 'saved to {}'.format(dir_save), msgtype='info')

        except:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')

        # guard against memory leaks: close figures and release large objects
        try:
            plt.close('all')
            del (hdu, hdu_error, data, data_error, header, mask, mask_dilated,
                 err_tot, kernel, apertures, norm_log, props_table,
                 segm_outline, segm_outline_cmap, rand_cmap,
                 fig1, ax1, fig2, ax2, pp1, pp2, file_segm, file_props)
        except:
            pass

        return [segm, props_list]

    else:
        msg('No source detected in {}'.format(path_image_abs),
            msgtype='warning')

        # save log-scale stretched image, if no source was detected
        [fig0, ax0] = plt.subplots(figsize=(4, 3))
        ax0.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax0.get_xaxis().set_visible(False)
        ax0.get_yaxis().set_visible(False)

        try:
            fig0.savefig(path_save_prefix + '-logstretch.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            pp0 = PdfPages(path_save_prefix + '-logstretch.pdf')
            pp0.savefig(fig0, dpi=1200)
            pp0.close()
        except:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')

        return [None, []]
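
# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how segmentation_photometry() above might be driven;
# the FITS paths are hypothetical placeholders and the keyword values simply
# echo the defaults defined above.
def _example_segmentation_photometry():
    path_image = '/data/images/fpc/master.fits'         # hypothetical path
    path_error = '/data/images/fpc/master_error.fits'   # hypothetical path
    segm_map, source_props = segmentation_photometry(
        path_image, path_error_abs=path_error,
        bkg_sigma=1.5, source_snr=1.05, dump_pickle=True, clobber=False)
    if source_props:
        # instrumental magnitude of the first detected source,
        # m = -2.5 * log10(source_sum / exptime), as computed above
        print(source_props[0].mag_instr, source_props[0].mag_instr_err)
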
def segmentation_photometry(path_file_abs,
                            bkg_sigma=3.0,
                            source_snr=3.0,
                            fwhm_kernel=2.0,
                            x_size_kernel=3,
                            y_size_kernel=3,
                            clobber=False):
    """
    aperture photometry from source segmentation

    make_source_mask is not yet available in photutils v0.2.2, so this
    version manually creates a source mask for determining the background
    """

    import os
    import copy
    import glob
    import pickle
    import numpy as np
    from scipy import ndimage
    import matplotlib
    #matplotlib.rcParams['text.usetex'] = True
    #matplotlib.rcParams['text.latex.unicode'] = True
    #from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    from astropy.io import fits, ascii
    from astropy.convolution import Gaussian2DKernel
    from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
    # from astropy.table import Table
    from astropy.visualization import (LogStretch, mpl_normalize)
    # from astropy.extern.six.moves import StringIO
    from photutils import (detect_threshold, EllipticalAperture,
                           source_properties, properties_table)
    from photutils.detection import detect_sources
    from photutils.utils import random_cmap

    # create preliminary mask
    #from photutils import make_source_mask
    #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

    # if LEDoff was used, get threshold from LEDoff/background
    # path_dataset = os.path.dirname(path_file_abs) + os.path.sep
    # filenameCombined = '\t'.join(
    #     os.listdir(os.path.join(datasetDirLocal, 'master')))
    # if 'master_ledoff_subtracted' in filename:
    #     print('Using master_ledoff')
    #     # path_file_abs = os.path.join(datasetDir, 'master', filename)
    #     hdu = fits.open(path_file_abs)[0]
    #     data_subtracted = hdu.data
    #     # calculate threshold
    #     ledoff_pred = np.mean(data_subtracted) * \
    #         np.ones(data_subtracted.shape)
    #     mse = mean_squared_error(data_subtracted, ledoff_pred)
    #     rmse = np.sqrt(mse)
    #     threshold = 7.0 * rmse
    #     threshold_value = threshold

    # if no LEDoff was used, background subtraction is needed
    # there should exist no file named "subtracted"
    # if 'master.fit' in filenameCombined \
    #     or 'master_normalised.fit' in filenameCombined:

    #filenamedir = os.path.basename(path_file_abs)
    #print(filenamedir)

    # new output-directory handling added by Parker
    f_dir, filename = os.path.split(path_file_abs)
    ff = os.path.splitext(filename)[0]
    new_dir = os.path.join(f_dir, ff)
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    dir_save = new_dir
    print("The photometry files will be saved in ", dir_save)

    filenames_combined = '\t'.join(os.listdir(dir_save))

    if clobber == False \
        and 'segm.obj' in filenames_combined \
        and 'props.obj' in filenames_combined \
        and 'props.csv' in filenames_combined \
        and 'props.ecsv' in filenames_combined:

        print('Photometry properties table already exists. Reading objects...')
        segm = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*segm.obj*'))[0], 'rb'))
        props = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*props.obj*'))[0], 'rb'))
        return [segm, props]

    if 'master' in path_file_abs:
        if 'normalised' in path_file_abs:
            print('Performing photometry on '
                  + 'normalised master object image {}...'.format(path_file_abs))
        else:
            print('Performing photometry on '
                  + 'un-normalised master image {}...'.format(path_file_abs))
    else:
        print('Warning: photometry is being performed on '
              + 'a single exposure {}...'.format(path_file_abs))

    hdu = fits.open(path_file_abs)[0]
    data = hdu.data
    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        print('Exposure time not found in header. '
              'Cannot determine magnitude.')
        exptime = np.nan

    # === Iteratively determine background level ===

    # assuming the background is homogeneous, estimate it by sigma clipping;
    # if background noise varies across the image, generate a 2D background instead
    print('Determining background noise level...')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=5)
    threshold = median + (std * 2.0)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma-clipping stats of the background, with detected sources masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters=3)

    # === Detect sources by segmentation ===

    print('Determining threshold for source detection...')
    # determine threshold for source detection
    # in the current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data,
                                 background=bkg_median,
                                 error=bkg_std,
                                 snr=source_snr)

    print('Preparing 2D Gaussian kernel...')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel,
                              x_size=x_size_kernel,
                              y_size=y_size_kernel)
    # normalise kernel
    # The kernel models are normalised by default, i.e. the integral of f(x)
    # over all x is 1. But because of the limited kernel array size, the
    # normalisation for kernels with an infinite response can differ from one.
    kernel.normalize()

    # obtain a SegmentationImage object with the same shape as the data,
    # where sources are labelled by different positive integer values.
    # A value of zero is always reserved for the background.
    # If the threshold includes the background level as above, then the image
    # input into detect_sources() should NOT be background-subtracted.
    print('Segmentation processing...')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    print('Segmentation labels are: ', repr(segm.labels))

    # === Measure regional source properties ===

    # source_properties() assumes that the data have been background-subtracted.
    # `background` is the background level that was previously present
    # in the input data.
    # The input background does not get subtracted from the input data,
    # which should already be background-subtracted.
    print('Extracting source properties...')
    props = source_properties(data - bkg_median, segm, background=bkg_median)

    # add flux and instrumental magnitude to properties
    # flux = source_sum / exptime
    # instrumental magnitude = -2.5 * log10(flux)
    for i in range(len(props)):
        # source_sum is by definition background-subtracted already
        props[i].flux = props[i].source_sum / exptime
        props[i].mag_instr = -2.5 * np.log10(props[i].flux)

    # make plots and save to images
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 2.8    # approximate isophotal extent
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))

    # create a table of properties
    try:
        props_table = properties_table(props)
    except:
        print('No source detected in {}'.format(path_file_abs))
        return [None, None]

    # add custom columns to the table: mag_instr and flux
    props_table['flux'] = [props[i].flux for i in range(len(props))]
    props_table['mag_instr'] = [props[i].mag_instr for i in range(len(props))]

    # plot centroid and segmentation using approximate elliptical apertures
    norm = mpl_normalize.ImageNormalize(stretch=LogStretch())
    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    #[fig1, (ax1, ax2)] = plt.subplots(1, 2, figsize=(12, 6))
    #ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    #ax1.plot(
    #    props_table['xcentroid'], props_table['ycentroid'],
    #    ls='none', color='blue', marker='+', ms=10, lw=1.5)
    #ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    #for aperture in apertures:
    #    aperture.plot(ax=ax1, lw=1.0, alpha=1.0, color='red')
    #    aperture.plot(ax=ax2, lw=1.0, alpha=1.0, color='red')

    # plot using actual segmentation outlines (to be improved)
    #[fig2, ax3] = plt.subplots(figsize=(6, 6))
    #ax3.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    #segm_outline = np.array(segm.outline_segments(), dtype=float)
    #segm_outline[segm_outline<1] = np.nan
    # get a copy of the colormap
    #segm_outline_cmap = copy.copy(plt.cm.get_cmap('autumn'))
    # set how the colormap handles 'bad' values
    #segm_outline_cmap.set_bad(alpha=0)
    #ax3.imshow(segm_outline, origin='lower', cmap=segm_outline_cmap)

    # === save ===

    # Save segm and props to object files, and also save props to a table file.
    print('Saving segmentation and source properties to {}...'.format(
        dir_save))

    try:
        # if the filename ends with .fits, strip the extension
        if filename[-5:] == '.fits':
            dir_save_prefix = os.path.join(dir_save, filename[0:-5])
        else:
            dir_save_prefix = os.path.join(dir_save, filename)

        # Enhanced CSV allows preserving table meta-data such as
        # column data types and units.
        # In this way a data table can be stored and read back as ASCII
        # with no loss of information.
        ascii.write(props_table, dir_save_prefix + '-phot_props.ecsv',
                    format='ecsv')
        # csv for readability in MS excel
        ascii.write(props_table, dir_save_prefix + '-phot_props.csv',
                    format='csv')

        # dump segmentation and properties to object files in binary mode
        file_segm = open(dir_save_prefix + '-phot_segm.obj', 'wb')
        pickle.dump(segm, file_segm)
        file_props = open(dir_save_prefix + '-phot_props.obj', 'wb')
        pickle.dump(props, file_props)

        # save figures
        #fig1.savefig(dir_save_prefix + '-phot_segm_fig1.png', dpi=600)
        #pp1 = PdfPages(dir_save_prefix + '-phot_segm_fig1.pdf')
        #pp1.savefig(fig1)
        #pp1.close()
        #fig2.savefig(dir_save_prefix + '-phot_segm_fig2.png', dpi=600)
        #pp2 = PdfPages(dir_save_prefix + '-phot_segm_fig2.pdf')
        #pp2.savefig(fig2)
        #pp2.close()

        print('Segmentation, properties objects, tables, and images saved to',
              dir_save)

    except:
        print('Unable to write to disk, check permissions.')

    return [segm, props]
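
# --- Optional sketch (not from the original module) ---
# Both segmentation_photometry() variants above build their own background
# mask because make_source_mask() was not yet available in photutils v0.2.2.
# With a photutils release that does provide it, the manual detect_sources()
# plus binary_dilation() step could plausibly be replaced by the call already
# hinted at in the commented-out code above.
def _example_background_mask(data, bkg_sigma=3.0):
    from astropy.stats import sigma_clipped_stats
    from photutils import make_source_mask    # assumes photutils >= 0.3
    # mask pixels belonging to detected sources, then sigma-clip the rest
    mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)
    bkg_mean, bkg_median, bkg_std = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask)
    return bkg_median, bkg_std
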
hdu_subtracted = fits.PrimaryHDU(master_subtracted)
# save background-subtracted image
hdu_subtracted.writeto('master_subtracted.fits', clobber=True)

# plot
plt.imshow(master_subtracted, origin='lower', cmap=plt.cm.gray)

# segmentation at a given sigma level, for regional properties
threshold = 5.0 * bkg.background_rms    # since data is background-subtracted

# perform segmentation whether flat was available or not
if 'master_subtracted_normalised' in locals():
    segm = detect_sources(master_subtracted_normalised, threshold, npixels=5)
elif 'master_subtracted' in locals():
    segm = detect_sources(master_subtracted, threshold, npixels=5)
print(segm.labels)

cmapRand = random_cmap(segm.max + 1, random_state=12345)
plt.imshow(segm, origin='lower', cmap=cmapRand)

# measure regional source properties from segmentation
# the centroid is from image moments, already intensity-weighted
if 'bkg' in locals():
    props = source_properties(master_subtracted, segm,
                              error=bkg.background_rms,
                              background=bkg.background)
else:
    props = source_properties(master_subtracted_normalised, segm,
                              error=master_ledoff_subtracted
                              - np.mean(master_ledoff_subtracted),
                              background=master_ledoff_subtracted)
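
# --- Context sketch (not from the original snippet) ---
# The fragment above assumes that `bkg` and `master_subtracted` already exist
# in the local namespace. In the GUI methods further down they are produced
# roughly as follows (photutils ~v0.2-era Background API, matching the calls
# used below).
def _example_background_subtraction(data):
    from photutils.background import Background    # old-API import, assumed
    bkg = Background(data, (100, 100), filter_shape=(3, 3), method='median')
    master_subtracted = data - bkg.background      # background-subtracted image
    return bkg, master_subtracted
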
def aperture_photometry(self, path_file_abs):
    """
    aperture photometry from source segmentation

    make_source_mask not yet available in photutils v0.2.1;
    wait for the v0.3 release.
    aperture_photometry() assumes that the data have been background-subtracted.
    """

    # create preliminary mask
    #from photutils import make_source_mask
    #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

    # if LEDoff was used, get threshold from LEDoff/background
    # path_dataset = os.path.dirname(path_file_abs) + os.path.sep
    # filenameCombined = '\t'.join(
    #     os.listdir(os.path.join(datasetDirLocal, 'master')))
    # if 'master_ledoff_subtracted' in filename:
    #     self.msg('Using master_ledoff')
    #     # path_file_abs = os.path.join(datasetDir, 'master', filename)
    #     hdu = fits.open(path_file_abs)[0]
    #     data_subtracted = hdu.data
    #     # calculate threshold
    #     ledoff_pred = np.mean(data_subtracted) * \
    #         np.ones(data_subtracted.shape)
    #     mse = mean_squared_error(data_subtracted, ledoff_pred)
    #     rmse = np.sqrt(mse)
    #     threshold = 7.0 * rmse
    #     threshold_value = threshold

    # if no LEDoff was used, background subtraction is needed
    # there should exist no file named "subtracted"
    # if 'master.fit' in filenameCombined \
    #     or 'master_normalised.fit' in filenameCombined:

    if 'master.fit' in path_file_abs:
        self.msg('Photometry using un-normalised master image')
    elif 'master_normalised.fit' in path_file_abs:
        self.msg('Photometry using normalised master image')

    hdu = fits.open(path_file_abs)[0]
    data = hdu.data
    if 'EXPTIME' in hdu.header:
        exptime = hdu.header['EXPTIME']
    else:
        exptime = hdu.header['EXPREQ']

    # === background subtraction ===
    """
    If no LEDoff was used, background subtraction is needed; there should
    exist no file named "subtracted". Create a 2D image of the background and
    background rms and apply sigma-clipping to each region in the low-res
    background map to get the mean, median, and std/rms. Sigma-clipping is
    the most widely used method, though not as good as using a mask; it is
    still superior to a robust standard deviation using the median absolute
    deviation (MAD-STD).
    """
""" # create background # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5) # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median') bkg = Background(data, (100, 100), filter_shape=(3, 3), method='median') # plot background image # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray) plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray) [fig, ax] = plt.subplots(figsize=(8, 8)) # make background-substracted image data_subtracted = data - bkg.background # plot subtracted image plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray) # === segmentation at a given sigma level === # perform segmentation whether flat is available or not self.msg('Determining threshold for target detection...') # because data is background-subtracted threshold_array = 5.0 * bkg.background_rms # print out threshold value threshold_value = threshold_array.flat[0] self.msg('Threshold for target detection is: ' + repr(threshold_value)) self.msg('Detecting sources and performing segmentation...') segm = detect_sources(data_subtracted, threshold_array, npixels=5) self.msg('Segmentation labels are:') self.msg((repr(segm.labels))) # === regional properties === # measure regional source properties from segmentation # the centroid is from image moments, already intensity-weighted self.msg('Measuring source properties...') if 'bkg' in locals(): # use the background determined from master_subtracted props = source_properties(data_subtracted, segm, error=bkg.background_rms, background=bkg.background) # elif 'master_ledoff_subtracted' in filenameCombined: # path_file_abs = os.path.join( # datasetDirLocal, 'master', 'master_ledoff_subtracted.fits') # hdu = fits.open(path_file_abs)[0] # master_ledoff_subtracted = hdu.data # props = source_properties(data_subtracted, segm, # error = master_ledoff_subtracted \ # - np.mean(master_ledoff_subtracted), # background = master_ledoff_subtracted) # add instrumental magnitude to properties # instrumental magnitude = -2.5 * log10(flux) for i in range(len(props)): # source_sum is by definition background-subtracted already props[i].mag_instr = -2.5 * np.log10(props[i].source_sum / exptime) # create table from props object # there are other properties available, see list of SourceProperties: # http://goo.gl/rkfQ9V props_table_columns = [ 'id', 'xcentroid', 'ycentroid', 'area', 'max_value', 'source_sum', 'mag_instr' ] props_table_display = properties_table(props, columns=props_table_columns) props_table_save = properties_table(props) # self.msg(repr(props_table_display)) print(repr(props_table_display)) # check and create analysis folder if it doesn't exist path_dataset = os.path.dirname(path_file_abs) path_analysis = path_dataset.replace('/data/images/fpc/', '/data/images/fpc_analysis/') if not os.path.exists(path_analysis): os.makedirs(path_analysis) # save background subtracted image if 'master.fit' in path_file_abs: path_save = os.path.join(path_dataset, 'master_subtracted.fits') elif 'master_object.fit' in path_file_abs: path_save = os.path.join(path_dataset, 'master_object_subtracted.fits') elif 'master_normalised.fit' in path_file_abs: path_save = os.path.join(path_dataset, 'master_normalised_subtracted.fits') hdu_subtracted = fits.PrimaryHDU(data_subtracted) hdu_subtracted.writeto(path_save, clobber=True) # save properties to table file path_save = os.path.join(path_dataset, 'props_table.csv') ascii.write(props_table_save, path_save, format='csv') # === update UI === # plot segmentated image self.rmmpl() 
    figure_photometry = Figure()
    cmap_rand = random_cmap(segm.max + 1, random_state=12345)
    axes = figure_photometry.add_subplot(111)
    axes.imshow(segm, origin='lower', cmap=cmap_rand)
    axes.plot(props_table_save['xcentroid'], props_table_save['ycentroid'],
              ls='none', color='red', marker='+', ms=10, lw=1.5)
    self.addmpl(figure_photometry)

    # set properties table font and font size
    self.ui.tablePhot.setCurrentFont(QtGui.QFont(TEXT_BROWSER_FONT))
    self.ui.tablePhot.setFontPointSize(TEXT_BROWSER_FONT_SIZE)
    self.ui.tablePhot.setPlainText(repr(props_table_display))

    self.msg('Photometry completed for {}.'.format(path_file_abs))
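
# --- Worked example (illustrative; not part of the GUI class) ---
# The instrumental magnitude used above is m = -2.5 * log10(source_sum / exptime).
# For instance, source_sum = 5.0e4 counts over exptime = 10 s gives a flux of
# 5.0e3 counts/s and m = -2.5 * log10(5.0e3) ≈ -9.25.
def _example_instrumental_magnitude(source_sum=5.0e4, exptime=10.0):
    import numpy as np
    flux = source_sum / exptime       # counts per second
    return -2.5 * np.log10(flux)      # ≈ -9.25 for the defaults above
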
def aperture_photometry(self, filename):
    # aperture photometry from source segmentation
    # determine threshold for background detection

    # if LEDoff was used, get threshold from LEDoff/background
    filepath = os.path.join(datasetDirLocal, 'master', filename)
    filenameCombined = '\t'.join(
        os.listdir(os.path.join(datasetDirLocal, 'master')))

    if 'master_ledoff_subtracted' in filename:
        self.msg('Using master_ledoff')
        # filepath = os.path.join(datasetDir, 'master', filename)
        hdu = fits.open(filepath)[0]
        data_subtracted = hdu.data
        # calculate threshold
        ledoff_pred = np.mean(data_subtracted) * np.ones(data_subtracted.shape)
        mse = mean_squared_error(data_subtracted, ledoff_pred)
        rmse = np.sqrt(mse)
        threshold = 7.0 * rmse
        threshold_value = threshold

    # if no LEDoff was used, background subtraction is needed
    # there should exist no file named "subtracted"
    elif 'master.fit' in filenameCombined \
            or 'master_normalised.fit' in filenameCombined:
        self.ui.statusbar.showMessage('Using master or master_normalised')

        # create preliminary mask
        """
        make_source_mask not yet available in photutils v0.2.1;
        wait for the v0.3 release
        """
        #from photutils import make_source_mask
        #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

        # background subtraction
        """
        create a 2D image of the background and background rms and apply
        sigma-clipping to each region in the low-res background map to get
        the mean, median, and std/rms. sigma-clipping is the most widely used
        method though not as good as using a mask; still superior to a robust
        standard deviation using the median absolute deviation (MAD-STD)
        """

        hdu = fits.open(filepath)[0]
        data = hdu.data
        if 'EXPTIME' in hdu.header:
            exptime = hdu.header['EXPTIME']
        else:
            exptime = hdu.header['EXPREQ']

        self.msg('Determining threshold for target detection...')
        # calculate threshold
        # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5)
        bkg = Background(data, (100, 100), filter_shape=(3, 3), method='median')
        # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median')

        # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray)
        plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray)
        [fig, ax] = plt.subplots(figsize=(8, 8))

        # make background-subtracted image
        data_subtracted = data - bkg.background

        # plot
        plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray)

        # save background-subtracted image
        if 'master.fit' in filename:
            hdu_subtracted = fits.PrimaryHDU(data_subtracted)
            hdu_subtracted.writeto('master_subtracted.fits', clobber=True)
        elif 'master_normalised.fit' in filename:
            hdu_normalised_subtracted = fits.PrimaryHDU(data_subtracted)
            hdu_normalised_subtracted.writeto(
                'master_normalised_subtracted.fits', clobber=True)

        # segmentation at a given sigma level, for regional properties
        threshold = 5.0 * bkg.background_rms    # since data is background-subtracted
        threshold_value = threshold.flat[0]
        self.msg('Threshold for target detection is: ' + repr(threshold_value))

    # perform segmentation whether flat was available or not
    self.msg('Performing segmentation...')
    segm = detect_sources(data_subtracted, threshold, npixels=5)
    self.msg('Segmentation labels are:')
    self.msg((str(segm.labels)))

    # measure regional source properties from segmentation
    # the centroid is from image moments, already intensity-weighted
    self.msg('Measuring source properties')
    if 'bkg' in locals():
        props = source_properties(data_subtracted, segm,
                                  error=bkg.background_rms,
                                  background=bkg.background)
    elif 'master_ledoff_subtracted' in filenameCombined:
        filepath = os.path.join(datasetDirLocal, 'master',
                                'master_ledoff_subtracted.fits')
        hdu = fits.open(filepath)[0]
        master_ledoff_subtracted = hdu.data
        props = source_properties(data_subtracted, segm,
                                  error=master_ledoff_subtracted
                                  - np.mean(master_ledoff_subtracted),
                                  background=master_ledoff_subtracted)

    # instrumental magnitude = -2.5 * log10(flux)
    for i in range(len(props)):
        # source_sum is by definition background-subtracted already
        props[i].mag_instr = -2.5 * np.log10(props[i].source_sum / exptime)

    # there are other properties available, see the list of SourceProperties:
    # http://photutils.readthedocs.io/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties
    propsTableColumns = ['id', 'xcentroid', 'ycentroid', 'area',
                         'max_value', 'source_sum', 'mag_instr']
    propsTable = properties_table(props, columns=propsTableColumns)
    self.ui.statusbar.showMessage(repr(propsTable))

    # plot segmented image
    self.rmmpl()
    segFig = Figure()
    cmapRand = random_cmap(segm.max + 1, random_state=12345)
    axes = segFig.add_subplot(111)
    axes.imshow(segm, origin='lower', cmap=cmapRand)
    axes.plot(propsTable['xcentroid'], propsTable['ycentroid'],
              ls='none', color='red', marker='+', ms=10, lw=1.5)
    self.addmpl(segFig)

    # set properties table font and font size
    self.ui.tablePhot.setCurrentFont(QtGui.QFont('Courier'))
    self.ui.tablePhot.setFontPointSize(9)
    self.ui.tablePhot.setPlainText(repr(propsTable))

    self.msg('Photometry completed')
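
# --- Note (illustrative; not part of the GUI class) ---
# In the LED-off branch above, mean_squared_error (presumably
# sklearn.metrics.mean_squared_error) is used to get the RMSE of the frame
# about its own mean. That quantity is simply the standard deviation of the
# pixel values, so an equivalent, dependency-free form of the 7-sigma
# threshold would be:
def _example_ledoff_threshold(data_subtracted):
    import numpy as np
    rmse = np.std(data_subtracted)    # same as sqrt(mean((x - mean(x))**2))
    return 7.0 * rmse
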
# The function below comes from a different script (galaxy warp fitting). It
# additionally relies on at least the following imports, plus a project-local
# `cfg` module and `find_centroid()` helper (assumed, not shown here):
#   from astropy.wcs import WCS
#   from astropy.stats import SigmaClip, gaussian_fwhm_to_sigma
#   from photutils import Background2D, MedianBackground
#   from scipy.ndimage import rotate
def prepare_data(file, data_dir, folder):
    """
    prepare_data picks a file with the image of the galaxy, detects the
    central object, rotates it to the major axis, and returns the data and
    errors ready to fit a warp curve, along with the maximum distance from
    the center.
    """

    # check if there is a folder for the figures; if not, create it
    if not os.path.isdir('../figs/' + str(folder)):
        os.mkdir('../figs/' + str(folder))
    # check if there is a folder for the text output; if not, create it
    if not os.path.isdir('../output/' + str(folder)):
        os.mkdir('../output/' + str(folder))

    print(data_dir + '/' + str(file))
    # `file` carries a trailing character (e.g. a newline from a file list),
    # hence the [:-1] slice
    hdu = fits.open(data_dir + '/' + str(file[:-1]))[0]
    wcs = WCS(hdu.header)
    data = hdu.data

    sigma_clip = SigmaClip(sigma=3., iters=10)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (25, 25), filter_size=(3, 3),
                       sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)

    if (cfg.DATA_TYPE == 'REAL'):
        weight = fits.open(data_dir + '/' + str(file[:-1]))[1].data
    else:
        weight = bkg.background_rms

    threshold = bkg.background + (3. * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma    # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)

    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
    ax1.imshow(data, origin='lower', cmap='Greys_r')
    ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig2.png')
    plt.close()

    props = source_properties(data, segm)
    tbl = properties_table(props)

    my_min = 100000.
    x_shape = float(data.shape[0])
    y_shape = float(data.shape[1])

    r = 3.    # approximate isophotal extent
    apertures = []
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))
        # keep the source closest to the image centre
        my_dist = np.sqrt((prop.xcentroid.value - x_shape / 2.)**2
                          + (prop.ycentroid.value - y_shape / 2.)**2)
        if (my_dist < my_min):
            my_label = prop.id - 1
            my_min = my_dist

    mytheta = props[my_label].orientation.value
    mysize = int(np.round(r * props[my_label].semimajor_axis_sigma.value))
    my_x = props[my_label].xcentroid.value
    my_y = props[my_label].ycentroid.value

    mask_obj = np.ones(data.shape, dtype='bool')
    mask_obj[(segm.data != 0) * (segm.data != props[my_label].id)] = 0
    # note: this result is bound to a misspelled name and is not used below;
    # the unmasked `weight` array is what gets rotated further down
    weigth = weight[mask_obj]

    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
    ax1.imshow(data, origin='lower', cmap='Greys_r')
    ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    for aperture in apertures:
        aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1)
        aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2)
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig3.png')
    plt.close()

    # rotate to the major axis and crop a 200x200 cutout around the centre
    data_rot = rotate(data, np.rad2deg(mytheta))
    data_rot = data_rot[data_rot.shape[0] // 2 - 100:data_rot.shape[0] // 2 + 100,
                        data_rot.shape[1] // 2 - 100:data_rot.shape[1] // 2 + 100]
    w_rot = rotate(weight, np.rad2deg(mytheta))
    w = w_rot[w_rot.shape[0] // 2 - 100:w_rot.shape[0] // 2 + 100,
              w_rot.shape[1] // 2 - 100:w_rot.shape[1] // 2 + 100]

    plt.figure()
    plt.imshow(data_rot, origin='lower', cmap='Greys_r')
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig4.png')
    plt.close()

    newx, newy, newtheta, newsize = find_centroid(data_rot)
    print('old center = ', my_x, my_y, mysize)
    print('new center = ', newx, newy, np.rad2deg(newtheta), newsize)

    x_shape2 = float(data_rot.shape[0])
    y_shape2 = float(data_rot.shape[1])

    np.savetxt('../output/' + str(folder) + '/' + str(file[:-5])
               + '_size_xcent_ycent_xy_shape.txt',
               np.array([newsize, newx, newy, x_shape2, y_shape2]))

    return data_rot, w, newsize, newx, newy, x_shape2, y_shape2
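
# --- Usage sketch (illustrative; not part of the original script) ---
# prepare_data() appears to be driven from a list of image filenames (the
# file[:-1] slice above suggests lines read from a text file, each ending in
# a newline). The names below are hypothetical placeholders.
def _example_prepare_data():
    data_dir = '../data'        # hypothetical image directory
    folder = 'run01'            # hypothetical output subfolder
    with open('../data/file_list.txt') as f:    # hypothetical list of images
        for line in f:
            data_rot, w, size, x0, y0, xs, ys = prepare_data(line, data_dir,
                                                             folder)
            print(size, x0, y0)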