def psfpy(data):
    """Measure centroids and second-moment shape parameters of a cutout.

    Parameters
    ----------
    data : 2D array
        Background-subtracted postage stamp containing a single source.

    Returns
    -------
    tuple
        ``(x1, y1, x2, y2, x3, y3, semimajorf, semiminorf, eccef,
        orientationf, cxxf, cxyf, cyyf)`` where the three centroid pairs
        come from ``centroid_com`` / ``centroid_1dg`` / ``centroid_2dg``,
        ``eccef`` is a float, and the remaining shape parameters are
        returned as strings (kept as strings for backward compatibility
        with existing callers).
    """
    # Three independent centroid estimates (center of mass, 1D Gaussian,
    # 2D Gaussian).
    x1, y1 = centroid_com(data)
    x2, y2 = centroid_1dg(data)
    x3, y3 = centroid_2dg(data)

    # Morphological properties of the cutout treated as a single source.
    cat = data_properties(data)

    # Read values directly from the properties object instead of parsing
    # the printed table representation.  The previous implementation did
    # str(tbl[col]).split("[")[1].split("]")[0], which silently breaks
    # whenever astropy changes its column repr formatting.
    eccef = float(cat.eccentricity.value)
    semimajorf = str(float(cat.semimajor_axis_sigma.value))
    semiminorf = str(float(cat.semiminor_axis_sigma.value))
    orientationf = str(float(cat.orientation.value))
    cxxf = str(float(cat.cxx.value))
    # cxy keeps the explicit 10-decimal formatting the original applied
    # via tbl['cxy'].info.format = '.10f'.
    cxyf = '{0:.10f}'.format(float(cat.cxy.value))
    cyyf = str(float(cat.cyy.value))

    return (x1, y1, x2, y2, x3, y3, semimajorf, semiminorf, eccef,
            orientationf, cxxf, cxyf, cyyf)
def get_psf(self, satLimit=16e3):
    """
    Computes the average PSF properties from the brightest stars in the
    image.

    Parameters
    ----------
    satLimit : int or float, optional, default: 16e3
        Sources which contain any pixels with more than this number of
        counts will be discarded from the returned list of sources on
        account of being saturated.

    Returns
    -------
    medianPSF : numpy.ndarray
        A small postage stamp of the PSF computed from the median star
        profiles.

    PSFparams : dict
        The properties of the Gaussian which best fits the array in
        PSFstamp. See astropy.modeling.functional_models.Gaussian2D for
        more information about how these values are defined. The keys of
        the dictionary are
        'smajor': float
            semimajor axis width in pixels
        'sminor': float
            semiminor axis width in pixels
        'theta': float
            Rotation of the major axis in CCW degrees from horizontal axis
    """
    # Set the standard stellar cutout size
    cutoutSize = 21

    # Grab the star positions, rejecting saturated, crowded, and
    # edge-adjacent sources.
    xStars, yStars = self.image.get_sources(
        satLimit = satLimit,
        crowdLimit = np.sqrt(2)*cutoutSize,
        edgeLimit = cutoutSize + 1
    )

    # Catch the case where no stars were located: return an empty stamp
    # and None-valued parameters rather than raising.
    if ((xStars.size == 1 and xStars[0] is None) or
        (yStars.size == 1 and yStars[0] is None)):
        warnings.warn('There are no well behaving stars')
        outStamp = np.zeros((cutoutSize, cutoutSize))
        outDict = {'smajor':None, 'sminor':None, 'theta':None}
        return outStamp, outDict

    # Count the number of stars and cap the list at the 50 first (i.e.
    # brightest, assuming get_sources returns them sorted) entries.
    # NOTE(review): the original comment also mentioned a "brightest 25%"
    # cut, but only the 50-star cap is implemented here.
    numberOfStars = xStars.size
    if numberOfStars > 50:
        xStars, yStars = xStars[0:50], yStars[0:50]

    # Grab the list of star cutouts
    starCutouts = self.extract_star_cutouts(xStars, yStars, cutoutSize=cutoutSize)

    # Loop through each cutout and record its Gaussian eigen-widths
    # (semimajor/semiminor sigmas) from photutils source properties.
    sxList = []
    syList = []
    for starCutout in starCutouts:
        # Retrieve the properties of the star in this patch
        props = data_properties(starCutout)

        # Store the gaussian component eigen-values
        sxList.append(props.semimajor_axis_sigma.value)
        syList.append(props.semiminor_axis_sigma.value)

    # Find potential outliers and mask them
    sxArr = sigma_clip(sxList)
    syArr = sigma_clip(syList)

    # Find out which stars have good values in BOTH sx and sy
    badSXSY = np.logical_or(sxArr.mask, syArr.mask)
    goodSXSY = np.logical_not(badSXSY)

    # Cut out any patches with bad sx or bad sy values; if nothing
    # survives the clipping, bail out the same way as the no-star case.
    if np.sum(goodSXSY) == 0:
        warnings.warn('There are no well behaving stars')
        outStamp = np.zeros((cutoutSize, cutoutSize))
        outDict = {'smajor':None, 'sminor':None, 'theta':None}
        return outStamp, outDict
        # raise IndexError('There are no well behaving stars')

    # If some of the patches are bad, then cut those out of the patch list
    if np.sum(badSXSY) > 0:
        goodInds = (np.where(goodSXSY))[0]
        starCutouts = starCutouts[goodInds, :, :]

    # Compute a "median patch": pixelwise median over the surviving stars.
    starCutoutArray = np.array(starCutouts)
    medianPSF = np.median(starCutoutArray, axis=0)

    # Build a gaussian + 2D polynomial (1st degree) model to fit the
    # median patch.  The Gaussian captures the star, the plane captures
    # any residual background level.
    gauss_init = models.Gaussian2D(
        amplitude=1000.0,
        x_mean=10.0,
        y_mean=10.0,
        x_stddev=3.0,
        y_stddev=3.0,
        theta=0.0
    )
    bkg_init = models.Polynomial2D(1)
    PSF_init = gauss_init + bkg_init
    fitter = fitting.LevMarLSQFitter()

    # Generate arrays for the x and y pixel positions
    yy, xx = np.mgrid[0:cutoutSize, 0:cutoutSize]

    # Finally, re-fit a gaussian to this median patch.
    # Ignore model warnings from the fitter (non-convergence chatter).
    with warnings.catch_warnings():
        # Fit the model to the patch
        warnings.simplefilter('ignore')
        PSF_model = fitter(PSF_init, xx, yy, medianPSF)

    # Modulate the fitted theta value into the range [0, 2*pi)
    goodTheta = (PSF_model.theta_0.value % (2*np.pi))
    PSF_model.theta_0 = goodTheta

    # Build a 2D polynomial background to subtract
    bkg_model = models.Polynomial2D(1)

    # Transfer the background portion of the PSF model to the
    # polynomial plane model (the *_1 suffix addresses the second
    # component of the compound model).
    bkg_model.c0_0 = PSF_model.c0_0_1
    bkg_model.c1_0 = PSF_model.c1_0_1
    bkg_model.c0_1 = PSF_model.c0_1_1

    # Subtract the planar background and renormalize the median PSF to
    # unit total flux.
    medianPSF -= bkg_model(xx, yy)
    medianPSF /= medianPSF.sum()

    # Return the fitted PSF values
    smajor, sminor, theta = (
        PSF_model.x_stddev_0.value,
        PSF_model.y_stddev_0.value,
        PSF_model.theta_0.value
    )

    # Define return values and return them to the user
    PSFparams = {'smajor':smajor, 'sminor':sminor, 'theta':theta}
    return (medianPSF, PSFparams)
# Report basic morphology for both the super-resolution and observed
# images, then measure a circular-aperture curve of growth.
# NOTE(review): indentation was lost in this fragment -- the curve-of-growth
# section below is assumed to run inside the per-image loop (it uses the
# loop-local `data` and `ndim`); confirm against the original layout.
for img, label in zip([imgs.super, imgs.img], ['super resolution', 'observed']):
    print('-'*10, label)
    data = img.data
    # assumes a square image; ndim is the side length in pixels -- TODO confirm
    ndim = data.shape[0]
    print('image flux: {0:.2f}'.format(np.sum(data)))
    print('pixel scale/kpc: {0:.2f}'.format(img.pixel_scale_kpc))
    print('ndim: {0}'.format(ndim))
    from photutils import data_properties
    cat = data_properties(data)
    # Second-moment sizes converted from pixels to kpc.
    print('a: {0:.2f}'.format(cat.semimajor_axis_sigma.value * img.pixel_scale_kpc))  # only equal to r_e_major for n=1
    print('b: {0:.2f}'.format(cat.semiminor_axis_sigma.value * img.pixel_scale_kpc))  # only equal to r_e_minor for n=1
    print('e: {0:.2f}'.format(cat.ellipticity.value))
    # Orientation reported in units of pi radians.
    print('theta: {0:.2f}'.format(cat.orientation.value /np.pi))

    # --- measure curve of growth: summed flux inside circular apertures
    # of increasing radius, centred on the image centre.
    centre = (ndim/2., ndim/2.)
    radii_pix = np.arange(1.0, 50., 1.0)
    radii_kpc = radii_pix * img.pixel_scale_kpc
    apertures = [CircularAperture(centre, r=r) for r in radii_pix]  # r in pixels
    phot_table = aperture_photometry(data, apertures)
    # aperture_photometry emits one 'aperture_sum_<i>' column per
    # aperture; collect them into a single array ordered by radius.
    flux = np.array([phot_table['aperture_sum_{0}'.format(i)][0] for i, r in enumerate(radii_pix)])
# Find isolated stars, good for photometry goodStars = np.array(separations) > 15 goodStars = np.logical_and(goodStars, np.array(maxVals) < 1.6e4) goodStars = np.logical_and(goodStars, farFromEdge) goodInds = np.where(goodStars) # Cull to only include the good stars starCatalog = starCatalog[goodInds] sx , sy = sx[goodInds], sy[goodInds] # Refine star positions using the photutils.data_properties function sxRefined = [] syRefined = [] for sx1, sy1 in zip(sx, sy): cutout = combinedImage.data[sy1-10:sy1+10,sx1-10:sx1+10] cutoutProperties = data_properties(cutout) sxRefined.append(sx1 + cutoutProperties.xcentroid.value - 11) syRefined.append(sy1 + cutoutProperties.ycentroid.value - 11) # Replaced the estimated positions with the refined positions sx, sy = sxRefined, syRefined # Store the star positions in a single array xyStars = np.array([sx, sy]).T # Analyze the PSF of the images photAnalyzer_V = ai.utilitywrappers.PhotometryAnalyzer(stokesI_V) photAnalyzer_R = ai.utilitywrappers.PhotometryAnalyzer(stokesI_R) _, psf_V = photAnalyzer_V.get_psf() _, psf_R = photAnalyzer_R.get_psf()
radius=0.1, columns=columns) print(str(len(cat0)) + ' objects found') # Measure the morphology around the overdensity shp = clipped.shape x0 = np.int(np.floor(peaks0['x_peak'] - 10)) if x0 < 0: x0 = 0 x1 = np.int(np.ceil(peaks0['x_peak'] + 10)) if x1 > (shp[1] - 1): x1 = (shp[1] - 1) # X is 2nd dimension y0 = np.int(np.floor(peaks0['y_peak'] - 10)) if y0 < 0: y0 = 0 y1 = np.int(np.ceil(peaks0['y_peak'] + 10)) if y1 > (shp[0] - 1): y1 = (shp[0] - 1) # Y is 1st dimension clipped0 = clipped[y0:y1 + 1, x0:x1 + 1] props = data_properties(clipped0) pcolumns = [ 'id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'orientation' ] # semi axes in pixels and orientation in radians # 1 pixel is 1 armcin, good unit to use pcat = props.to_table(columns=pcolumns) peaks[i]['x_centroid'] = props['xcentroid'].value + x0 peaks[i]['y_centroid'] = props['ycentroid'].value + y0 peaks[i]['asemi'] = props['semimajor_axis_sigma'].value # pixel=arcmin peaks[i]['bsemi'] = props['semiminor_axis_sigma'].value # pixel=arcmin peaks[i]['theta'] = np.rad2deg(props['orientation'].value) # ra & dec positions of centroid # use xvec/yvec defined above xf = extrap1d(interp1d(np.arange(len(xvec)),
def _perform(self):
    """
    Returns an Argument() with the parameters that depends on this
    operation.

    Measures stellar shapes (FWHM, orientation, elongation) and aperture
    photometry for catalog stars that fall on the image, then fits a
    flux zero point against catalog magnitudes.  All results are appended
    as columns to ``self.action.args.calibration_catalog``.
    """
    self.log.info(f"Running {self.__class__.__name__} action")
    nx, ny = self.action.args.kd.pixeldata[0].shape

    ## Do photometry measurement
    # Convert catalog RA/Dec to pixel coordinates (origin=1 convention)
    # and keep only stars at least `buffer` pixels inside the frame.
    self.log.debug(f'  Add pixel positions to catalog')
    x, y = self.action.args.wcs.all_world2pix(self.action.args.calibration_catalog['raMean'],
                                              self.action.args.calibration_catalog['decMean'], 1)
    self.action.args.calibration_catalog.add_column(Column(data=x, name='x'))
    self.action.args.calibration_catalog.add_column(Column(data=y, name='y'))
    buffer = 10
    in_image = (self.action.args.calibration_catalog['x'] > buffer)\
               & (self.action.args.calibration_catalog['x'] < nx-buffer)\
               & (self.action.args.calibration_catalog['y'] > buffer)\
               & (self.action.args.calibration_catalog['y'] < ny-buffer)
    self.log.debug(f'  Only {np.sum(in_image)} stars are within image pixel boundaries')
    self.action.args.calibration_catalog = self.action.args.calibration_catalog[in_image]
    positions = [p for p in zip(self.action.args.calibration_catalog['x'],
                                self.action.args.calibration_catalog['y'])]

    # Measure the shape of each star from a 20x20 pixel cutout; stars too
    # close to the edge get NaN placeholders so list lengths stay aligned
    # with the catalog.
    # NOTE(review): 'elliptiticty' is a misspelling kept for consistency
    # with the column name written below, and the value stored is actually
    # photutils' `elongation`, not ellipticity -- confirm intent.
    self.log.debug(f'  Attemping shape measurement for {len(positions)} stars')
    fwhm = list()
    elliptiticty = list()
    orientation = list()
    xcentroid = list()
    ycentroid = list()
    for i, entry in enumerate(self.action.args.calibration_catalog):
        xi = int(x[i])
        yi = int(y[i])
        if xi > 10 and xi < nx-10 and yi > 10 and yi < ny-10:
            im = self.action.args.kd.pixeldata[0].data[yi-10:yi+10,xi-10:xi+10]
            properties = photutils.data_properties(im)
            # 2.355 converts a Gaussian sigma to FWHM; combine the two
            # axis sigmas in quadrature.
            fwhm.append(2.355*(properties.semimajor_axis_sigma.value**2\
                        + properties.semiminor_axis_sigma.value**2)**0.5)
            orientation.append(properties.orientation.to(u.deg).value)
            elliptiticty.append(properties.elongation)
            xcentroid.append(properties.xcentroid.value)
            ycentroid.append(properties.ycentroid.value)
        else:
            fwhm.append(np.nan)
            orientation.append(np.nan)
            elliptiticty.append(np.nan)
            xcentroid.append(np.nan)
            ycentroid.append(np.nan)
    # Use NaN entries (edge stars) as the shared mask for all shape columns.
    wnan = np.isnan(fwhm)
    nmasked = np.sum(wnan)
    self.log.info(f"  Measured {len(fwhm)-nmasked} indivisual FWHM values")
    self.action.args.calibration_catalog.add_column(MaskedColumn(
        data=fwhm, name='FWHM', mask=wnan))
    self.action.args.calibration_catalog.add_column(MaskedColumn(
        data=orientation, name='orientation', mask=wnan))
    self.action.args.calibration_catalog.add_column(MaskedColumn(
        data=elliptiticty, name='elliptiticty', mask=wnan))
    self.action.args.calibration_catalog.add_column(MaskedColumn(
        data=xcentroid, name='xcentroid', mask=wnan))
    self.action.args.calibration_catalog.add_column(MaskedColumn(
        data=ycentroid, name='ycentroid', mask=wnan))

    # Aperture photometry: star aperture of 2*FWHM, sky annulus from
    # 1.5 to 2 times that radius.
    self.log.debug(f'  Attemping aperture photometry for {len(positions)} stars')
    FWHM_pix = self.action.args.fwhm if self.action.args.fwhm is not None else 8
    ap_radius = int(2.0*FWHM_pix)
    star_apertures = photutils.CircularAperture(positions, ap_radius)
    # NOTE(review): .area() is called as a method below, which matches
    # photutils < 0.7; in newer photutils `area` is a property -- confirm
    # the pinned version.
    sky_apertures = photutils.CircularAnnulus(positions,
                                              r_in=int(np.ceil(1.5*ap_radius)),
                                              r_out=int(np.ceil(2.0*ap_radius)))
    self.log.debug(f'  Running photutils.aperture_photometry')
    phot_table = photutils.aperture_photometry(
        self.action.args.kd.pixeldata[0].data,
        [star_apertures, sky_apertures])

    # Per-pixel sky from the annulus sum; subtract the scaled sky from
    # the star aperture sum and normalize by exposure time to get flux.
    self.log.debug(f'  Subtracting sky flux')
    phot_table['sky'] = phot_table['aperture_sum_1'] / sky_apertures.area()
    med_sky = np.nanmedian(phot_table['sky'])
    self.log.info(f'  Median Sky = {med_sky:.0f} e-/pix')
    self.action.args.sky_background = med_sky
    self.action.args.calibration_catalog.add_column(phot_table['sky'])
    bkg_sum = phot_table['aperture_sum_1'] / sky_apertures.area() * star_apertures.area()
    final_sum = (phot_table['aperture_sum_0'] - bkg_sum)/self.action.args.kd.exptime()
    phot_table['flux'] = final_sum
    self.action.args.calibration_catalog.add_column(phot_table['flux'])
    # Mask unphysical (negative) fluxes rather than dropping the rows.
    wzero = (self.action.args.calibration_catalog['flux'] < 0)
    self.log.debug(f'  Masking {np.sum(wzero)} stars with <0 flux')
    self.action.args.calibration_catalog['flux'].mask = wzero

    # Estimate flux from catalog magnitude: expected photon rate from the
    # band zero-point flux f0 scaled by the telescope collecting area
    # (primary minus central obstruction).
    self.log.debug(f'  Estimate flux from catalog magnitude')
    d_telescope = self.cfg['Telescope'].getfloat('d_primary_mm', 508)
    d_obstruction = self.cfg['Telescope'].getfloat('d_obstruction_mm', 127)
    A = 3.14*(d_telescope/2/1000)**2 - 3.14*(d_obstruction/2/1000)**2 # m^2
    self.action.args.f0 = estimate_f0(A, band=self.action.args.band) # photons / sec
    catflux = self.action.args.f0\
              * 10**(-self.action.args.calibration_catalog[f'{self.action.args.band}MeanApMag']/2.5)
    self.action.args.calibration_catalog.add_column(Column(data=catflux*u.photon/u.second,
                                                    name='catflux'))

    # Fit measured flux against catalog flux (slope = e-/photon zero
    # point), clipping the brightest and faintest 5% of unmasked stars.
    self.log.debug(f'  Fit, clipping brightest 5% and faintest 5% of stars')
    bad = (self.action.args.calibration_catalog['flux'].mask == True)
    nclip = int(np.floor(0.05*len(self.action.args.calibration_catalog[~bad])))
    fitted_line = sigma_clipping_line_fit(self.action.args.calibration_catalog[~bad]['catflux'][nclip:-nclip].data,
                                          self.action.args.calibration_catalog[~bad]['flux'][nclip:-nclip].data,
                                          intercept_fixed=True)
    self.log.info(f"  Slope (e-/photon) = {fitted_line.slope.value:.3g}")
    self.action.args.zero_point_fit = fitted_line
    # Residual scatter about the fitted line, sigma-clipped.
    deltas = self.action.args.calibration_catalog['flux'].data\
             - fitted_line(self.action.args.calibration_catalog['catflux'].data)
    mean, med, std = stats.sigma_clipped_stats(deltas)
    self.log.info(f"  Fit StdDev = {std:.2g}")

    return self.action.args
# Prepare the output table: one zero-initialized float column per new field.
for col in columns:
    sources[col] = np.zeros_like(sources, dtype=float)

# Measure each source from a 13x13 postage stamp centred on its centroid.
for ii in range(len(sources)):
    yc = int(sources['ycentroid'][ii])
    xc = int(sources['xcentroid'][ii])
    data = im[yc - 6:yc + 7, xc - 6:xc + 7]

    # RV content proxy: integrated squared gradient of the normalized
    # row-summed profile.
    profil = (data / np.sum(data)).sum(axis=1)
    sources['rvcontent'][ii] = np.sum(np.gradient(profil)**2)

    # Shape properties of the stamp, copied column by column.
    tbl = data_properties(data).to_table(columns=columns)
    for col in columns:
        sources[col][ii] = np.array(tbl[col])[0]

    print('PSF {0} / {1}, elongation = {2}, semi-major axis = {3}'.format(
        ii,
        len(sources),
        np.array(tbl['elongation'])[0],
        np.array(tbl['semimajor_axis_sigma'])[0]))

sources.write(tbl_name)

# Re-read the list of sources from disk and pull out the centroids.
sources = Table.read(tbl_name)
x = sources['xcentroid']
y = sources['ycentroid']