def PSF_photometry(data, coord_table, sigma_psf=10, scale=0.67, step=0.5):
    FLUX = []
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(data)
    iraffind = IRAFStarFinder(threshold=3.5 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
    # psf_model.x_0.fixed = True
    # psf_model.y_0.fixed = True
    pos = Table(names=['x_0', 'y_0'],
                data=[coord_table['X'], coord_table['Y']])[coord_table['good_star']]

    photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=mmm_bkg,
                                                    psf_model=psf_model,
                                                    fitter=LevMarLSQFitter(),
                                                    niters=1, fitshape=(41, 41))
    result_tab = photometry(image=data, init_guesses=pos)
    return result_tab['flux_fit']
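# The snippets in this section rely on the legacy photutils (~0.7-1.x) PSF API.
# A minimal sketch of the shared imports they assume; this block is not part of
# the original snippets, and exact module paths can differ between photutils versions.
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.stats import SigmaClip, gaussian_sigma_to_fwhm, sigma_clipped_stats
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.background import MADStdBackgroundRMS, MMMBackground
from photutils.detection import DAOStarFinder, IRAFStarFinder
from photutils.psf import (BasicPSFPhotometry, DAOGroup, DAOPhotPSFPhotometry,
                           IntegratedGaussianPRF,
                           IterativelySubtractedPSFPhotometry)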
def astropy_psf_photometry(img, sigma_psf, aperture=3, x0=None, y0=None,
                           filter=True, sigma_filter=1):
    """
    Performs PSF photometry on an image. If x0 and y0 are None, it will attempt
    to locate the target by searching for the brightest PSF in the field.

    :param img: 2D array, image on which to perform PSF photometry
    :param sigma_psf: float, standard deviation of the PSF
    :param aperture: int, size of the aperture (pixels)
    :param x0: x position of the target (pixels)
    :param y0: y position of the target (pixels)
    :param filter: If True, a gaussian filter with standard deviation
        sigma_filter is applied to the image before performing PSF photometry
    :param sigma_filter: standard deviation of the gaussian filter to apply to the image
    :return: x0 column of photometry table, y0 column of photometry table,
        flux column of photometry table
    """
    if filter:
        image = ndimage.gaussian_filter(img, sigma=sigma_filter, order=0)
    else:
        image = img
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(image[image != 0])
    iraffind = IRAFStarFinder(threshold=2 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)
    if x0 and y0:
        pos = Table(names=['x_0', 'y_0'], data=[x0, y0])
        photometry = BasicPSFPhotometry(group_maker=daogroup,
                                        bkg_estimator=mmm_bkg,
                                        psf_model=psf_model,
                                        fitter=LevMarLSQFitter(),
                                        fitshape=(11, 11))
        res = photometry(image=image, init_guesses=pos)
        return res['x_fit'], res['y_fit'], res['flux_fit']
    photometry = BasicPSFPhotometry(finder=iraffind, group_maker=daogroup,
                                    bkg_estimator=mmm_bkg, psf_model=psf_model,
                                    fitter=fitter, fitshape=(11, 11),
                                    aperture_radius=aperture)
    res = photometry(image=image)
    return res['x_0'], res['y_0'], res['flux_0']
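# A minimal usage sketch for astropy_psf_photometry (not from the original source),
# assuming a synthetic 128x128 frame with one Gaussian star; the position (64, 64),
# amplitude, and noise level are illustrative values only.
yy, xx = np.mgrid[0:128, 0:128]
sigma_true = 3.0
star = 500.0 * np.exp(-((xx - 64) ** 2 + (yy - 64) ** 2) / (2 * sigma_true ** 2))
frame = star + np.random.normal(0.0, 1.0, size=star.shape)
x_fit, y_fit, flux = astropy_psf_photometry(frame, sigma_psf=sigma_true, aperture=5)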
def do_photometry_basic(image: np.ndarray, σ_psf: float) -> Tuple[Table, np.ndarray]:
    """
    Find stars in an image with IRAFStarFinder

    :param image: The image data you want to find stars in
    :param σ_psf: expected deviation of PSF
    :return: tuple (result table, residual image)
    """
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(image)

    iraffind = IRAFStarFinder(threshold=3 * std, sigma_radius=σ_psf,
                              fwhm=σ_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=2, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    daogroup = DAOGroup(0.1 * σ_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()

    # my_psf = AiryDisk2D(x_0=0., y_0=0., radius=airy_minimum)
    # psf_model = prepare_psf_model(my_psf, xname='x_0', yname='y_0',
    #                               fluxname='amplitude', renormalize_psf=False)
    psf_model = IntegratedGaussianPRF(sigma=σ_psf)
    # psf_model = AiryDisk2D(radius=airy_minimum)  # prepare_psf_model(AiryDisk2D, xname='x_0', yname='y_0')
    # psf_model = Moffat2D([amplitude, x_0, y_0, gamma, alpha])

    # photometry = IterativelySubtractedPSFPhotometry(finder=iraffind, group_maker=daogroup,
    #                                                 bkg_estimator=mmm_bkg, psf_model=psf_model,
    #                                                 fitter=LevMarLSQFitter(),
    #                                                 niters=2, fitshape=(11, 11))
    photometry = BasicPSFPhotometry(finder=iraffind, group_maker=daogroup,
                                    bkg_estimator=mmm_bkg, psf_model=psf_model,
                                    fitter=LevMarLSQFitter(), aperture_radius=11.0,
                                    fitshape=(11, 11))

    result_table = photometry.do_photometry(image)
    return result_table, photometry.get_residual_image()
def do_photometry_epsf(image: np.ndarray,
                       epsf: photutils.psf.EPSFModel,
                       star_finder: Optional[photutils.StarFinderBase],
                       initial_guess: Optional[Table] = None,
                       config: Config = Config()) -> Table:
    """
    Given an image and an EPSF model, perform photometry and return star
    positions (and more) in a table.

    :param image: input image
    :param epsf: EPSF model to use in photometry
    :param star_finder: which star finder to use?
    :param initial_guess: initial estimates for star positions
    :param config:
    :return: Table with results
    """
    separation_factor = config.separation_factor
    clip_sigma = config.clip_sigma
    photometry_iterations = config.photometry_iterations

    epsf = photutils.psf.prepare_psf_model(epsf, renormalize_psf=False)  # renormalize is super slow...

    background_rms = MADStdBackgroundRMS()

    _, img_median, img_stddev = sigma_clipped_stats(image, sigma=clip_sigma)
    fwhm_guess = estimate_fwhm(epsf.psfmodel)

    grouper = DAOGroup(separation_factor * fwhm_guess)

    epsf.fwhm = astropy.modeling.Parameter(
        'fwhm', 'this is not the way to add this I think')
    epsf.fwhm.value = fwhm_guess

    photometry = IterativelySubtractedPSFPhotometry(
        finder=star_finder,
        group_maker=grouper,
        bkg_estimator=background_rms,
        psf_model=epsf,
        fitter=LevMarLSQFitter(),
        niters=photometry_iterations,
        fitshape=config.fitshape)

    return photometry.do_photometry(image, init_guesses=initial_guess)
def __init__(self, imfit_io, config_file_or_dict={}):
    if type(config_file_or_dict) == str:
        logger.info(f'Loading config file {config_file_or_dict}')
        config = load_config(config_file_or_dict)
    else:
        config = config_file_or_dict.copy()

    self.input_config = config.copy()
    self.imfit_io = config.pop('imfit_io', imfit_io)
    self.sersic_fitter = SersicFitter(out_dir=self.imfit_io)
    self.use_hsc_bright_mask = config.pop('use_hsc_bright_mask',
                                          dict(phot=False, imfit=True))
    self.residual_image_forced = None
    self.residual_image = None

    # daofinder parameters
    self.threshold = config.pop('threshold', 3.0)
    self.daofinder_opt = dict(
        sigma_radius=config.pop('sigma_radius', 3.0),
        sharphi=config.pop('sharphi', 2.0),
        sharplo=config.pop('sharplo', 0.),
        roundlo=config.pop('roundlo', -1.0),
        roundhi=config.pop('roundhi', 1.0),
    )

    # daogroup parameter
    self.crit_separation = config.pop('crit_separation', 1.5)

    # TODO: make these bkgrd methods options
    self.bkg = MMMBackground()
    self.bkgrms = MADStdBackgroundRMS()

    # phot parameters
    self.aperture_radius = config.pop('aperture_radius', 1.0)
    self.phot_opts = dict(
        fitshape=config.pop('fitshape', (15, 15)),
        niters=config.pop('niters', 3),
        bkg_estimator=self.bkg,
    )

    self.master_band = config.pop('master_band', 'i')
    self.max_match_sep = config.pop('max_match_sep', 1.0)
    self.min_match_bands = config.pop('min_match_bands', 4)
def init_setup():
    fitimage = fits.open('Serpens3/idxq28010_drz.fits')
    imdata = fitimage[1].data
    head = fitimage[0].header
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(imdata)
    mean = np.mean(imdata)
    sigma_psf = 2.0
    iraffind = IRAFStarFinder(threshold=3.5 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    return imdata, bkgrms, std, sigma_psf, iraffind, daogroup, mmm_bkg, mean
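# A hedged sketch (not in the original snippet) of how the objects returned by
# init_setup could be wired into a PSF photometry run, assuming the FITS file
# referenced above is present; fitshape=(11, 11) and niters=1 are illustrative only.
imdata, bkgrms, std, sigma_psf, iraffind, daogroup, mmm_bkg, mean = init_setup()
photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                group_maker=daogroup,
                                                bkg_estimator=mmm_bkg,
                                                psf_model=IntegratedGaussianPRF(sigma=sigma_psf),
                                                fitter=LevMarLSQFitter(),
                                                niters=1, fitshape=(11, 11))
result_tab = photometry(image=imdata)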
def find_stars(data):
    # take background statistics of the image
    mean, median, sig = sigma_clipped_stats(data, sigma=10.0)
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(data)

    # setup calibration parameters
    thresh = 5. * std                                 # detect stars at 5-sigma level
    sigma_psf = 5.                                    # size in pixels of targets
    fwhm_sigma = sigma_psf * gaussian_sigma_to_fwhm   # convert to full-width at half-maximum

    subt_data = data - median
    maximum = np.quantile(subt_data.flatten(), 0.99)

    # DAOStarFinder
    stfind = DAOStarFinder(threshold=thresh, fwhm=fwhm_sigma)
    sources = stfind(subt_data)
    positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
    return positions
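# A short follow-on sketch (not from the original source): the (x, y) positions
# returned by find_stars can feed directly into photutils aperture photometry.
# Here `data` is assumed to be the same image array, and r=4.0 pixels is an
# arbitrary illustration value.
from photutils.aperture import CircularAperture, aperture_photometry

positions = find_stars(data)
apertures = CircularAperture(positions, r=4.0)
phot_table = aperture_photometry(data - np.median(data), apertures)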
def test_psf_fitting_group(overlap_image):
    """Test psf_photometry when two input stars are close and need to be fit together."""
    from photutils.background import MADStdBackgroundRMS

    # There are a few models here that fail, be it something
    # created by EPSFBuilder or the simpler Moffat2D one
    # unprepared_psf = Moffat2D(amplitude=1, gamma=2, alpha=2.8, x_0=0, y_0=0)
    # psf = prepare_psf_model(unprepared_psf, xname='x_0', yname='y_0', fluxname=None)

    psf = prepare_psf_model(Gaussian2D(), renormalize_psf=False)
    psf.fwhm = Parameter('fwhm', 'this is not the way to add this I think')
    psf.fwhm.value = 10

    separation_crit = 10
    # choose low threshold and fwhm to find stars no matter what
    basic_phot = BasicPSFPhotometry(finder=DAOStarFinder(1, 1),
                                    group_maker=DAOGroup(separation_crit),
                                    bkg_estimator=MADStdBackgroundRMS(),
                                    fitter=LevMarLSQFitter(),
                                    psf_model=psf,
                                    fitshape=31)

    # this should not raise AttributeError: Attribute "offset_0_0" not found
    basic_phot(image=overlap_image)
# Image data.
hdulist = fits.open(image_file)
print(hdulist[0].header)
xsize = hdulist[0].header['NAXIS1']
ysize = hdulist[0].header['NAXIS2']
data = hdulist[0].data
hdulist.close()

# Crop image
# crop = cutout_footprint(hdu_data, (1200, 600), (600, 1200))
# hdu_crop = crop[0]

# take background statistics of the image
mean, median, sig = sigma_clipped_stats(data, sigma=10.0)
bkgrms = MADStdBackgroundRMS()
std = bkgrms(data)

# setup calibration parameters
print(mean, median, std)
thresh = 5. * std                                 # detect stars at 5-sigma level
sigma_psf = 5.                                    # size in pixels of targets
fwhm_sigma = sigma_psf * gaussian_sigma_to_fwhm   # convert to full-width at half-maximum

subt_data = data - median
maximum = np.quantile(subt_data.flatten(), 0.99)
print(median)
def compute_photutils(settings, image_data):
    # Taken from the photutils example: http://photutils.readthedocs.io/en/stable/psf.html
    # See also http://photutils.readthedocs.io/en/stable/api/photutils.psf.DAOPhotPSFPhotometry.html#photutils.psf.DAOPhotPSFPhotometry
    sigma_psf = settings.sigma_psf
    crit_separation = settings.crit_separation
    threshold = settings.threshold
    box_size = settings.box_size
    niters = settings.iters

    bkgrms = MADStdBackgroundRMS(SigmaClip(sigma=3.))
    std = bkgrms(image_data)

    logger.info('Using sigma=%f, threshold=%f, separation=%f, box_size=%d, niters=%d, std=%f' %
                (sigma_psf, threshold, crit_separation, box_size, niters, std))

    fitter = LevMarLSQFitter()

    # See findpars args: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?findpars
    photargs = {
        'crit_separation': crit_separation * sigma_psf * gaussian_sigma_to_fwhm,
        # 'crit_separation': crit_separation,
        'threshold': threshold * std,
        'fwhm': sigma_psf * gaussian_sigma_to_fwhm,
        'sigma_radius': sigma_psf * gaussian_sigma_to_fwhm,
        # 'sigma': 3.0,
        'fitter': fitter,
        'niters': niters,
        'fitshape': (box_size, box_size),
        'sharplo': 0.2,
        'sharphi': 2.0,
        'roundlo': -1.0,
        'roundhi': 1.0,
        'psf_model': IntegratedGaussianPRF(sigma=sigma_psf),
        'aperture_radius': sigma_psf * gaussian_sigma_to_fwhm,
    }
    # starfinder takes 'exclude border'
    # photargs['psf_model'].sigma.fixed = False

    photometry = DAOPhotPSFPhotometry(**photargs)

    # Column names:
    # 'flux_0', 'x_fit', 'x_0', 'y_fit', 'y_0', 'flux_fit', 'id', 'group_id',
    # 'flux_unc', 'x_0_unc', 'y_0_unc', 'iter_detected'
    result_tab = photometry(image=image_data)

    # Only use from final iteration
    # result_tab = result_tab[result_tab['iter_detected'] == niters]

    logger.info('Fit info: %s' % fitter.fit_info['message'])

    # Filter out negative flux
    # result_tab = result_tab[result_tab['flux_fit'] >= 0]

    # Formula: https://en.wikipedia.org/wiki/Instrumental_magnitude
    result_tab['mag'] = -2.5 * np.log10(result_tab['flux_fit'])
    result_tab['mag_unc'] = np.abs(-2.5 * np.log10(result_tab['flux_fit'] + result_tab['flux_unc']) -
                                   -2.5 * np.log10(result_tab['flux_fit'] - result_tab['flux_unc'])) / 2.0

    # http://www.ucolick.org/~bolte/AY257/s_n.pdf
    # result_tab['snr'] = 1.0875 / result_tab['mag_unc']
    result_tab['snr'] = 1.0 / (np.power(10, (result_tab['mag_unc'] / 2.5)) - 1)

    residual_image = photometry.get_residual_image()

    return result_tab, residual_image, std
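# A tiny numeric check (not in the original source) of the magnitude / uncertainty /
# SNR relations used in compute_photutils, with a made-up flux of 1000 +/- 10 counts:
# mag ~ -7.5, mag_unc ~ 0.0109 mag, and snr ~ 99.5, i.e. roughly flux / flux_unc.
flux, flux_unc = 1000.0, 10.0
mag = -2.5 * np.log10(flux)
mag_unc = np.abs(-2.5 * np.log10(flux + flux_unc) - -2.5 * np.log10(flux - flux_unc)) / 2.0
snr = 1.0 / (10 ** (mag_unc / 2.5) - 1)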
def daophot(cnam, ccd, xlo, xhi, ylo, yhi, niters, method, fwhm, beta, gfac,
            thresh, rejthresh):
    """
    Perform iterative PSF photometry and star finding on a region of a CCD.
    """
    print(xlo, ylo, xhi, yhi)

    # first check we are within a single window
    wnam1 = ccd.inside(xlo, ylo, 2)
    wnam2 = ccd.inside(xhi, yhi, 2)
    if wnam1 != wnam2:
        raise hcam.HipercamError(
            'PSF photometry cannot currently be run across separate windows')
    wnam = wnam1
    print(wnam)

    # background stats from whole window
    # estimate background RMS
    wind = ccd[wnam]

    rms_func = MADStdBackgroundRMS(sigma_clip=SigmaClip(sigma=rejthresh))
    bkg_rms = rms_func(wind.data)
    bkg_func = MMMBackground(sigma_clip=SigmaClip(sigma=rejthresh))
    bkg = bkg_func(wind.data)
    print('  Background estimate = {}, BKG RMS = {}'.format(bkg, bkg_rms))

    # crop window to ROI
    wind = ccd[wnam].window(xlo, xhi, ylo, yhi)

    # correct FWHM for binning
    fwhm /= wind.xbin
    if method == 'm':
        psf_model = MoffatPSF(fwhm, beta)
        print('  FWHM = {:.1f}, BETA={:.1f}'.format(fwhm, beta))
    else:
        psf_model = IntegratedGaussianPRF(sigma=fwhm * gaussian_fwhm_to_sigma)
        print('  FWHM = {:.1f}'.format(fwhm))

    # region to extract around positions for fits
    fitshape = int(5 * fwhm)
    # ensure odd
    if fitshape % 2 == 0:
        fitshape += 1

    photometry_task = DAOPhotPSFPhotometry(gfac * fwhm, thresh * bkg_rms, fwhm,
                                           psf_model, fitshape, niters=niters,
                                           sigma=rejthresh)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        results = photometry_task(wind.data - bkg)

    # filter out junk fits
    tiny = 1e-30
    bad_errs = (results['flux_unc'] < tiny) | (results['x_0_unc'] < tiny) | \
               (results['y_0_unc'] < tiny)
    results = results[~bad_errs]

    results.write('table_{}.fits'.format(cnam))
    print('  found {} stars'.format(len(results)))

    xlocs, ylocs = results['x_fit'], results['y_fit']

    # convert to device coordinates
    xlocs = wind.x(xlocs)
    ylocs = wind.y(ylocs)
    return xlocs, ylocs
def jwst_camera_fpa_data(data_dir, pattern, standardized_data_dir, parameters,
                         overwrite_source_extraction=False):
    """Generate standardized focal plane alignment (fpa) data
    based on JWST camera image."""

    save_plot = parameters['save_plot']

    file_list = glob.glob(os.path.join(data_dir, '*{}'.format(pattern)))

    if len(file_list) == 0:
        raise RuntimeError('No data found')

    file_list.sort()
    for f in file_list:

        plt.close('all')
        print()
        print('Data directory: {}'.format(data_dir))
        print('Image being processed: {}'.format(f))

        im = datamodels.open(f)
        if hasattr(im, 'data') is False:
            im.data = fits.getdata(f)
            # im.dq = np.zeros(im.data.shape)

        header_info = OrderedDict()

        for attribute in 'telescope'.split():
            header_info[attribute] = getattr(im.meta, attribute)

        # observations
        for attribute in 'date time visit_number visit_id visit_group activity_id program_number'.split():
            header_info['observation_{}'.format(attribute)] = getattr(
                im.meta.observation, attribute)

        header_info['epoch_isot'] = '{}T{}'.format(
            header_info['observation_date'], header_info['observation_time'])

        # instrument
        for attribute in 'name filter pupil detector'.split():
            header_info['instrument_{}'.format(attribute)] = getattr(
                im.meta.instrument, attribute)

        # subarray
        for attribute in 'name'.split():
            header_info['subarray_{}'.format(attribute)] = getattr(
                im.meta.subarray, attribute)

        # aperture
        for attribute in 'name position_angle pps_name'.split():
            try:
                value = getattr(im.meta.aperture, attribute)
            except AttributeError:
                value = None
            header_info['aperture_{}'.format(attribute)] = value

        header_info['INSTRUME'] = header_info['instrument_name']
        header_info['SIAFAPER'] = header_info['aperture_name']

        instrument_name = getattr(im.meta.instrument, 'name')
        instrument_detector = getattr(im.meta.instrument, 'detector')
        instrument_filter = getattr(im.meta.instrument, 'filter')

        # temporary solution, this should come from populated aperture attributes
        # if header_info['subarray_name'] == 'FULL':
        #     master_apertures = pysiaf.read.read_siaf_detector_layout()
        #     if header_info['instrument_name'].lower() in ['niriss', 'miri']:
        #         header_info['SIAFAPER'] = master_apertures['AperName'][np.where(master_apertures['InstrName']==header_info['instrument_name'])[0][0]]
        #     elif header_info['instrument_name'].lower() in ['fgs']:
        #         header_info['SIAFAPER'] = 'FGS{}_FULL'.format(header_info['instrument_detector'][-1])
        #     elif header_info['instrument_name'].lower() in ['nircam']:
        #         header_info['SIAFAPER'] = header_info['aperture_name']
        # else:
        #     sys.exit('Only FULL arrays are currently supported.')

        # target
        for attribute in 'ra dec catalog_name proposer_name'.split():
            header_info['target_{}'.format(attribute)] = getattr(
                im.meta.target, attribute)

        # pointing
        for attribute in 'ra_v1 dec_v1 pa_v3'.split():
            try:
                value = getattr(im.meta.pointing, attribute)
            except AttributeError:
                value = None
            header_info['pointing_{}'.format(attribute)] = value

        # add HST style keywords
        header_info['PROGRAM_VISIT'] = '{}_{}'.format(
            header_info['observation_program_number'],
            header_info['observation_visit_id'])
        header_info['PROPOSID'] = header_info['observation_program_number']
        header_info['DATE-OBS'] = header_info['observation_date']
        header_info['TELESCOP'] = header_info['telescope']
        header_info['INSTRUME'] = header_info['instrument_name']
        try:
            header_info['APERTURE'] = header_info['SIAFAPER']
        except KeyError:
            header_info['APERTURE'] = None
        header_info['CHIP'] = 0

        # TBD: Need to remove making yet another directory
        # extracted_sources_dir = os.path.join(standardized_data_dir, 'extraction')
        # if os.path.isdir(extracted_sources_dir) is False:
        #     os.makedirs(extracted_sources_dir)

        extracted_sources_file = os.path.join(
            standardized_data_dir,  # extracted_sources_dir,
            '{}_extracted_sources.fits'.format(os.path.basename(f).split('.')[0]))

        mask_extreme_slope_values = False
        parameters['maximum_slope_value'] = 1000.

        # Check if extracted_sources_file exists, or overwrite_source_extraction is set to True
        if (not os.path.isfile(extracted_sources_file)) or (overwrite_source_extraction):
            data = copy.deepcopy(im.data)
            # dq = copy.deepcopy(im.dq)

            # Convert image data to counts per second
            photmjsr = getattr(im.meta.photometry, 'conversion_megajanskys')
            data_cps = data / photmjsr

            if mask_extreme_slope_values:
                # clean up extreme slope values
                bad_index = np.where(np.abs(data) > parameters['maximum_slope_value'])
                data[bad_index] = 0.
                dq[bad_index] = -1

            bkgrms = MADStdBackgroundRMS()
            mmm_bkg = MMMBackground()
            bgrms = bkgrms(data_cps)
            bgavg = mmm_bkg(data_cps)

            # Default parameters that generally work for NIRCam/NIRISS images
            sigma_factor = 10
            round_lo, round_hi = 0.0, 0.6
            sharp_lo, sharp_hi = 0.3, 1.4
            fwhm_lo, fwhm_hi = 1.0, 20.0
            fwhm = 2.0
            minsep_fwhm = 7  # NOTE: minsep_fwhm > 5 to reject artifacts around saturated stars
            flux_percent_lo, flux_percent_hi = 10, 99

            # if 'sharp_lo' in parameters:
            #     sharp_lo = parameters['sharp_lo']

            ###
            ### TBD1: Relocate params below to config parts/files
            ###

            # Use different criteria for selecting good stars
            if parameters['nominalpsf']:
                # If using Nominal PSF models
                if instrument_name == 'NIRISS':
                    # fwhm_lo, fwhm_hi = 1.0, 2.0
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'FGS':
                    # fwhm_lo, fwhm_hi = 1.0, 1.4
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'NIRCAM':
                    sharp_lo, sharp_hi = 0.6, 1.4
                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.8, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.6, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75
            else:
                ###
                ### For OTE commissioning, tweak the params below after finding
                ### the correct ranges by running the photometry notebook.
                ###
                # If using Commissioning (non-phased) PSF models
                if instrument_name == 'NIRISS':
                    sharp_lo, sharp_hi = 0.6, 1.4
                    fwhm_lo, fwhm_hi = 1.4, 2.4
                elif instrument_name == 'FGS':
                    sigma_factor = 10
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.45, 0.7
                    round_lo, round_hi = 0.0, 0.3
                    flux_percent_lo, flux_percent_hi = 2, 99
                    fwhm = 4
                # Below works well for F200W and F356W images
                elif instrument_name == 'NIRCAM':
                    sigma_factor = 3
                    minsep_fwhm = 2.5
                    sharp_lo, sharp_hi = 0.5, 0.7
                    round_lo, round_hi = 0.0, 0.2
                    flux_percent_lo, flux_percent_hi = 2, 99
                    if 'F200W' in instrument_filter:
                        fwhm = 10
                    elif 'F356W' in instrument_filter:
                        fwhm = 8
                    elif 'F090W' in instrument_filter:
                        fwhm = 5.5
                    elif 'F277W' in instrument_filter:
                        fwhm = 6.5
                    else:
                        fwhm = 3
                elif instrument_name == 'MIRI':
                    sharp_lo, sharp_hi = 0.5, 1.0
                    fwhm_lo, fwhm_hi = 1.5, 2.2
                    sigma_factor = 3
                elif instrument_name == 'NIRSPEC':
                    sharp_lo, sharp_hi = 0.5, 0.8
                    round_lo, round_hi = 0.0, 0.3
                    fwhm_lo, fwhm_hi = 1.0, 1.75

            # Use IRAFStarFinder for source detection
            iraffind = IRAFStarFinder(threshold=sigma_factor * bgrms + bgavg,
                                      fwhm=fwhm, minsep_fwhm=minsep_fwhm,
                                      roundlo=round_lo, roundhi=round_hi,
                                      sharplo=sharp_lo, sharphi=sharp_hi)

            # Create default mask with all False values
            datamask = np.zeros(data_cps.shape, dtype=bool)  # This creates an array with all False

            # Mask the left (for NRS1) and right regions (for NRS2) for NIRSpec
            if instrument_detector == 'NRS1':
                datamask[:, :1023] = True  # Mask everything on the left side
            elif instrument_detector == 'NRS2':
                datamask[:, 1024:] = True  # Mask everything on the right side

            iraf_extracted_sources = iraffind(data_cps, mask=datamask)

            # Perform some basic filtering

            # Remove sources based on flux percentile
            # 10-99% works well for filtering out too faint or saturated sources
            flux_min = np.percentile(iraf_extracted_sources['flux'], flux_percent_lo)
            flux_max = np.percentile(iraf_extracted_sources['flux'], flux_percent_hi)
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] < flux_min))
            iraf_extracted_sources.remove_rows(
                np.where(iraf_extracted_sources['flux'] > flux_max))

            # Also remove sources based on fwhm
            ###
            ### Don't use below for now - 2/23/2022 (Don't use it unless we get lots of bad sources)
            ###
            # iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm'] < fwhm_lo))
            # iraf_extracted_sources.remove_rows(np.where(iraf_extracted_sources['fwhm'] > fwhm_hi))

            # Now improve the positions by re-running the centroiding algorithm if necessary.
            # NOTE: For now, re-centroiding will be turned off

            ###
            ### TBD2: Add re-centroiding algorithm adopted from Paul here
            ###
            # xarr = sources_masked['xcentroid']
            # yarr = sources_masked['ycentroid']
            # newx, newy = centroid_sources(data_cps, xarr, yarr, box_size=5, centroid_func=centroid_2dg)
            # coords = np.column_stack((newx, newy))
            # srcaper = CircularAnnulus(coords, r_in=1, r_out=3)
            # srcaper_masks = srcaper.to_mask(method='center')
            # satflag = np.zeros((len(newx),), dtype=int)
            # i = 0
            # for mask in srcaper_masks:
            #     srcaper_dq = mask.multiply(dqarr)
            #     srcaper_dq_1d = srcaper_dq[mask.data > 0]
            #     badpix = np.logical_and(srcaper_dq_1d > 2, srcaper_dq_1d < 7)
            #     reallybad = np.where(srcaper_dq_1d == 1)
            #     if ((len(srcaper_dq_1d[badpix]) > 1) or (len(srcaper_dq_1d[reallybad]) > 0)):
            #         satflag[i] = 1
            #         i =+ 1
            # goodx = newx[np.where(satflag == 0)]
            # goody = newy[np.where(satflag == 0)]
            # print('Number of sources before removing saturated or bad pixels: ', len(xarr))
            # print('Number of sources without saturated or bad pixels: ', len(goodx))
            # print(' ')
            # coords = np.column_stack((goodx, goody))

            print('Number of extracted sources after filtering: {} sources'.format(
                len(iraf_extracted_sources)))

            if parameters['use_epsf'] is True:
                size = 25
                hsize = (size - 1) / 2
                x = iraf_extracted_sources['xcentroid']
                y = iraf_extracted_sources['ycentroid']
                mask = ((x > hsize) & (x < (data_cps.shape[1] - 1 - hsize)) &
                        (y > hsize) & (y < (data_cps.shape[0] - 1 - hsize)))
                stars_tbl = Table()
                stars_tbl['x'] = x[mask]
                stars_tbl['y'] = y[mask]
                print('Using {} stars to build epsf'.format(len(stars_tbl)))

                data_cps_bkgsub = data_cps.copy()
                data_cps_bkgsub -= bgavg
                nddata = NDData(data=data_cps_bkgsub)
                stars = extract_stars(nddata, stars_tbl, size=size)

                #
                # Figure - PSF stars
                #
                nrows = 10
                ncols = 10
                fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                                       figsize=(20, 20), squeeze=True)
                ax = ax.ravel()
                for i in range(nrows * ncols):
                    if i <= len(stars) - 1:
                        norm = simple_norm(stars[i], 'log', percent=99.)
                        ax[i].imshow(stars[i], norm=norm, origin='lower',
                                     cmap='viridis')
                plt.title('{} sample stars for epsf'.format(header_info['APERTURE']))
                if save_plot:
                    figname = os.path.join(
                        extracted_sources_dir,
                        '{}_sample_psfs.pdf'.format(os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                #
                # Timer for ePSF construction
                #
                tic = time.perf_counter()
                epsf_builder = EPSFBuilder(oversampling=4, maxiters=3,
                                           progress_bar=False)
                print("Building ePSF ...")
                epsf, fitted_stars = epsf_builder(stars)
                toc = time.perf_counter()
                print("Time elapsed for building ePSF:", toc - tic)

                #
                # Figure - ePSF plot
                #
                norm_epsf = simple_norm(epsf.data, 'log', percent=99.)
                plt.figure()
                plt.imshow(epsf.data, norm=norm_epsf, origin='lower', cmap='viridis')
                plt.colorbar()
                plt.title('{} epsf using {} stars'.format(
                    header_info['APERTURE'], len(stars_tbl)))
                if save_plot:
                    figname = os.path.join(
                        extracted_sources_dir,
                        '{}_epsf.pdf'.format(os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_extracted_sources']:
                    plt.show()

                daogroup = DAOGroup(5.0 * 2.0)
                psf_model = epsf.copy()

                tic = time.perf_counter()
                photometry = IterativelySubtractedPSFPhotometry(
                    finder=iraffind, group_maker=daogroup,
                    bkg_estimator=mmm_bkg, psf_model=psf_model,
                    fitter=LevMarLSQFitter(), niters=1, fitshape=(11, 11),
                    aperture_radius=5)
                print('Performing source extraction and photometry ...')
                epsf_extracted_sources = photometry(data_cps)
                toc = time.perf_counter()
                print("Time elapsed for PSF photometry:", toc - tic)
                print('Final source extraction with epsf: {} sources'.format(
                    len(epsf_extracted_sources)))

                epsf_extracted_sources['xcentroid'] = epsf_extracted_sources['x_fit']
                epsf_extracted_sources['ycentroid'] = epsf_extracted_sources['y_fit']
                extracted_sources = epsf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

                norm = simple_norm(data_cps, 'sqrt', percent=99.)
                diff = photometry.get_residual_image()
                plt.figure()
                ax1 = plt.subplot(1, 2, 1)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax1.imshow(data_cps, norm=norm, cmap='Greys')
                ax2 = plt.subplot(1, 2, 2)
                plt.xlabel("X [pix]")
                plt.ylabel("Y [pix]")
                ax2.imshow(diff, norm=norm, cmap='Greys')
                plt.title('PSF subtracted image for {}'.format(os.path.basename(f)))
                if save_plot:
                    figname = os.path.join(
                        extracted_sources_dir,
                        '{}_psfsubtracted_image.pdf'.format(
                            os.path.basename(f).split('.')[0]))
                    plt.savefig(figname)
                if parameters['show_psfsubtracted_image']:
                    plt.show()

            else:
                extracted_sources = iraf_extracted_sources
                extracted_sources.write(extracted_sources_file, overwrite=True)

            positions = np.transpose((extracted_sources['xcentroid'],
                                      extracted_sources['ycentroid']))
            apertures = CircularAperture(positions, r=10)
            norm = simple_norm(data_cps, 'sqrt', percent=99.)

            plt.figure(figsize=(12, 12))
            plt.xlabel("X [pix]")
            plt.ylabel("Y [pix]")
            plt.imshow(data_cps, norm=norm, cmap='Greys', origin='lower')
            apertures.plot(color='blue', lw=1.5, alpha=0.5)
            title_string = '{}: {} selected sources'.format(
                os.path.basename(f), len(extracted_sources))
            plt.title(title_string)
            plt.tight_layout()
            if save_plot:
                figname = os.path.join(
                    standardized_data_dir,
                    '{}_extracted_sources.pdf'.format(
                        os.path.basename(f).split('.')[0]))
                plt.savefig(figname)
            if parameters['show_extracted_sources']:
                plt.show()
            plt.close()

        else:
            extracted_sources = Table.read(extracted_sources_file)

        print('Extracted {} sources from {}'.format(len(extracted_sources), f))

        impose_positive_flux = True
        if impose_positive_flux and parameters['use_epsf']:
            extracted_sources.remove_rows(
                np.where(extracted_sources['flux_fit'] < 0)[0])
            print('Only {} sources have positive flux'.format(len(extracted_sources)))

        astrometry_uncertainty_mas = 5

        if len(extracted_sources) > 0:
            # Cal images are in DMS coordinates, which correspond to the SIAF Science (SCI) frame
            extracted_sources['x_SCI'], extracted_sources['y_SCI'] = \
                extracted_sources['xcentroid'], extracted_sources['ycentroid']

            # For now, astrometric uncertainty defaults to 5 mas for each source.
            extracted_sources['sigma_x_mas'] = \
                np.ones(len(extracted_sources)) * astrometry_uncertainty_mas
            extracted_sources['sigma_y_mas'] = \
                np.ones(len(extracted_sources)) * astrometry_uncertainty_mas

        # transfer info to astropy table header
        for key, value in header_info.items():
            extracted_sources.meta[key] = value

        extracted_sources.meta['DATAFILE'] = os.path.basename(f)
        extracted_sources.meta['DATAPATH'] = os.path.dirname(f)
        extracted_sources.meta['EPOCH'] = header_info['epoch_isot']

        out_file = os.path.join(
            standardized_data_dir,
            '{}_FPA_data.fits'.format(
                extracted_sources.meta['DATAFILE'].split('.')[0]))

        print('Writing {}'.format(out_file))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', AstropyWarning, append=True)
            extracted_sources.write(out_file, overwrite=True)

    return im
def SubmitEvent(self):

    # Not a fan of globals but this is the easiest way to grab the file location
    global fileLocation
    # sigma_psf = 2.88
    # Grab the sigma from the Entry box in the GUI
    SigmaPSF = SigmaPSFentry.get()
    # Turn the string into a float
    sigma_psf = float(SigmaPSF)
    # Grab the number of iterations from the Entry box in the GUI
    N_iters1 = nitersEntry.get()
    # Turn the string into a float
    N_iters = float(N_iters1)
    # Test cases to make sure that information was flowing from the GUI to the program
    # print(SigmaPSF)
    # print(N_iters)

    # Open the file as a FITS (allows us to handle it), then turn that into readable data.
    with fits.open(fileLocation) as hdul:
        image = hdul[0].data

    # Automatically gathered information needed to run the star finder
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(image)

    # Find the stars
    iraffind = IRAFStarFinder(threshold=3.5 * std,
                              fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    # Group the stars
    daogroup = DAOGroup(2.0 * sigma_psf * gaussian_sigma_to_fwhm)

    # More automatically gathered info needed for IS-PSFPhotometry to take place
    mmm_bkg = MMMBackground()
    fitter = LevMarLSQFitter()
    # Grabbed from the user input
    psf_model = IntegratedGaussianPRF(sigma=sigma_psf)

    # Run IS-PSFPhotometry
    photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=mmm_bkg,
                                                    psf_model=psf_model,
                                                    fitter=LevMarLSQFitter(),
                                                    niters=N_iters,
                                                    fitshape=(11, 11))
    # Do photometry on the image
    result_tab = photometry(image=image)
    # Grab the residual image
    residual_image = photometry.get_residual_image()

    # Get the results of the photometry and print the aspects we want.
    phot_results = photometry(image)
    with open("output.txt", "w") as text_file:
        print(phot_results['x_fit', 'y_fit', 'flux_fit'], file=text_file)
    print(phot_results['x_fit', 'y_fit', 'flux_fit'])
    print("Sum of pixels: {}".format(sum(sum(residual_image))))

    # Plot images made #
    # Start by creating plots.
    plt.subplot(1, 5, 1)
    # Show the first plot (which is just the raw image)
    plt.imshow(image, cmap='viridis', aspect=1, interpolation='nearest',
               origin='lower')
    plt.title('Raw')
    plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

    # Create the second plot
    plt.subplot(1, 5, 2)
    # Show the residual_image
    plt.imshow(residual_image, cmap='viridis', aspect=1,
               interpolation='nearest', origin='lower')
    plt.title('PSF')
    plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
    # Draw in the sum of pixels.
    plt.text(0, 65, "Sum of pixels: {}".format(sum(sum(residual_image))),
             fontsize=7)

    # Create the third plot, which is the subtracted images combined.
    sb = image - residual_image
    plt.subplot(1, 5, 3)
    plt.imshow(sb, cmap='viridis', aspect=1, interpolation='nearest',
               origin='lower')
    plt.title('PSF-S')
    plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

    with open("AP_RI.txt", "w") as f:
        for _ in range(len(residual_image)):
            f.write(str(residual_image[_]))
    with open("AP_BS.txt", "w") as f:
        for _ in range(len(sb)):
            f.write(str(sb[_]))

    print("Starting creation of CSV")
    subprocess.run(['py', 'create_CSV.py'], shell=False)
    print("Starting creation of Stats")
    subprocess.run(['py', 'create_info.py'], shell=False)
    print("Starting Threshold")
    subprocess.run(['py', 'threshold.py'], shell=False)

    with open("APC_Res.csv", "r") as f:
        APC_Res = f.read()
    APC_Res = APC_Res.split(",")
    APC_Res = [float(i) for i in APC_Res]

    # Every (square root of the pixels) datapoints create a new array, to build a 2D array.
    # I'm going to use the Corrected_Res list as the main list and store the temp list
    # every sqrt(pix) entries in it, then reset that list and continue until the pixel
    # count is met. Have an internal counter; reset it every sqrt(pix).
    temp_list = np.array([])
    SqrPixels = math.sqrt(len(APC_Res))
    internal_counter = 0
    # print(SqrPixels)
    # print(len(APC_Res))
    Corrected_Res = np.array([[]])
    for _ in range(len(APC_Res)):
        if internal_counter <= SqrPixels - 2:
            try:
                temp_list = np.append(temp_list, APC_Res[_ - 1])
                # print(_)
                if _ + 1 == (int(SqrPixels) * int(SqrPixels)):
                    Corrected_Res = np.append(Corrected_Res, temp_list)
            except:
                print("Not right 2.0")
            internal_counter = internal_counter + 1
        else:
            internal_counter = 0
            # print(temp_list)
            Corrected_Res = np.append(Corrected_Res, temp_list)
            temp_list = []
            temp_list = np.append(temp_list, APC_Res[_ - 1])
            # print("Resetting Counter & List {}".format(_))
            if _ + 1 == (int(SqrPixels) * int(SqrPixels)):
                Corrected_Res = np.append(Corrected_Res, temp_list)
                # print(_ + 1)
        # print("Iteration {}".format(_))

    # print(residual_image)
    # print("\n")
    # print(Corrected_Res)
    Corrected_Res = np.reshape(Corrected_Res, (int(SqrPixels), int(SqrPixels)))
    Correct_BS = image - Corrected_Res

    plt.subplot(1, 5, 4)
    plt.imshow(Corrected_Res, cmap='viridis', aspect=1,
               interpolation='nearest', origin='lower')
    plt.title('CPSF')
    plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

    plt.subplot(1, 5, 5)
    plt.imshow(Correct_BS, cmap='viridis', aspect=1,
               interpolation='nearest', origin='lower')
    plt.title('CPSF-S')
    plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

    # Number of bins
    n_bins = 20
    # Not super sure why this works the way that it does, if I'm being truthful;
    # it took tinkering to work, and lots of documentation examples.
    fig, axs = plt.subplots(1, 2)
    # We can set the number of bins with the `bins` kwarg
    axs[0].hist(residual_image, bins=n_bins)
    plt.title('Residual Image Hist')
    axs[1].hist(sb, bins=n_bins)
    plt.title('Background Subtracted Hist')
    # plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)

    # All pixels from residual image
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    delta = (6 * (1 / len(sb)))
    nx = ny = np.arange(-3.0, 3.0, delta)
    X, Y = np.meshgrid(nx, ny)
    # print(X)
    # print(Y)
    x, y, z = X * len(sb), Y * len(sb), sb
    ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis')

    figi = plt.figure()
    axi = figi.add_subplot(111, projection='3d')
    deltai = (6 * (1 / len(sb)))
    nxi = nyi = np.arange(-3.0, 3.0, deltai)
    Xi, Yi = np.meshgrid(nxi, nyi)
    # print(X)
    # print(Y)
    xi, yi, zi = Xi * len(Correct_BS), Yi * len(Correct_BS), Correct_BS
    axi.plot_surface(xi, yi, zi, rstride=1, cstride=1, cmap='viridis')

    plt.show()
def Flux(self, x, y):
    x = int(x)
    y = int(y)
    r = 25
    data = self.hdulist[self.fz].data[x - r:x + r, y - r:y + r]
    data = (lacosmic.lacosmic(data, 2, 10, 10, effective_gain=self.gain,
                              readnoise=self.readnoise))[0]
    bkgrms = MADStdBackgroundRMS()
    std = bkgrms(data)
    iraffind = IRAFStarFinder(threshold=self.limit * std,
                              fwhm=self.sigma_psf * gaussian_sigma_to_fwhm,
                              minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                              sharplo=0.0, sharphi=2.0)
    daogroup = DAOGroup(2.0 * self.sigma_psf * gaussian_sigma_to_fwhm)
    mmm_bkg = MMMBackground()
    psf_model = IntegratedGaussianPRF(sigma=self.sigma_psf)
    from photutils.psf import IterativelySubtractedPSFPhotometry
    photometry = IterativelySubtractedPSFPhotometry(finder=iraffind,
                                                    group_maker=daogroup,
                                                    bkg_estimator=mmm_bkg,
                                                    psf_model=psf_model,
                                                    fitter=LevMarLSQFitter(),
                                                    niters=1,
                                                    fitshape=(21, 21))
    result_tab = photometry(image=data)

    """
    if plot == 1:
        residual_image = photometry.get_residual_image()
        print(result_tab['x_fit', 'y_fit'])
        plt.figure(self.filename + ' data')
        plt.imshow(data, cmap='viridis', aspect=1, interpolation='nearest',
                   origin='lower')
        plt.show()
        plt.figure(self.filename + ' residual')
        plt.imshow(residual_image, cmap='viridis', aspect=1,
                   interpolation='nearest', origin='lower')
        plt.show()
        plt.figure(self.filename + ' PSF')
        plt.imshow(data - residual_image, cmap='viridis', aspect=1,
                   interpolation='nearest', origin='lower')
        plt.show()
    """

    if len(result_tab) > 5:
        return (0, 0)
    if len(result_tab) == 0:
        print('None')
        return (0, 0)
    result_tab['Minus'] = np.zeros(len(result_tab))
    for i in range(len(result_tab)):
        if 18.5 < result_tab['x_fit'][i] < 28.5 and 18.5 < result_tab['y_fit'][i] < 28.5:
            # if 15 < result_tab['x_fit'][i] < 25 and 15 < result_tab['y_fit'][i] < 25:
            result_tab['Minus'][i] = 1
        else:
            result_tab['Minus'][i] = 0
    mask = result_tab['Minus'] == 1.0
    result_tab = result_tab[mask]
    if len(result_tab) != 1:
        return (0, 0)
    flux_counts = float(result_tab['flux_fit'][0])
    flux_unc = float(result_tab['flux_unc'][0])
    flux_unc = flux_unc / flux_counts
    return (flux_counts, flux_unc)
def cheating_astrometry(image, input_table, psf: np.ndarray, filename: str = '?',
                        config: Config = Config.instance()):
    """
    Evaluate the maximum achievable precision of the EPSF fitting approach by
    using a hand-defined psf.

    :param input_table:
    :param image:
    :param filename:
    :param psf:
    :param config:
    :return:
    """
    try:
        print(f'starting job on image {filename} with {config}')
        origin = np.array(psf.shape) / 2  # type: ignore
        epsf = photutils.psf.EPSFModel(psf, flux=1, origin=origin,
                                       oversampling=1, normalize=False)
        epsf = photutils.psf.prepare_psf_model(epsf, renormalize_psf=False)

        finder = get_finder(image, config)

        # fwhm = estimate_fwhm(epsf.psfmodel)
        fwhm = config.fwhm_guess
        grouper = DAOGroup(config.separation_factor * fwhm)

        epsf.fwhm = astropy.modeling.Parameter(
            'fwhm', 'this is not the way to add this I think')
        epsf.fwhm.value = fwhm
        bkgrms = MADStdBackgroundRMS()

        photometry = BasicPSFPhotometry(finder=finder,
                                        group_maker=grouper,
                                        bkg_estimator=bkgrms,
                                        psf_model=epsf,
                                        fitter=LevMarLSQFitter(),
                                        fitshape=config.fitshape)

        guess_table = input_table.copy()
        guess_table = cut_edges(guess_table, 101, image.shape[0])
        guess_table.rename_columns(['x', 'y'], ['x_0', 'y_0'])
        guess_table['x_0'] += np.random.uniform(-0.1, +0.1, size=len(guess_table['x_0']))
        guess_table['y_0'] += np.random.uniform(-0.1, +0.1, size=len(guess_table['y_0']))

        result_table = photometry(image, guess_table)

        return PhotometryResult(image, input_table, result_table, epsf, None,
                                config, filename)
    except Exception as ex:
        import traceback
        print(f'error in cheating_astrometry({filename}, {psf}, {config})')
        error = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__))
        print(error)
        return error