def test_tapering_Gaussian(self):
    self.actualSetUp()
    size_required = 0.003542
    self.componentvis, _, _ = weight_visibility(self.componentvis, self.model,
                                                algorithm='uniform')
    self.componentvis = taper_visibility_gaussian(self.componentvis, beam=size_required)
    psf, sumwt = invert_2d(self.componentvis, self.model, dopsf=True)
    export_image_to_fits(
        psf, '%s/test_weighting_gaussian_taper_psf.fits' % self.dir)
    xfr = fft_image(psf)
    xfr.data = xfr.data.real.astype('float')
    export_image_to_fits(
        xfr, '%s/test_weighting_gaussian_taper_xfr.fits' % self.dir)
    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)
    fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
    if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
        raise ValueError('Error in fitting to psf')
    # fit_2dgaussian returns the standard deviation (sqrt of the variance).
    # We need to convert that to FWHM.
    # https://en.wikipedia.org/wiki/Full_width_at_half_maximum
    scale_factor = numpy.sqrt(8 * numpy.log(2.0))
    size = numpy.sqrt(fit.x_stddev * fit.y_stddev) * scale_factor
    # Now we need to convert to radians
    size *= numpy.pi * self.model.wcs.wcs.cdelt[1] / 180.0
    # Very impressive! Desired 0.01, achieved 0.0100006250829
    assert numpy.abs(size - size_required) < 0.001 * size_required, \
        "Fit should be %f, actually is %f" % (size_required, size)
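# For reference, the conversion described in the comments above can be checked in
# isolation: FWHM = sigma * sqrt(8 ln 2), then degrees -> radians via pi / 180.
# The stddev and cdelt values below are illustrative only, not taken from the test setup.
import numpy

sigma_pixels = 1.2                                   # hypothetical fitted Gaussian stddev (pixels)
cdelt_deg = 0.001                                    # hypothetical pixel size (degrees)
fwhm_pixels = sigma_pixels * numpy.sqrt(8 * numpy.log(2.0))
fwhm_radians = fwhm_pixels * numpy.pi * cdelt_deg / 180.0
print(fwhm_pixels, fwhm_radians)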
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    :param model: Model image
    :param psf: Input PSF
    :param residual: Residual image to be added back (optional)
    :return: restored image
    """
    assert isinstance(model, Image), model
    assert isinstance(psf, Image), psf
    assert residual is None or isinstance(residual, Image), residual

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        from scipy.optimize import minpack
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug('restore_cube: error in fitting to psf, using 1 pixel stddev')
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error as err:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError as err:
            log.debug('restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # TODO: Remove filter when astropy fixes convolve
    import warnings
    warnings.simplefilter(action='ignore', category=FutureWarning)
    from astropy.convolution import Gaussian2DKernel, convolve_fft

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size**2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve_fft(
                model.data[chan, pol, :, :], gk,
                normalize_kernel=False, allow_huge=True)
    if residual is not None:
        restored.data += residual.data
    return restored
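# A minimal usage sketch for the function above, assuming `model`, `psf`, and
# `residual` are Image objects produced elsewhere in this package (e.g. by a
# deconvolution step); the psfwidth value is illustrative and simply bypasses
# the internal PSF fit.
restored = restore_cube(model, psf, residual=residual, psfwidth=2.5)
export_image_to_fits(restored, 'restored.fits')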
def source_detection(ccd, fwhm=3.0, sigma=3.0, iters=5,
                     threshold=5.0, find_fwhm=True):
    """
    Returns an astropy table containing the position of sources
    within the image.

    Parameters
    ----------
    ccd : numpy.ndarray
        The CCD Image array.
    fwhm : float, optional
        Full-width half-max of stars in the image.
    sigma : float, optional
        The number of standard deviations to use as the lower and
        upper clipping limit.
    iters : int, optional
        The number of iterations to perform sigma clipping.
    threshold : float, optional
        The number of background standard deviations above which to
        select sources.
    find_fwhm : bool, optional
        If ``True``, estimate the FWHM of each source by fitting a
        2D Gaussian to it.

    Returns
    -------
    sources
        An astropy table of the positions of sources in the image.
        If ``find_fwhm`` is ``True``, includes a column called ``FWHM``.
    """
    data = ccd
    mean, median, std = sigma_clipped_stats(data, sigma=sigma, maxiters=iters)
    daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold * std)
    sources = daofind(data - median)
    if find_fwhm:
        fwhm_fit = []
        for source in sources:
            x = source['xcentroid']
            y = source['ycentroid']
            cutout = Cutout2D(data, (x, y), 5 * fwhm)
            fit = fit_2dgaussian(cutout.data)
            fwhm_fit.append(gaussian_sigma_to_fwhm *
                            (fit.x_stddev + fit.y_stddev) / 2)
        sources['FWHM'] = fwhm_fit
    return sources
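# A small, self-contained usage sketch for source_detection (synthetic data,
# illustrative parameters), assuming the names used inside the function
# (sigma_clipped_stats, DAOStarFinder, Cutout2D, fit_2dgaussian,
# gaussian_sigma_to_fwhm) are already imported.
import numpy as np

yy, xx = np.mgrid[:64, :64]
star = 200.0 * np.exp(-((xx - 30.0)**2 + (yy - 22.0)**2) / (2 * 2.0**2))
frame = star + np.random.normal(0.0, 1.0, star.shape)

sources = source_detection(frame, fwhm=4.0, threshold=10.0, find_fwhm=True)
print(sources['xcentroid', 'ycentroid', 'FWHM'])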
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    :param psf: Input PSF
    :return: restored image
    """
    assert isinstance(model, Image), "Type is %s" % (type(model))
    assert isinstance(psf, Image), "Type is %s" % (type(psf))
    assert residual is None or isinstance(residual, Image), \
        "Type is %s" % (type(residual))

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug('restore_cube: error in fitting to psf, using 1 pixel stddev')
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except:
            log.debug('restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size**2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve(
                model.data[chan, pol, :, :], gk, normalize_kernel=False)
    if residual is not None:
        restored.data += residual.data
    return restored
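# The 2 * pi * size**2 factor above can be sanity-checked on its own:
# Gaussian2DKernel is integral-normalised, so scaling by the Gaussian volume
# gives a clean beam with approximately unit peak, which is what preserves
# point-source peak flux through the convolution. Illustrative check:
import numpy
from astropy.convolution import Gaussian2DKernel

size = 2.0                                  # illustrative clean-beam sigma in pixels
gk = Gaussian2DKernel(size)                 # kernel array sums to ~1
norm = 2.0 * numpy.pi * size**2             # volume of the Gaussian
print(norm * gk.array.max())                # ~1.0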
def compute_fwhm(ccd, sources, fwhm_estimate=5,
                 x_column='xcenter', y_column='ycenter', fit=True):
    fwhm_x = []
    fwhm_y = []
    for source in sources:
        x = source[x_column]
        y = source[y_column]
        sky = source['sky_per_pix_avg']
        # Cutout2D needs no units on the center position, so remove unit
        # if it is present.
        try:
            x = x.value
            y = y.value
            sky = sky.value
        except AttributeError:
            pass

        cutout = Cutout2D(ccd, (x, y), 5 * fwhm_estimate)
        if fit:
            fit = fit_2dgaussian(cutout.data)
            fwhm_x.append(gaussian_sigma_to_fwhm * fit.x_stddev)
            fwhm_y.append(gaussian_sigma_to_fwhm * fit.y_stddev)
            print('Still fitting!!')
        else:
            dat = np.where(cutout.data - sky > 0, cutout.data - sky, 0)
            mom1 = _moments(dat, order=1)
            xc = mom1[0, 1] / mom1[0, 0]
            yc = mom1[1, 0] / mom1[0, 0]
            moments = _moments_central(dat, center=(xc, yc), order=2)
            mom_scale = (moments / mom1[0, 0])
            fwhm_xm = 2 * np.sqrt(np.log(2) * mom_scale[0, 2])
            fwhm_ym = 2 * np.sqrt(np.log(2) * mom_scale[2, 0])
            fwhm_x.append(fwhm_xm)
            fwhm_y.append(fwhm_ym)

    return np.array(fwhm_x), np.array(fwhm_y)
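# A minimal usage sketch for compute_fwhm, assuming `ccd_image` is a 2-D numpy
# array already in memory (hypothetical) and that the aperture table carries
# the column names the function expects; values are illustrative.
from astropy.table import Table

apertures = Table({'xcenter': [30.2],
                   'ycenter': [21.8],
                   'sky_per_pix_avg': [5.0]})
fwhm_x, fwhm_y = compute_fwhm(ccd_image, apertures, fwhm_estimate=5, fit=True)
print(fwhm_x, fwhm_y)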
def begin(index):
    chunk_size = 50
    #i = int(index)
    i = int(index.split('-')[0])
    mgi = int(index.split('-')[1])
    color = index.split('-')[2]
    #mgi = int(index.split('-')[1])

    try:
        print(index)
        #filename = 'Test Data Extract/' + str(i) + '.fit'
        #filename = str(i) + '-g.fit'
        #filename = '/data/marvels/billzhu/2175 Dataset/' + str(index) + '-g.fit'
        filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
        #print(filename)
        #filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
        hdulist = fits.open(filename)

        #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit')
        #qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + str(index) + '_DUST.fit')
        qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
        #qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
        x = qlist[0].header['XCOORD']
        y = qlist[0].header['YCOORD']
        #print("%f, %f" % (x, y))
        qlist.close()
    except:
        print("No coordinates")
        return

    # Save some frickin time
    half = 700
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, iters=5)

    if x + chunk_size > 2048:
        filler = np.array([float(median)] * len(scidata))
        for j in range(chunk_size):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(chunk_size):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + chunk_size > 1489:
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(chunk_size):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(chunk_size):
            scidata = np.insert(scidata, 0, filler, 0)

    #if 'SKY' in hdulist[0].header.keys():
    #    scidata -= float(hdulist[0].header['SOFTBIAS'])
    #    scidata -= float(hdulist[0].header['SKY'])
    #else:
    scidata -= median

    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)

    # DAOStarFinder algorithm that finds all sources greater than 3 sigma above the background value, with strict roundness parameters
    daofind = DAOStarFinder(fwhm=2., threshold=5. * bkg_sigma)
    #print("%f, %f" % (x, y))
    sources = daofind(scidata[y - 10 : y + 10, x - 10 : x + 10])

    # Update coordinates of the sources
    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10
    #print(sources)

    # Create new column that contains the FWHM of each source for comparison later
    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name='FWHM')
    sources.add_column(column)

    # Find the quasar and calculate its FWHM
    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) - x) < 3.0 and abs(round(sources['ycentroid'][j]) - y) < 3.0:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d %d %f %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[int(sources['ycentroid'][j] - width - 1) : int(sources['ycentroid'][j] + width) + 2,
                           int(sources['xcentroid'][j] - width - 1) : int(sources['xcentroid'][j] + width) + 2]

            """
            plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
            plt.show()
            plt.pause(3)
            """

            gauss = 0
            if (np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask=None)

            fwhm = 0
            if gauss != 0:
                fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss.x_stddev)
            quasar['FWHM'] = fwhm
            qsigma = np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
            print(quasar['FWHM'])
            break

    ztot = 10000
    print(quasar)

    # If no quasar is found, the field image is deemed corrupt and not used
    if quasar == 0:
        return

    # Define cutout image limits, adjusted to field image boundaries as necessary i.e. x, y < 0 or > max x/y values
    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])

    image = scidata[yl : yu, xl : xu]
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=quasar['FWHM'], threshold=7. * bkg_sigma, roundlo=-0.20, roundhi=0.20)
    sources = daofind.find_stars(scidata)
    #print(len(sources))

    #qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + str(index) + '_DUST.fit')
    qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qsodata = qsocut[0].data.astype(float)

    # Shift the source coordinates to the actual image coordinates
    #sources['xcentroid'] += xl
    #sources['ycentroid'] += yl

    # If no sources found, skip iteration
    if len(sources) <= 0:
        return

    # Interpolates the PSF sources to their actual centroids, upsampling it 2x, and adds it to large array for PCA
    temp = 1
    largearr = []
    for j in range(len(sources)):
        if abs(sources['xcentroid'][j] - quasar['xcentroid']) < 2 and abs(sources['ycentroid'][j] - quasar['ycentroid']) < 2:
            continue

        chunk_size = 50
        pXc = sources['xcentroid'][j]
        pYc = sources['ycentroid'][j]
        #print("%f, %f" % (pXc, pYc))

        xr = np.arange(int(pXc) - chunk_size - 5, int(pXc) + chunk_size + 6)
        yr = np.arange(int(pYc) - chunk_size - 5, int(pYc) + chunk_size + 6)
        preshift = scidata[int(pYc) - chunk_size - 5 : int(pYc) + chunk_size + 6,
                           int(pXc) - chunk_size - 5 : int(pXc) + chunk_size + 6]
        shifted = []

        try:
            spline = interpolate.interp2d(xr, yr, preshift)
            xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 1)
            yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 1)
        except:
            #print("ERROR")
            continue

        if len(xrf) > 101:
            xrf = xrf[:-1].copy()
        if len(yrf) > 101:
            yrf = yrf[:-1].copy()

        shifted = spline(xrf, yrf)
        cont = False

        # Safety that screens out images with multiple sources by checking incremental means
        # CHECK DISCONTINUED DUE TO INCOMPLETENESS / SOME ERRORS
        """
        meanarr = []
        for k in range(0, 5):
            tempcut = list(shifted[20 - 4 * k : 21 + 4 * k, 20 - 4 * k : 21 + 4 * k])
            mean1 = np.mean(tempcut)
            #print(mean1)
            if len(meanarr) > 0 and mean1 > meanarr[len(meanarr) - 1]:
                cont = True
                #print(temp)
                #fits.writeto(str(temp) + '.fit', shifted, clobber = True)
                #temp += 1
                break
            meanarr.append(mean1)
        """

        # Originally discontinued, but upon closer inspection, the same source finder parameters is used as in the
        # original source search, thus sources found will be the same, i.e. ideal source finder
        #check_source = daofind.find_stars(preshift)
        #if len(check_source) > 1:
        #    continue

        # If the source has a weird shape i.e. due to gravitational lensing, then check if the maximum pixel
        # is within 2 pixels of the center to ensure consistency
        mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, iters=5)
        daofind = DAOStarFinder(fwhm=2, threshold=3.0 * bkg_sigma)
        sources1 = daofind.find_stars(shifted)
        cont = checkInner(shifted, sources1)

        if cont == True:
            continue

        shifted = checkOutter(shifted, mean1, std1)

        """
        max_coords = np.unravel_index(shifted.argmax(), shifted.shape)
        max_coords = list(max_coords)
        #print(max_coords)
        for k in range(len(max_coords)//2):
            yt = max_coords[2 * k]
            xt = max_coords[2 * k + 1]
            #print("%f, %f" % (xt, yt))
            if distance(xt, yt, 20, 20) > 4:
                cont = True
                break
        """

        #fits.writeto(str(temp) + '.fit', shifted, clobber = True)
        #print(temp)
        #print(meanarr)
        #shifted /= np.max(shifted)
        #shifted *= np.max(qsodata)
        largearr.append(np.reshape(shifted, 10201))

    largearr = np.array(largearr)
    print(np.shape(largearr))

    # Set number of components in PCA, use incremental PCA (IPCA) due to high efficiency and speed
    numcomp = 20

    # Need a healthy number of sources to make the PSF fitting in order to decrease noise, setting at 5% threshold
    if len(largearr) < 8:
        return

    print(numcomp)

    mean_vector = []
    #print(np.shape(largearr))
    try:
        for j in range(0, 10201):
            mean_vector.append(np.mean(largearr[:, j]))
    except:
        print("NO SOURCE FOUND")
        return

    largearr -= mean_vector

    ipca = IncrementalPCA(n_components=numcomp)
    ipca.fit(largearr)
    ipca_comp = ipca.components_
    #print(np.shape(ipca_comp))
    ipca_comp = ipca_comp.T
    #print(ipca_comp)

    #print(np.shape(largearr[0, :]))
    #print(np.shape(ipca_comp))
    total_res = 0
    max_median = 10000000

    """
    # Calculate optimal number of coefficients to be taken
    for p, take in enumerate([12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120]):
        if take > len(largearr)//4 * 3 or take < len(largearr)//2:
            continue
        totalres = 0
        for j in range(len(largearr)):
            coeff = np.dot(largearr[j, :], ipca_comp[:, 0:take])
            fit = np.dot(ipca_comp[:, 0:take], coeff[0:take])
            resfit = largearr[j, :] - fit
            total_res += resfit
        m1, m2, s1 = sigma_clipped_stats(total_res, sigma=3.0, iters=5)
        if m2 < max_median:
            max_median = m1
            take_final = take

    # Lowest mean means lowest residual
    plt.imshow(np.reshape(fit, (42, 42)), origin='lower', interpolation='nearest', cmap='viridis')
    plt.show()
    plt.pause(2)
    plt.close()
    """

    #if 'SKY' in hdulist[0].header.keys():
    #    qsodata -= float(hdulist[0].header['SOFTBIAS'])
    #    qsodata -= float(hdulist[0].header['SKY'])
    #else:
    qsodata -= median

    take_final = 4

    # Final fitting of the first n components, as determined by take_final, into the quasar to build a PSF fit
    qsodata = np.reshape(qsodata, 10201)
    coeff = np.dot(qsodata, ipca_comp[:, 0:take_final])
    final_fit = np.dot(ipca_comp[:, 0:take_final], coeff[0:take_final])
    final_fit += mean_vector
    final_fit = np.reshape(final_fit, (101, 101))
    #final_fit /= len(largearr)
    qsodata = np.reshape(qsodata, (101, 101))

    """
    qx = np.arange(0, len(qsodata))
    qy = np.arange(0, len(qsodata))
    spline = interpolate.interp2d(qx, qy, qsodata)
    qxf = np.arange(0, len(qsodata), 0.1)
    qyf = np.arange(0, len(qsodata), 0.1)
    qsodata = spline(qxf, qyf)

    spline = interpolate.interp2d(qx, qy, final_fit)
    final_fit = spline(qxf, qyf)
    """

    gauss_fit = photutils.fit_2dgaussian(final_fit[40 : 61, 40 : 61], mask=None)
    fit_fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss_fit.x_stddev)
    #print(fit_fwhm)
    #print("%f, %f" % (quasar['FWHM'], fit_fwhm))
    ffwhm = max(quasar['FWHM'], fit_fwhm)
    ffphoton_1sig = photonCount(50, 50, 2 * ffwhm, final_fit)
    #qsophoton_1sig = photonCount(50, 50, 6, qsodata)

    """
    for j in range(len(final_fit)):
        for k in range(len(final_fit)):
            if distance(50, 50, j, k) < 3:
                final_fit[j][k] /= ffphoton_1sig
                final_fit[j][k] *= qsophoton_1sig
    """

    #final_fit /= ffphoton_1sig
    #final_fit *= qsophoton_1sig

    line_data = linecache.getline('Full Data.txt', i).split()
    if color == 'g':
        mag = float(line_data[6])
    if color == 'r':
        mag = float(line_data[8])
    if color == 'i':
        mag = float(line_data[10])
    if color == 'z':
        mag = float(line_data[12])
    if color == 'u':
        mag = float(line_data[4])

    try:
        multiplier = 10**(mag / (-2.5)) * 10**8 * hdulist[0].header['FLUX20']
        final_fit /= ffphoton_1sig
        final_fit *= multiplier
    except:
        #final_fit *= qsodata[50, 50]
        return

    """
    header = hdulist[0].header
    mag20 = header['flux20'] - median
    plt.imshow(qsodata, origin='lower', interpolation='nearest', cmap='viridis')
    plt.show()
    plt.imshow(totalfit, origin='lower', interpolation='nearest', cmap='viridis')
    plt.show()
    #plt.pause(3)
    """

    print("%f, %f" % (qsodata[50][50], final_fit[50][50]))
    residue = qsodata - final_fit

    """
    residue /= mag20
    f1 = fits.open('/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/4-g.fit')
    h1 = f1[0].header
    mean1, median1, stddev1 = sigma_clipped_stats(f1[0].data.astype(float), sigma=3.0, iters=5)
    mag20_1 = h1['flux20'] - median1
    residue *= mag20_1
    qdata = linecache.readline('Full Data.txt', i)
    c = coord.SkyCoord(ra = float(qdata[1]), dec = float(qdata[2]))
    sfd = SFDQuery()
    residue *= 10**(0.4 * sfd(c))
    """

    #plt.imshow(residue, origin='lower', interpolation='nearest', cmap='viridis')
    #plt.show()

    # Only used for reference quasars
    """
    for j in range(42):
        for k in range(42):
            if shifted[k, j] > threshold:
                #print("Over")
                check = checkNoise(j, k, 21, 21, residue)
                if check == True:
                    #print("Over True")
                    counter += 1
                    if counter > 10:
                        return
    """

    try:
        #fits.writeto('/data/marvels/billzhu/2175 PSF Cut/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/2175 PSF Subtract/' + str(index) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/Reference PSF Cut/0.37 - 0.55/' + color + '/' + str(index) + '_PSF.fit',
                     final_fit, hdulist[0].header, clobber=True)
        fits.writeto('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/' + color + '/' + str(index) + '_SUB.fit',
                     residue, hdulist[0].header, clobber=True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        print('\n')
        print("DONE TO BOTTOM")
    except:
        print('HEADER IS CORRUPT')
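# The core of the routine above is the IPCA reconstruction step: fit components
# to the flattened star cutouts, then rebuild the quasar cutout from its
# projection onto the first few components. A stripped-down sketch of just that
# step, with random stand-in arrays in place of the real 101x101 cutouts:
import numpy as np
from sklearn.decomposition import IncrementalPCA

k = 4                                       # components kept (take_final above)
stars = np.random.rand(20, 10201)           # stand-in for largearr (one cutout per row)
target = np.random.rand(10201)              # stand-in for the flattened quasar cutout

mean_vector = stars.mean(axis=0)
ipca = IncrementalPCA(n_components=k).fit(stars - mean_vector)
comps = ipca.components_.T                  # shape (10201, k)

coeff = target @ comps                      # projection coefficients
psf_fit = comps @ coeff + mean_vector       # reconstructed PSF model, flattened
psf_image = psf_fit.reshape(101, 101)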
def begin(i):
    chunk_size = 10

    #for j in range(3):
    #    num, x, y = reader.readline().split()
    num, x, y = reader.readline().split()
    x = int(x)
    y = int(y)
    print("%f %f" % (x, y))
    print(str(i))

    filename = 'Test Data Extract/' + str(i) + '.fit'
    hdulist = fits.open(filename)

    half = 300
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, iters=5)

    if x + half + chunk_size > 2048:
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - half - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + half + chunk_size > 1489:
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - half - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 0)

    scidata -= median

    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=2., threshold=3. * bkg_sigma)
    sources = daofind(scidata[y - 10 : y + 10, x - 10 : x + 10])
    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10

    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name='FWHM')
    sources.add_column(column)

    # Find the quasar and calculate its FWHM
    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) - x) < 2 and abs(round(sources['ycentroid'][j]) - y) < 2:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d %d %f %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[int(sources['ycentroid'][j] - width/2) : int(sources['ycentroid'][j] + width/2),
                           int(sources['xcentroid'][j] - width/2) : int(sources['xcentroid'][j] + width/2)]

            gauss = 0
            if (np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask=None)

            fwhm = 0
            if gauss != 0:
                fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
            quasar['FWHM'] = fwhm
            #print(quasar['FWHM'])
            break

    ztot = 10000
    print(quasar)

    if quasar == 0:
        return

    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])

    image = scidata[yl : yu, xl : xu]
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=quasar['FWHM'], threshold=3. * bkg_sigma, roundlo=-0.15, roundhi=0.15)
    sources = daofind.find_stars(image)

    # Shift the source coordinates to the actual image coordinates
    sources['xcentroid'] += xl
    sources['ycentroid'] += yl
    #print(sources)

    # If no sources found, go to next iteration with larger dimensions
    if len(sources) <= 0:
        return

    # Calculate the FWHM of each identified source, and append them into a column that is added to the source table
    # Splices the data array for the quasar, with the alleged centroid at the center
    # Fits a 2D Gaussian curve onto the array, and uses the relation between sigma and fwhm
    FWHM = []
    for j in range(len(sources)):
        width = int(np.sqrt(sources['npix'][j]))
        #print("%d %d %f %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
        data = scidata[int(sources['ycentroid'][j] - width/2) - 1 : int(sources['ycentroid'][j] + width/2) + 1,
                       int(sources['xcentroid'][j] - width/2) - 1 : int(sources['xcentroid'][j] + width/2) + 1]

        gauss = 0
        if (np.ma.count(data) >= 7):
            gauss = photutils.fit_2dgaussian(data, mask=None)

        fwhm = 0
        if gauss != 0:
            fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
        FWHM.append(fwhm)

    column = Column(FWHM, name='FWHM')
    sources.add_column(column)

    # Helper methods for comparing a candidate PSF source with the quasar
    def distance(x, y):
        return math.sqrt((x - quasar['xcentroid']) ** 2 + (y - quasar['ycentroid']) ** 2)

    def fwhmdiff(fwhm):
        return (fwhm - quasar['FWHM'])

    def lumdiff(flux):
        return (flux - quasar['flux'])

    def xdiff(x):
        return quasar['xcentroid'] - x

    def ydiff(y):
        return quasar['ycentroid'] - y

    davg = 0
    favg = 0
    lavg = 0
    distset = []
    fwhmset = []
    lumset = []

    # Standardize the sources and calculate the best source by combining distance, fwhm difference, and peak flux difference
    for j in range(len(sources)):
        d = distance(sources['xcentroid'][j], sources['ycentroid'][j])
        distset.append(d)
        davg += d
        f = fwhmdiff(sources['FWHM'][j])
        fwhmset.append(f)
        favg += f
        l = lumdiff(sources['flux'][j])
        lumset.append(l)
        lavg += l

    davg /= len(sources)
    favg /= len(sources)
    lavg /= len(sources)
    dstd = np.std(distset)
    fstd = np.std(fwhmset)
    lstd = np.std(lumset)

    # Weight of the three variables places FWHM difference as most important, flux difference as next important,
    # and distance as least important
    psflist = []
    indexlist = []
    zlist = []

    for j in range(len(sources)):
        z = (1/2 * abs(distance(sources['xcentroid'][j], sources['ycentroid'][j]) / dstd)
             + 4/3 * abs(fwhmdiff(sources['FWHM'][j]) / fstd)
             + 2/3 * abs(lumdiff(sources['flux'][j]) / lstd))
        #print(str(z) + " " + str(abs(sources['peak'][j] - quasar['peak'])))

        if (z > 0 and sources['peak'][j] > 0.7 * quasar['peak']
                and inbounds(sources['xcentroid'][j], sources['ycentroid'][j])
                and math.sqrt((sources['xcentroid'][j] - quasar['xcentroid'])**2
                              + (sources['ycentroid'][j] - quasar['ycentroid'])**2) > 1):
            #print(str(z))
            ztot = z
            psfindex = j

            if len(psflist) < 5 and z < 4:
                psflist.append(tuplet(j, z))
                indexlist.append(j)
                zlist.append(z)
            else:
                if len(psflist) > 5 and z < max(zlist) and z < 4:
                    faker = psflist.remove(psf.getZ(max(zlist)))
                    indexlist.remove(faker.getIndex())
                    zlist.remove(faker.getZ())
                    psflist.append(tuplet(j, z))
                    indexlist.append(j)
                    zlist.append(z)

    if len(psflist) == 0:
        return

    stdev = 10000000
    residue = 0
    cutout = 0
    print(indexlist)

    for j in indexlist:
        psf = sources[j]
        chunk_size = 10

        # Find the actual centroid of the PSF using 2D Gaussian Fitting, since DAOStarFinder is inaccurate
        print(psf)

        """
        preshift = scidata[int(psf['ycentroid'] - chunk_size) : int(psf['ycentroid'] + chunk_size + 1),
                           int(psf['xcentroid'] - chunk_size) : int(psf['xcentroid'] + chunk_size + 1)]
        mean, med2, std = sigma_clipped_stats(preshift, sigma=3.0, iters=5)
        mask = [[False for x in range(int(chunk_size*2) + 1)] for y in range(int(chunk_size*2) + 1)]
        for j in range(0, int(chunk_size*2 + 1)):
            for k in range(0, int(chunk_size*2 + 1)):
                if scidata[int(psf['ycentroid'] + k - chunk_size), int(psf['xcentroid'] + j - chunk_size)] < med2:
                    mask[j][k] = True

        #pXc, pYc = centroid_2dg(preshift, mask = mask, error = None)
        #pXc += int(psf['xcentroid']) - chunk_size
        #pYc += int(psf['ycentroid']) - chunk_size
        """

        pXc = psf['xcentroid']
        pYc = psf['ycentroid']
        print("%f, %f" % (pXc, pYc))

        xr = np.arange(int(pXc) - chunk_size, int(pXc) + chunk_size + 1)
        yr = np.arange(int(pYc) - chunk_size, int(pYc) + chunk_size + 1)
        preshift = scidata[int(pYc) - chunk_size : int(pYc) + chunk_size + 1,
                           int(pXc) - chunk_size : int(pXc) + chunk_size + 1]
        shifted = []
        spline = interpolate.interp2d(xr, yr, preshift)
        xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 0.5)
        yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 0.5)

        if len(xrf) > 42:
            xrf = xrf[:-1].copy()
        if len(yrf) > 42:
            yrf = yrf[:-1].copy()

        shifted = spline(xrf, yrf)

        qsocut = fits.open('c:/Research Project/Final Quasar Cut/' + str(i) + '_QSO.fit')
        qsodata = qsocut[0].data.astype(float)
        qsodata -= median

        qsodata /= qsodata[21, 21]  #quasar['peak']
        shifted /= shifted[21, 21]  #psf['peak']
        res = qsodata - shifted
        mean, med, std = sigma_clipped_stats(res, sigma=3.0, iters=5)

        if std < stdev:
            residue = res
            stdev = std
            cutout = shifted

        print(std)

    fits.writeto('Test PSF Cut/' + str(i) + '_PSF.fit', cutout, hdulist[0].header, clobber=True)
    fits.writeto('Test PSF Subtract/' + str(i) + '_1.fit', residue, hdulist[0].header, clobber=True)
    #print(stdev)
    print('\n')
    PSF.append(psf)
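# The candidate-ranking rule used above can be restated as a stand-alone helper:
# a standardized weighted score in which the FWHM difference is weighted most
# heavily, the flux difference next, and the distance least. The weights are
# copied from the loop above; the names and sample numbers are illustrative.
import numpy as np

def match_score(d, dfwhm, dflux, dstd, fstd, lstd):
    return (1 / 2 * abs(d / dstd)
            + 4 / 3 * abs(dfwhm / fstd)
            + 2 / 3 * abs(dflux / lstd))

candidates = [(300.0, 0.1, 5.0), (80.0, 0.9, 40.0)]      # (distance, FWHM diff, flux diff)
scores = [match_score(d, f, l, 120.0, 0.4, 15.0) for d, f, l in candidates]
print(np.argsort(scores))                                # lower score = better PSF match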
hdulist = fits.open('96998-g_MG.fit')
image = hdulist[0].data.astype(float)
mean, median, stddev = sigma_clipped_stats(image, sigma=3.0, iters=5)
image -= median

sigma_psf = 1.1
bkgrms = MADStdBackgroundRMS()
std = bkgrms.calc_background_rms(image)
iraffind = IRAFStarFinder(threshold=5 * std,
                          fwhm=sigma_psf * gaussian_sigma_to_fwhm,
                          minsep_fwhm=0.01, roundhi=5.0, roundlo=-5.0,
                          sharplo=0.0, sharphi=2.0)

gauss = 0
if (np.ma.count(image) >= 7):
    gauss = photutils.fit_2dgaussian(image[40 : 61, 40 : 61], mask=None)

fwhm = 0
if gauss != 0:
    fwhm = abs(gauss.x_stddev) * gaussian_sigma_to_fwhm

print(fwhm)
daogroup = DAOGroup(5.0 * sigma_psf * gaussian_sigma_to_fwhm)
mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
psf_model = IntegratedGaussianPRF(sigma=abs(gauss.x_stddev))
fitshape = (int(3. * fwhm), int(3. * fwhm))
if int(3. * fwhm) % 2 == 0:
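# The snippet above is cut off at the fitshape adjustment. A hedged sketch of the
# usual continuation (the odd-size bump and the BasicPSFPhotometry assembly are
# assumptions based on the common photutils PSF-photometry pattern, not taken
# from the original script):
side = int(3. * fwhm)
if side % 2 == 0:
    side += 1                               # fitshape must be odd for the PSF fit
fitshape = (side, side)

from photutils.psf import BasicPSFPhotometry

photometry = BasicPSFPhotometry(group_maker=daogroup,
                                bkg_estimator=mmm_bkg,
                                psf_model=psf_model,
                                fitter=fitter,
                                fitshape=fitshape,
                                finder=iraffind)
result_table = photometry(image=image)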
def begin(index):
    #i = int(index)
    i = int(index.split('-')[0])
    mgi = int(index.split('-')[1])
    color = index.split('-')[2]

    #try:
    print(index)
    #filename = 'Test Data Extract/' + str(i) + '.fit'
    #filename = str(i) + '-g.fit'
    filename = '/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
    hdulist = fits.open(filename)

    #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit')
    qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qx = qlist[0].header['XCOORD']
    qy = qlist[0].header['YCOORD']
    obj_id = qlist[0].header['ID']
    #print("%f, %f" % (x, y))
    qlist.close()
    #except:
    #    print("No coordinates")
    #    return

    # Save some frickin time
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, iters=5)
    #print(median)

    """
    if 'SKY' in hdulist[0].header.keys():
        scidata -= float(hdulist[0].header['SOFTBIAS'])
        scidata -= float(hdulist[0].header['SKY'])
    else:
        scidata -= median
        print(str(i) + ' No sky')
        #return
    """

    #print(sigma_clipped_stats(scidata, sigma=3.0, iters=5))

    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0

    bkg_sigma = mad_std(scidata)

    try:
        #print('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit')
        obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '.fit', hdu=1)
    except:
        print(str(i) + ' No Table')
        return

    #line_data = linecache.getline('Full Data.txt', i).split()
    #line_data = linecache.getline('DR12 QSO.txt', i).split()
    #print(len(line_data))
    #obj_id = int(line_data[52])
    quasar = obj_table[obj_id - 1]
    gauss = 0
    #scidata /= hdulist[0].header['NMGY']

    print("%f, %f" % (qx, qy))
    data = scidata[int(qy) - 10:int(qy) + 11, int(qx) - 10:int(qx) + 11]
    #print(data)
    if (np.ma.count(data) >= 7):
        gauss = photutils.fit_2dgaussian(data, mask=None)

    fwhm_x = 0
    fwhm_y = 0
    #print(gauss.x_stddev)
    #print(gauss.y_stddev)
    if gauss != 0:
        fwhm_x = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(abs(gauss.x_stddev.value))
        fwhm_y = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(abs(gauss.y_stddev.value))
        #qsigma = np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)

    print(fwhm_x)
    print(fwhm_y)

    if math.isnan(fwhm_x):
        fwhm_x = 2.8
    if math.isnan(fwhm_y):
        fwhm_y = 2.8

    # If no quasar is found, the field image is deemed corrupt and not used
    if quasar == 0:
        print(str(i) + ' No quasar')
        return

    # Calculate the 18 magnitude threshold
    mag18 = 0
    header = hdulist[0].header
    try:
        mag18 = header['FLUX20'] * 10**(8. - 18 / 2.5)
    except:
        if color == 'g':
            mag18 = 1500
        if color == 'r':
            mag18 = 10500
        if color == 'i':
            mag18 = 8800
        if color == 'z':
            mag18 = 1900
        print(str(i) + ' MAG20 APPROX = 14000')

    qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qsodata = qsocut[0].data.astype(float)

    """
    if 'SKY' in hdulist[0].header.keys():
        qsodata -= float(hdulist[0].header['SOFTBIAS'])
        qsodata -= float(hdulist[0].header['SKY'])
    else:
        qsodata -= median
    """

    largearr = []
    stars = []
    chunk_size = 50
    diff_fwhm = 1000000
    psf_fwhm = 100
    qsovisited = connected(qsodata, 50, 50, mean + 3 * std, np.zeros((101, 101), dtype=bool))
    qmax = np.max(qsodata)

    for j in range(len(obj_table)):
        sx = obj_table['colc'][j][pointer]
        sy = obj_table['rowc'][j][pointer]

        if (obj_table['objc_type'][j] == 6 and distance(sx, sy, qx, qy) > 5
                and inbounds(sx + chunk_size + 6, sy + chunk_size + 6)
                and inbounds(sx - chunk_size - 5, sy - chunk_size - 5)
                and obj_table['psfCounts'][j][pointer] > mag18):
            #try:
            """
            preshift = scidata[int(sy - 10) : int(sy + 11), int(sx - 10) : int(sx + 11)]
            xc, yc = centroid_2dg(preshift, mask = None)
            xc += quasar['colc'][pointer] - 10
            yc += quasar['rowc'][pointer] - 10
            """
            xc = obj_table['colc'][j][pointer]
            yc = obj_table['rowc'][j][pointer]

            preshift = scidata[int(yc - chunk_size - 5):int(yc + chunk_size + 6),
                               int(xc - chunk_size - 5):int(xc + chunk_size + 6)]
            spline = interpolate.interp2d(
                np.arange(int(xc - chunk_size - 5), int(xc + chunk_size + 6)),
                np.arange(int(yc - chunk_size - 5), int(yc + chunk_size + 6)),
                preshift)
            xrang = np.arange(xc - chunk_size, xc + chunk_size + 1)
            yrang = np.arange(yc - chunk_size, yc + chunk_size + 1)

            if len(xrang) > 2 * chunk_size + 1:
                xrang = xrang[:-1]
            if len(yrang) > 2 * chunk_size + 1:
                yrang = yrang[:-1]

            shifted1 = spline(xrang, yrang)
            bkg_sigma = mad_std(scidata)
            #mean1, median1, std1 = sigma_clipped_stats(shifted1, sigma=3.0, iters=5)
            #print("%f, %f, %f" % (mean1, median1, std1))
            #daofind = DAOStarFinder(fwhm = 2., threshold = 3.*bkg_sigma)
            #sources = daofind.find_stars(shifted1)

            """
            mag22 = 0
            if color == 'g':
                mag22 = 2500
            if color == 'r':
                mag22 = 1900
            if color == 'i':
                mag22 = 1400
            if color == 'z':
                mag22 = 300
            """

            shifted1 = checkInner(shifted1, obj_table, xc, yc, mean, std, pointer)
            shifted1 = perimeter(shifted1, mean, mean + 3 * std)
            #shifted1 = checkOutter(shifted1, mean1, std1)
            #print('REACHED')

            gauss1 = photutils.fit_2dgaussian(shifted1[40:61, 40:61], mask=None)
            fit_fwhm_x = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(abs(gauss1.x_stddev.value))
            fit_fwhm_y = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(abs(gauss1.y_stddev.value))
            #print("%f, %f, %f, %f" % (fit_fwhm_x, fit_fwhm_y, fwhm_x, fwhm_y))

            if abs(fit_fwhm_x - fwhm_x) < 0.25 and abs(fit_fwhm_y - fwhm_y) < 0.25:
                #shifted1 = normalize(shifted1, 50, 50, np.max(shifted1), np.max(qsodata), mean1 + 5 * std1, np.zeros((101, 101), dtype=bool))
                #visited = connected(shifted1, 50, 50, mean1 + 3 * std, np.zeros((101, 101), dtype=bool))
                smax = np.max(shifted1)
                for r in range(len(qsovisited)):
                    for c in range(len(qsovisited)):
                        if qsovisited[r][c] == True:
                            shifted1[r][c] /= obj_table['psfCounts'][j][pointer]
                            shifted1[r][c] *= obj_table['psfCounts'][obj_id - 1][pointer]

                #shifted1 /= np.max(shifted1)
                #shifted1 *= np.max(qsodata)
                largearr.append(np.reshape(shifted1, 10201))
                #stars.append(shifted1)
            #except:
            #    continue

    largearr = np.array(largearr)
    print(np.shape(largearr))

    # Set number of components in PCA, use incremental PCA (IPCA) due to high efficiency and speed
    numcomp = len(largearr)

    # Need a healthy number of sources to make the PSF fitting in order to decrease noise, setting at 5% threshold
    if len(largearr) < 10:
        print('No Sources')
        return

    print(numcomp)

    mean_vector = []
    #print(np.shape(largearr))
    try:
        for j in range(0, 10201):
            mean_vector.append(np.mean(largearr[:, j]))
    except:
        print("NO SOURCE FOUND")
        return

    largearr -= mean_vector

    ipca = IncrementalPCA(n_components=numcomp)
    ipca.fit(largearr)
    ipca_comp = ipca.components_
    #print(np.shape(ipca_comp))

    # Only use the components of the central portion of the quasar, since there may be overfitting due to strength of ipca
    new_comp = []
    for j in range(len(largearr)):
        temp = np.reshape(ipca_comp[j, :], (101, 101))
        new_comp.append(np.reshape(temp[47:54, 47:54], 49))

    new_comp = np.array(new_comp)
    new_comp = new_comp.T
    print(np.shape(new_comp))

    ipca_comp = ipca_comp.T
    #print(ipca_comp)

    #print(np.shape(largearr[0, :]))
    #print(np.shape(ipca_comp))
    total_res = 0
    max_median = 10000000
    take_final = 10

    # Final fitting of the first n components, as determined by take_final, into the quasar to build a PSF fit
    print(np.shape(ipca_comp))
    qsodata = np.reshape(qsodata, 10201)
    qsodata -= mean_vector
    #qsodata = np.reshape(qsodata, (101, 101))
    #coeff = np.dot(np.reshape(qsodata[47 : 54, 47 : 54], 49), new_comp)
    coeff = np.dot(qsodata, ipca_comp)
    final_fit = np.dot(ipca_comp[:, 0:take_final], coeff[0:take_final])
    final_fit += mean_vector
    final_fit = np.reshape(final_fit, (101, 101))
    #final_fit /= len(largearr)

    qsodata = np.reshape(qsodata, 10201)
    qsodata += mean_vector
    qsodata = np.reshape(qsodata, (101, 101))
    #final_fit /= final_fit[50, 50]
    #final_fit *= qsodata[50, 50]

    """
    fmax = np.max(final_fit)
    qmax = np.max(qsodata)
    for r in range(len(qsovisited)):
        for c in range(len(qsovisited)):
            if qsovisited[r][c] == True:
                final_fit[r][c] /= fmax
                final_fit[r][c] *= qmax
    print(np.min(final_fit))
    print(np.min(qsodata))
    """

    """
    fxc, fyc = centroid_2dg(final_fit[40 : 61, 40 : 61], mask = None)
    fxc += 40
    fyc += 40
    print("%f, %f" % (fxc, fyc))
    spline = interpolate.interp2d(np.arange(101), np.arange(101), final_fit)
    fxcr = np.arange(fxc - 50, fxc + 51)
    fycr = np.arange(fyc - 50, fyc + 51)
    if len(fxcr) > 101:
        fxcr = fxcr[1:]
    if len(fycr) > 101:
        fycr = fycr[1:]
    final_fit = spline(fxcr, fycr)
    """

    # Section to normalize the PSF fitting by photon count, but this is unnecessary since CORRECT PCA fits better
    gauss_fit = photutils.fit_2dgaussian(final_fit[40:61, 40:61], mask=None)
    fit_fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss_fit.x_stddev)
    #print(fit_fwhm)
    print("%f, %f" % (fwhm_x, fit_fwhm))
    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    ffwhm = max(fwhm_x, fit_fwhm)
    ffphoton_1sig = photonCount(50, 50, 4 * fit_fwhm, final_fit)
    qsophoton_1sig = photonCount(50, 50, 4 * fit_fwhm, qsodata)
    print("%f, %f" % (qsophoton_1sig, ffphoton_1sig))

    for j in range(len(final_fit)):
        for k in range(len(final_fit)):
            if distance(50, 50, j, k) < 4 * fit_fwhm:
                final_fit[j][k] /= ffphoton_1sig
                final_fit[j][k] *= qsophoton_1sig

    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    #final_fit /= ffphoton_1sig
    #final_fit *= qsophoton_1sig

    """
    line_data = linecache.getline('Full Data.txt', i).split()
    if color == 'g':
        mag = float(line_data[6])
    if color == 'r':
        mag = float(line_data[8])
    if color == 'i':
        mag = float(line_data[10])
    if color == 'z':
        mag = float(line_data[12])
    if color == 'u':
        mag = float(line_data[4])

    #try:
    multiplier = 10**(8 - mag / 2.5) * header['FLUX20']
    multiplier1 = quasar['psfCounts'][pointer]
    print(multiplier)
    print(str(multiplier1 - header['SKY']))
    final_fit /= ffphoton_1sig
    final_fit *= multiplier
    #except:
    #final_fit *= qsodata[50, 50]
    #return
    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    """

    # Final residue from subtraction of PSF from QSO
    residue = qsodata - final_fit
    #mean, median, stddev = sigma_clipped_stats(residue[0 : 10, 0 : 10], sigma=3.0, iters=5)
    #residue -= median

    try:
        fits.writeto('/data/marvels/billzhu/2175 PSF Cut/' + color + '/' + str(index) + '_PSF.fit',
                     final_fit, hdulist[0].header, clobber=True)
        fits.writeto('/data/marvels/billzhu/2175 PSF Subtract/' + color + '/' + str(index) + '_SUB.fit',
                     residue, hdulist[0].header, clobber=True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Cut/0.37 - 0.55/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB.fit', residue, hdulist[0].header, clobber = True)

        """
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-1.fit', residue1, hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-2.fit', residue2, hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-3.fit', residue3, hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-4.fit', residue4, hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-5.fit', stars[0], hdulist[0].header, clobber = True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB-6.fit', stars[1], hdulist[0].header, clobber = True)
        """

        #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        print('\n')
        print("DONE TO BOTTOM")
    except:
        print('HEADER IS CORRUPT')
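# The count thresholds in the function above lean on the SDSS FLUX20 header
# keyword (counts of a 20th-magnitude source). The conversion behind mag18 and
# the commented-out multiplier is the standard one; restated as a helper with
# an illustrative FLUX20 value:
def mag_to_counts(mag, flux20):
    # counts = flux20 * 10**(0.4 * (20 - mag)), i.e. flux20 * 10**(8 - mag / 2.5)
    return flux20 * 10**(0.4 * (20.0 - mag))

print(mag_to_counts(18.0, 2000.0))          # the mag18 threshold for FLUX20 = 2000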
def begin(index):
    chunk_size = 50
    i = int(index.split('-')[0])
    color = index.split('-')[1]

    #line = linecache.getline('Full Data.txt', i)
    #num, x, y = linecache.getline('Pixel Coordinates 50000.txt', i).split()
    #x = int(x)
    #y = int(y)
    #print("%f %f" % (x, y))

    try:
        #filename = 'Test Data Extract/' + str(i) + '.fit'
        filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
        #filename = 'Reference Dataset/' + str(i) + '_REF.fit'
        hdulist = fits.open(filename)
        qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
        #qlist = fits.open('Reference Quasar Cut/' + str(i) + '_REF.fit')
        x = qlist[0].header['XCOORD']
        y = qlist[0].header['YCOORD']
    except:
        print("No coordinates")
        return

    #half = 500
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, iters=5)

    change_top = False
    if x + chunk_size > 2048:
        change_top = True
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + chunk_size > 1489:
        change_top = True
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 0)

    scidata -= median

    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=2., threshold=5. * bkg_sigma)
    sources = daofind(scidata[int(y - 10):int(y + 10), int(x - 10):int(x + 10)])

    # Update coordinates of the sources
    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10

    # Create new column that contains the FWHM of each source for comparison later
    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name='FWHM')
    sources.add_column(column)

    # Find the quasar and calculate its FWHM
    qsigma = 0
    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) - x) < 2 and abs(round(sources['ycentroid'][j]) - y) < 2:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d %d %f %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[int(sources['ycentroid'][j] - width / 2):int(sources['ycentroid'][j] + width / 2) + 1,
                           int(sources['xcentroid'][j] - width / 2):int(sources['xcentroid'][j] + width / 2) + 1]

            """
            plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
            plt.show()
            plt.pause(3)
            """

            gauss = 0
            if (np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask=None)

            fwhm = 0
            if gauss != 0:
                fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss.x_stddev)
            quasar['FWHM'] = fwhm
            #qsigma = np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
            #print(quasar['FWHM'])
            break

    ztot = 10000
    print(quasar)

    # If no quasar is found, the field image is deemed corrupt and not used
    if quasar == 0:
        return

    # Define cutout image limits, adjusted to field image boundaries as necessary i.e. < 0 or > max x/y values
    """
    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])

    image = scidata[yl : yu, xl : xu]
    """

    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=0.8 * quasar['FWHM'], threshold=5. * bkg_sigma, roundlo=-0.15, roundhi=0.15)
    sources = daofind.find_stars(scidata)

    # Shift the source coordinates to the actual image coordinates
    #sources['xcentroid'] += xl
    #sources['ycentroid'] += yl

    # If no sources found, skip iteration
    if len(sources) <= 0:
        return

    print(len(sources))

    # Calculate the FWHM of each identified source, and append them into a column that is added to the source table
    # Splices the data array for the quasar, with the alleged centroid at the center
    # Fits a 2D Gaussian curve onto the array, and uses the relation between sigma and fwhm
    FWHM = []
    stddev_list = []

    for j in range(len(sources)):
        width = int(np.sqrt(sources['npix'][j]))
        #print("%d %d %f %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
        data = scidata[int(sources['ycentroid'][j] - width / 2):int(sources['ycentroid'][j] + width / 2) + 1,
                       int(sources['xcentroid'][j] - width / 2):int(sources['xcentroid'][j] + width / 2) + 1]

        """
        plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
        plt.show()
        plt.pause(3)
        """

        gauss = 0
        if (np.ma.count(data) >= 7):
            gauss = photutils.fit_2dgaussian(data, mask=None)

        fwhm = 0
        if gauss != 0:
            fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(gauss.x_stddev)
        FWHM.append(fwhm)

        if gauss == 0:
            stddev_list.append(0)
        else:
            stddev_list.append(np.sqrt(gauss.x_stddev))

    column = Column(FWHM, name='FWHM')
    sources.add_column(column)
    column = Column(stddev_list, name='stddev')
    sources.add_column(column)
    #print(sources)

    # Helper methods for determining differences between PSF source and QSO
    def distance1(x, y):
        return math.sqrt((x - quasar['xcentroid'])**2 + (y - quasar['ycentroid'])**2)

    def fwhmdiff(fwhm):
        return (fwhm - quasar['FWHM'])

    def lumdiff(flux):
        return (flux - quasar['flux'])

    def xdiff(x):
        return quasar['xcentroid'] - x

    def ydiff(y):
        return quasar['ycentroid'] - y

    distset = []
    fwhmset = []
    lumset = []

    # Standardize the sources and calculate the best source by combining distance, fwhm difference, and peak flux difference
    for j in range(len(sources)):
        d = distance1(sources['xcentroid'][j], sources['ycentroid'][j])
        distset.append(d)
        f = fwhmdiff(sources['FWHM'][j])
        fwhmset.append(f)
        l = lumdiff(sources['flux'][j])
        lumset.append(l)

    dstd = np.std(distset)
    fstd = np.std(fwhmset)
    lstd = np.std(lumset)

    # Weight of the three variables places FWHM difference as most important, flux difference as next important,
    # and distance as least important
    psflist = []
    indexlist = []
    zlist = []

    for j in range(len(sources)):
        z = (1 / 3 * abs(distance1(sources['xcentroid'][j], sources['ycentroid'][j]) / dstd)
             + 2 * abs(fwhmdiff(sources['FWHM'][j]) / fstd)
             + 2 / 3 * abs(lumdiff(sources['flux'][j]) / lstd))

        """
        tempcut = scidata[int(sources['ycentroid'][j]) - chunk_size : int(sources['ycentroid'][j]) + chunk_size,
                          int(sources['xcentroid'][j]) - chunk_size : int(sources['xcentroid'][j]) + chunk_size]
        s1 = daofind.find_stars(tempcut)
        if len(s1) > 1:
            print(len(s1))
            continue
        """

        #print(str(z))
        if (z > 0
                and math.sqrt((sources['xcentroid'][j] - quasar['xcentroid'])**2
                              + (sources['ycentroid'][j] - quasar['ycentroid'])**2) > 1
                and sources['xcentroid'][j] - chunk_size >= 0
                and sources['ycentroid'][j] - chunk_size >= 0
                and sources['xcentroid'][j] + chunk_size < 2048
                and sources['ycentroid'][j] + chunk_size < 1489):
            #print(str(z))
            ztot = z
            psfindex = j

            # If the list contains less than 5 suitable sources that satisfy all the conditions, then directly add to list
            # If the list already contains 5 sources, then check if the current source has a lower Z value than the
            # source with the largest Z value
            if len(psflist) < 9 and z < 3:
                #print(True)
                psflist.append(tuplet(j, z))
                indexlist.append(j)
                zlist.append(z)
            else:
                if len(psflist) > 9 and z < max(zlist) and z < 3:
                    faker = psflist.remove(psf.getZ(max(zlist)))
                    indexlist.remove(faker.getIndex())
                    zlist.remove(faker.getZ())
                    psflist.append(tuplet(j, z))
                    indexlist.append(j)
                    zlist.append(z)

    # If no suitable PSF sources are found that satisfy the boundaries, then the file is not used
    if len(psflist) == 0:
        print("FAILURE")
        return

    PSFlist = []
    FWHMlist = []
    psf = 0
    fwhmdiff = 10000000

    for j in indexlist:
        if abs(sources['FWHM'][j] - quasar['FWHM']) < fwhmdiff:
            psf = sources[j]
            fwhmdiff = abs(sources['FWHM'][j] - quasar['FWHM'])

    """
    PSFlist.append(sources[j])
    FWHMlist.append(sources['FWHM'][j])

    medFWHM = 0
    if len(FWHMlist) % 2 == 0:
        medFWHM = FWHMlist[len(FWHMlist) // 2 - 1]
    else:
        medFWHM = np.median(FWHMlist)

    for k in PSFlist:
        if k['FWHM'] == medFWHM:
            print(True)
            psf = k
            break
    """

    # Interpolates the PSF source to its actual centroid, upsampling it 2x
    #try:
    chunk_size = 50
    print(psf)
    pXc = psf['xcentroid']
    pYc = psf['ycentroid']
    print("%f, %f" % (pXc, pYc))

    xr = np.arange(int(pXc) - chunk_size - 5, int(pXc) + chunk_size + 6)
    yr = np.arange(int(pYc) - chunk_size - 5, int(pYc) + chunk_size + 6)
    preshift = scidata[int(pYc) - chunk_size - 5:int(pYc) + chunk_size + 6,
                       int(pXc) - chunk_size - 5:int(pXc) + chunk_size + 6]
    print(np.shape(preshift))
    shifted = []
    spline = interpolate.interp2d(xr, yr, preshift)
    xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 1)
    yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 1)

    if len(xrf) > 101:
        xrf = xrf[:-1].copy()
    if len(yrf) > 101:
        yrf = yrf[:-1].copy()

    shifted = spline(xrf, yrf)
    mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, iters=5)
    shifted = checkOutter(shifted, mean1, std1)

    #qsocut = fits.open('c:/Research Project/Final Quasar Cut/' + str(i) + '_QSO.fit')
    qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    #qsocut = fits.open('c:/Research Project/Reference Quasar Cut/' + str(i) + '_REF.fit')
    qsodata = qsocut[0].data.astype(float)
    qsodata -= median

    #qsocount = photoncount(21, 21, qsigma, qsodata)
    #qsodata /= qsocount  #quasar['peak']
    #psfcount = photoncount(21, 21, psf['stddev'], shifted)
    #shifted /= np.max(shifted)  #psf['peak']
    #shifted *= np.max(qsodata)

    gauss_fit = photutils.fit_2dgaussian(shifted[40:61, 40:61], mask=None)
    fit_fwhm = 2 * np.sqrt(2 * np.log(2)) * np.sqrt(abs(gauss_fit.x_stddev))
    #print(fit_fwhm)
    print("%f, %f" % (quasar['FWHM'], fit_fwhm))
    ffwhm = max(quasar['FWHM'], fit_fwhm)
    ffphoton_5sig = photonCount(50, 50, ffwhm, shifted)

    """
    qsophoton_4sig = photonCount(50, 50, ffwhm, qsodata)

    for j in range(len(shifted)):
        for k in range(len(shifted)):
            if distance(50, 50, j, k) < 4 * ffwhm:
                shifted[j][k] /= ffphoton_4sig
                shifted[j][k] *= qsophoton_4sig
    """

    line_data = linecache.getline('Full Data.txt', i).split()
    gmag = float(line_data[6])

    try:
        multiplier = 10**(gmag / (-2.5)) * 10**8 * hdulist[0].header['FLUX20']
        shifted /= ffphoton_5sig
        shifted *= multiplier
    except:
        #final_fit *= qsodata[50, 50]
        return

    residue = qsodata - shifted

    """
    mean, med, std = sigma_clipped_stats(residue, sigma=3.0, iters=5)
    check = False
    print("%f, %f" % (med, std))
    threshold = mean + 3 * std

    for j in range(42):
        for k in range(42):
            if shifted[k, j] > threshold:
                #print("Over")
                check = checkNoise(j, k, 21, 21, residue)
                if check == True:
                    #print("Over True")
                    return
    """

    #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', shifted, hdulist[0].header, clobber = True)
    #fits.writeto('Test PSF Subtract/' + str(i) + '_1.fit', residue, hdulist[0].header, clobber = True)
    fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_PSF.fit',
                 shifted, hdulist[0].header, clobber=True)
    fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB.fit',
                 residue, hdulist[0].header, clobber=True)
    #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
    print('\n')
    #except:
    #    return
    return