def testGaussian():
    from support import gaussians

    log = lg.setUpLogger('delete.me')

    data = gaussians.Gaussian2D(100, 100, 200, 200, 20, 20)['Gaussian']
    data /= np.max(data)
    data *= 2.e5

    #measure shape
    sh = shape.shapeMeasurement(data, log)
    reference = sh.measureRefinedEllipticity()
    print reference

    #non-linearity shape
    newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(data, 0.2)
    newdata[newdata < 0.] = 0.
    sh = shape.shapeMeasurement(newdata, log)
    nonlin = sh.measureRefinedEllipticity()
    print nonlin

    print reference['ellipticity'] - nonlin['ellipticity']
    print reference['e1'] - nonlin['e1']
    print reference['e2'] - nonlin['e2']
    print reference['R2'] - nonlin['R2']
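
#For reference: the shape quantities used throughout these routines (the 'e1', 'e2',
#'ellipticity' and 'R2' keys returned by measureRefinedEllipticity) are assumed to follow
#the standard quadrupole-moment convention. A small illustrative helper capturing that
#assumption (a sketch only; not used by the routines in this file):
def ellipticityFromQuadrupoleMoments(Qxx, Qyy, Qxy):
    """
    Convert second-order moments to (e1, e2, e, R2); a sketch of the assumed convention.
    """
    denom = Qxx + Qyy
    e1 = (Qxx - Qyy) / denom
    e2 = 2. * Qxy / denom
    e = math.sqrt(e1 * e1 + e2 * e2)
    R2 = Qxx + Qyy
    return e1, e2, e, R2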
def testNonlinearityModel(file='data/psf12x.fits', oversample=12.0, sigma=0.75, scale=2e5, amp=1e-3,
                          phase=0.98, multiplier=1.5, outdir='.'):
    #read in PSF and renormalize it to norm
    data = pf.getdata(file)
    data /= np.max(data)
    data *= scale

    #derive reference values from clean PSF
    settings = dict(sampling=1.0 / oversample, sigma=sigma)
    sh = shape.shapeMeasurement(data, log, **settings)
    reference = sh.measureRefinedEllipticity()
    print reference

    #apply nonlinearity model to the scaled PSF
    newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(data.copy(), amp, phase=phase, multi=multiplier)
    newdata[newdata < 0.] = 0.

    #measure e and R2 from the postage stamp image
    sh = shape.shapeMeasurement(newdata.copy(), log, **settings)
    results = sh.measureRefinedEllipticity()
    print results

    print reference['ellipticity'] - results['ellipticity'], reference['R2'] - results['R2']

    fileIO.writeFITS(data, outdir + '/scaledPSF.fits', int=False)
    fileIO.writeFITS(newdata, outdir + '/nonlinearData.fits', int=False)
    fileIO.writeFITS(newdata / data, outdir + '/nonlinearRatio.fits', int=False)
def testFiles():
    #testing part, looks for blob?.fits and psf.fits to derive centroids and ellipticity
    import pyfits as pf
    import glob as g
    from support import logger as lg
    import sys

    files = g.glob('blob?.fits')

    log = lg.setUpLogger('shape.log')
    log.info('Testing shape measuring class...')

    for file in files:
        log.info('Processing file %s' % file)
        data = pf.getdata(file)
        sh = shape.shapeMeasurement(data, log)
        results = sh.measureRefinedEllipticity()
        sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
        print file
        pprint.pprint(results)
        print

    file = 'psf1x.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    sh = shape.shapeMeasurement(data, log)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    file = 'stamp.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    settings = dict(sigma=10.0)
    sh = shape.shapeMeasurement(data, log, **settings)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    file = 'gaussian.fits'
    log.info('Processing file %s' % file)
    data = pf.getdata(file)
    settings = dict(sampling=0.2)
    sh = shape.shapeMeasurement(data, log, **settings)
    results = sh.measureRefinedEllipticity()
    sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
    print file
    pprint.pprint(results)
    print

    log.info('All done\n\n')
def testShapeMeasurementAlgorithms(log, sigma=0.75, iterations=3, weighted=True,
                                   fixedPosition=False, fixedX=None, fixedY=None):
    #Thibauts data
    folder = '//Users/sammy/EUCLID/CTItesting/Reconciliation/'
    wcti = pf.getdata(folder + 'damaged_image_parallel.fits')
    wo = pf.getdata(folder +
                    'galaxy_100mas_dist2_q=0.9568_re=22.2670_theta=-1.30527_norm=1000_dx=0.2274_dy=0.2352.fits')

    #reset settings
    settings = dict(sigma=sigma, iterations=iterations, weighted=weighted,
                    fixedX=fixedX, fixedY=fixedY, fixedPosition=fixedPosition)

    #calculate shapes
    sh = shape.shapeMeasurement(wcti, log, **settings)
    wctiresults = sh.measureRefinedEllipticity()

    sh = shape.shapeMeasurement(wo, log, **settings)
    woresults = sh.measureRefinedEllipticity()

    #remove one key not needed...
    #woresults.pop('GaussianWeighted', None)
    #pprint.pprint(woresults)

    if fixedPosition == True:
        print '\n\n\n\n%i iterations, keeping centroid fixed' % iterations
    else:
        print '\n\n\n\n%i iterations' % iterations

    #Thibaut's results from his email
    x = 73.41129368592757
    y = 84.48016119109027
    e1 = 0.8130001725751526
    e2 = 0.004147873150093767
    e = 0.8130107535936392
    r2 = 68.45385021546944
    xn = 83.82895826068
    yn = 84.504735271
    e1n = 0.026406
    e2n = 0.031761186
    en = 0.04130482605
    r2n = 11.4263310

    print 'Without CTI:'
    print 'Parameter Thibaut SMN Delta [S-T]'
    print 'X %.3f %.3f %.8f' % (xn, woresults['centreX'], woresults['centreX'] - xn)
    print 'Y %.3f %.3f %.8f' % (yn, woresults['centreY'], woresults['centreY'] - yn)
    print 'e_1 %.5f %.5f %.8f' % (e1n, woresults['e1'], woresults['e1'] - e1n)
    print 'e_2 %.5f %.5f %.8f' % (e2n, woresults['e2'], woresults['e2'] - e2n)
    print 'e %.5f %.5f %.8f' % (en, woresults['ellipticity'], woresults['ellipticity'] - en)
    print 'R**2 %.2f %.2f %.8f' % (r2n, woresults['R2'], woresults['R2'] - r2n)

    print '\nWith CTI:'
    print 'Parameter Thibaut SMN Delta [S-T]'
    print 'X %.3f %.3f %.8f' % (x, wctiresults['centreX'], wctiresults['centreX'] - x)
    print 'Y %.3f %.3f %.8f' % (y, wctiresults['centreY'], wctiresults['centreY'] - y)
    print 'e_1 %.5f %.5f %.8f' % (e1, wctiresults['e1'], wctiresults['e1'] - e1)
    print 'e_2 %.5f %.5f %.8f' % (e2, wctiresults['e2'], wctiresults['e2'] - e2)
    print 'e %.5f %.5f %.8f' % (e, wctiresults['ellipticity'], wctiresults['ellipticity'] - e)
    print 'R**2 %.2f %.2f %.8f' % (r2, wctiresults['R2'], wctiresults['R2'] - r2)
def measureGaussianR2(log):
    #gaussian
    sigma = 2. / (2. * math.sqrt(2. * math.log(2)))
    Gaussian = shape.shapeMeasurement(np.zeros((100, 100)), log).circular2DGaussian(50, 50, sigma)['Gaussian']

    settings = dict(sigma=sigma, weighted=False)
    sh = shape.shapeMeasurement(Gaussian, log, **settings)
    results = sh.measureRefinedEllipticity()

    print
    print results['R2']
    print
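
#Analytic expectation for measureGaussianR2 (a hedged sanity check, not part of the
#measurement itself): for a circular 2D Gaussian the unweighted second moments are
#Qxx = Qyy = sigma**2, so R2 = Qxx + Qyy = 2*sigma**2. With FWHM = 2 pixels,
#sigma = 2 / (2*sqrt(2 ln 2)) ~ 0.8493 pixels, so the printed R2 should be close to
#~1.44 pixels**2, up to pixelisation and truncation of the 100x100 stamp.
def expectedGaussianR2(fwhm=2.):
    sigma = fwhm / (2. * math.sqrt(2. * math.log(2)))
    return 2. * sigma ** 2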
def testCTIinclusion(log, sigma=0.75, iterations=3, weighted=True, fixedPosition=False, fixedX=None, fixedY=None):
    #reset settings
    settings = dict(sigma=sigma, iterations=iterations, weighted=weighted,
                    fixedX=fixedX, fixedY=fixedY, fixedPosition=fixedPosition)

    #Thibauts data
    folder = '//Users/sammy/EUCLID/CTItesting/Reconciliation/'
    wcti = pf.getdata(folder + 'damaged_image_parallel.fits')
    wocti = pf.getdata(folder +
                       'galaxy_100mas_dist2_q=0.9568_re=22.2670_theta=-1.30527_norm=1000_dx=0.2274_dy=0.2352.fits')
    wocti /= np.max(wocti)
    wocti *= 420.

    sh = shape.shapeMeasurement(wcti, log, **settings)
    wctiresults = sh.measureRefinedEllipticity()

    #include CTI with my recipe
    ctiMSSL = addCTI(wocti.copy()).T
    ctiThibault = addCTI(wocti.copy(), thibautCDM03=True).T

    sh = shape.shapeMeasurement(ctiMSSL, log, **settings)
    wMSSLctiresults = sh.measureRefinedEllipticity()

    sh = shape.shapeMeasurement(ctiThibault, log, **settings)
    wThibautctiresults = sh.measureRefinedEllipticity()

    fileIO.writeFITS(ctiThibault, 'tmp2.fits', int=False)
    fileIO.writeFITS(wcti / ctiThibault, 'tmp3.fits', int=False)

    for key in wctiresults:
        tmp1 = wctiresults[key] - wMSSLctiresults[key]
        tmp2 = wctiresults[key] - wThibautctiresults[key]
        if 'Gaussian' in key:
            print key, np.max(np.abs(tmp1)), np.max(np.abs(tmp2))
        else:
            print key, tmp1, tmp2
def testShapeMeasurementAlgorithm(log, file='data/psf1x.fits', psfs=5000, sigma=0.75, iterations=4):
    """
    :param log:
    :param file:
    :param psfs:
    :param sigma:
    :param iterations:

    :return:
    """
    #read in PSF and rescale to avoid rounding or truncation errors
    data = pf.getdata(file)
    data /= np.max(data)

    scales = np.random.random_integers(2e2, 2e5, psfs)

    settings = dict(sigma=sigma, iterations=iterations)

    e = []
    R2 = []
    for scale in scales:
        sh = shape.shapeMeasurement(data.copy() * scale, log, **settings)
        results = sh.measureRefinedEllipticity()

        e.append(results['ellipticity'])
        R2.append(results['R2'])

    return np.asarray(e), np.asarray(R2)
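
#A minimal usage sketch for testShapeMeasurementAlgorithm: run a modest number of random
#flux scalings and summarise the scatter of the recovered shape. Assumes the default PSF
#file ('data/psf1x.fits') is available; the log file name is illustrative.
def exampleShapeStability(psfs=100):
    log = lg.setUpLogger('shapeStability.log')
    e, R2 = testShapeMeasurementAlgorithm(log, psfs=psfs)
    #scatter of ellipticity and size over the random flux scalings
    print 'e : mean=%e std=%e' % (np.mean(e), np.std(e))
    print 'R2: mean=%e std=%e' % (np.mean(R2), np.std(R2))
    return e, R2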
def shapeComparisonToAST(oversample=3.):
    """
    To calculate shapes from AST PSFs.

    One of the actions from the PLM-SRR was 8941 (RID No: ENG-219), with the following wording:
    ASFT shall provide to the VIS team a PSF profile with associated R2 with the sampling set to
    4 microns and the VIS team will check that when applying the R2 processing the result is
    identical, to double check that the process is correct.
    """
    log = lg.setUpLogger('delete.log')

    files = glob.glob('*.fits')
    files = sorted(files)

    for file in files:
        data = pf.getdata(file)
        settings = dict(sampling=1.0 / oversample, iterations=20)
        sh = shape.shapeMeasurement(data, log, **settings)
        reference = sh.measureRefinedEllipticity()

        R2 = reference['R2']  #in pixels
        R2a = reference['R2arcsec']

        print file, R2, R2a
def testCentroidingImpact(log, psf='/Users/sammy/EUCLID/vissim-python/data/psf12x.fits',
                          xrange=12, yrange=12, zoom=12, iterations=50):
    """
    :return:
    """
    settings = dict(sampling=zoom / 12.0, iterations=iterations)

    data = pf.getdata(psf)

    res = []
    for x in range(xrange):
        for y in range(yrange):
            yind, xind = np.indices(data.shape)
            xind += x
            yind += y
            tmp = ndimage.map_coordinates(data.copy(), [yind, xind], order=1, mode='nearest')
            psf = ndimage.zoom(tmp, 1.0 / zoom, order=0)

            sh = shape.shapeMeasurement(psf, log, **settings)
            results = sh.measureRefinedEllipticity()

            res.append([x, y, results])

    return res
def measureEllipticity(self):
    """
    Measures ellipticity for all objects with coordinates (self.x, self.y).

    Ellipticity is measured using Gaussian weighted quadrupole moments.
    See shape.py and especially the ShapeMeasurement class for more details.
    """
    ells = []
    xs = []
    ys = []
    R2s = []
    for x, y in zip(self.x, self.y):
        #cut out a square region around x and y coordinates
        #force the region to be symmetric around the galaxy
        xmin = max(x - self.settings['xcutout'], 0.)
        ymin = max(y - self.settings['ycutout'], 0.)
        xmax = min(x + self.settings['xcutout'] + 1., self.settings['sizeX'])
        ymax = min(y + self.settings['ycutout'] + 1., self.settings['sizeY'])

        xsize = min(x - xmin, xmax - x)
        ysize = min(y - ymin, ymax - y)

        xcutmin = int(x - xsize)
        xcutmax = int(x + xsize)
        ycutmin = int(y - ysize)
        ycutmax = int(y + ysize)

        if xcutmax - xcutmin < 10 or ycutmax - ycutmin < 10:
            self.log.warning('Very few pixels around the object, will skip this one...')
            continue

        self.log.info('Measuring ellipticity of an object located at (x, y) = (%f, %f)' % (x, y))

        img = self.data[ycutmin:ycutmax, xcutmin:xcutmax].copy()

        sh = shape.shapeMeasurement(img, self.log, **dict(sampling=self.settings['sampling']))
        results = sh.measureRefinedEllipticity()

        #get shifts for x and y centroids for the cutout image
        cutsizey, cutsizex = img.shape
        xcent = int(x - cutsizex / 2.)
        ycent = int(y - cutsizey / 2.)

        self.log.info('Centroiding (x, y) = (%f, %f), e=%f, R2=%f' %
                      (results['centreX'] + xcent, results['centreY'] + ycent,
                       results['ellipticity'], results['R2']))
        #print x - results['centreX']-xcent, y -results['centreY']-ycent

        #save the results
        ells.append(results['ellipticity'])
        xs.append(results['centreX'] + xcent)
        ys.append(results['centreY'] + ycent)
        R2s.append(results['R2'])

    out = dict(Xcentres=xs, Ycentres=ys, ellipticities=ells, R2s=R2s)
    self.results = out

    return self.results
def test(log, file='data/psf1x.fits', oversample=1.0, sigma=0.75, scale=1e2, level=10, covering=1.4, single=False):
    #read in PSF and renormalize it to norm
    data = pf.getdata(file)
    data /= np.max(data)

    #derive reference values from clean PSF
    settings = dict(sampling=1.0 / oversample, sigma=sigma, iterations=10)
    scaled = data.copy() * scale
    sh = shape.shapeMeasurement(scaled.copy(), log, **settings)
    reference = sh.measureRefinedEllipticity()
    print 'Reference:'
    pprint.pprint(reference)

    cosmics = cosmicrays.cosmicrays(log, np.zeros((2, 2)))
    crInfo = cosmics._readCosmicrayInformation()

    print 'Deposited Energy of Cosmic Rays: %i electrons' % level

    #add cosmic rays to the scaled image
    cosmics = cosmicrays.cosmicrays(log, scaled, crInfo=crInfo)
    if single:
        #only one cosmic with a given energy level, length drawn from a distribution
        newdata = cosmics.addSingleEvent(limit=level)
    else:
        #x cosmic ray events to reach a covering fraction, say 1.4 per cent
        newdata = cosmics.addUpToFraction(covering, limit=level)

    #write out new data for inspection
    fileIO.writeFITS(newdata, 'example.fits', int=False)

    #measure e and R2 from the postage stamp image
    sh = shape.shapeMeasurement(newdata.copy(), log, **settings)
    results = sh.measureRefinedEllipticity()
    print 'Results:'
    pprint.pprint(results)

    print 'delta e_1: ', results['e1'] - reference['e1']
    print 'delta e_2: ', results['e2'] - reference['e2']
    print 'delta e: ', results['ellipticity'] - reference['ellipticity']
    print 'delta R**2: ', results['R2'] - reference['R2']
def pistonKnowledge(log, file='data/psf2x.fits', oversample=2.0, psfs=1000, sigma=0.36, iterations=4, debug=False):
    """
    Study the impact of a residual piston (a constant offset in electrons added to the PSF)
    on the measured shape.
    """
    #read in PSF and renormalize it
    data = pf.getdata(file)
    data /= np.max(data)
    data *= 2000.

    if debug:
        write.writeFITSfile(data, 'normalizedPSF.fits')

    #set the scale for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma)

    #residual from a perfectly flat surface, pistons are in electrons
    pistons = np.logspace(-5, 1, 10)
    tot = pistons.size

    res = {}
    for i, piston in enumerate(pistons):
        print 'Piston: %i / %i' % (i + 1, tot)

        R2 = []
        e1 = []
        e2 = []
        e = []

        pss = np.random.random(psfs) * piston

        #loop over the PSFs
        for ps in pss:
            #make a copy of the PSF and scale it with the given scaling
            #and then add a random piston which is <= the error
            tmp = data.copy() + ps

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()

            #save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        #res[piston] = out
        res[np.std(pss)] = out  #or should we save std(ps)?

    return res
def findTolerableError(log, file='data/psf4x.fits', oversample=4.0, psfs=10000, iterations=7, sigma=0.75):
    """
    Calculate ellipticity and size for PSFs of different scaling when there are residual
    pixel-to-pixel variations.
    """
    #read in PSF and renormalize it
    data = pf.getdata(file)
    data /= np.max(data)

    #PSF scalings for the peak pixel, in electrons
    scales = np.random.random_integers(1e2, 2e5, psfs)

    #set the scale for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma)

    #residual from a perfect no pixel-to-pixel non-uniformity
    residuals = np.logspace(-7, -1.6, 9)[::-1]  #largest first
    tot = residuals.size

    res = {}
    for i, residual in enumerate(residuals):
        print '%i / %i' % (i + 1, tot)

        R2 = []
        e1 = []
        e2 = []
        e = []

        #loop over the PSFs
        for scale in scales:
            #random residual pixel-to-pixel variations
            if oversample < 1.1:
                residualSurface = np.random.normal(loc=1.0, scale=residual, size=data.shape)
            elif oversample == 4.0:
                tmp = np.random.normal(loc=1.0, scale=residual, size=(170, 170))
                residualSurface = zoom(tmp, 4.013, order=0)
            else:
                sys.exit('ERROR when trying to generate a blocky pixel-to-pixel non-uniformity map...')

            #make a copy of the PSF and scale it with the given scaling
            #and then multiply with a residual pixel-to-pixel variation
            tmp = data.copy() * scale * residualSurface

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
            results = sh.measureRefinedEllipticity()

            #save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[residual] = out

    return res
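
#A minimal usage sketch for findTolerableError: run the residual non-uniformity study on a
#reduced number of PSF realisations, pickle the results and print the shape scatter per
#residual level. The output file name is illustrative only.
def exampleTolerableError(psfs=500):
    log = lg.setUpLogger('flatFieldResidual.log')
    res = findTolerableError(log, psfs=psfs)
    fileIO.cPickleDumpDictionary(res, 'tolerableErrorResults.pk')
    for residual in sorted(res.keys()):
        print residual, np.std(res[residual]['e']), np.std(res[residual]['R2'])
    return res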
def measureChars(data, info, log):
    """
    Measure ellipticity, R2, FWHM etc.
    """
    #settings = dict(pixelSize=info['pixsize'], sampling=info['pixsize']/12.)
    settings = dict(sampling=info['pixsize'] / 12.)
    sh = shape.shapeMeasurement(data.copy(), log, **settings)
    results = sh.measureRefinedEllipticity()

    out = dict(ellipticity=results['ellipticity'], e1=results['e1'], e2=results['e2'], R2=results['R2'])

    return out
def simpleTest(log, sigma=0.75, iterations=50):
    #Thibauts data
    folder = '/Users/sammy/EUCLID/CTItesting/uniform/'
    wcti = pf.getdata(folder +
                      'galaxy_100mas_dist2_q=0.5078_re=6.5402_theta=0.91895_norm=1000_dx=0.3338_dy=0.0048CTI.fits')
    wocti = pf.getdata(folder +
                       'galaxy_100mas_dist2_q=0.5078_re=6.5402_theta=0.91895_norm=1000_dx=0.3338_dy=0.0048noCTI.fits')

    #reset settings
    settings = dict(sigma=sigma, iterations=iterations)

    #calculate shapes
    sh = shape.shapeMeasurement(wcti, log, **settings)
    wctiresults = sh.measureRefinedEllipticity()

    sh = shape.shapeMeasurement(wocti, log, **settings)
    woctiresults = sh.measureRefinedEllipticity()

    #include CTI with my recipe
    ctiMSSL = addCTI(wocti.copy())
    ctiThibault = addCTI(wocti.copy(), thibautCDM03=True)

    sh = shape.shapeMeasurement(ctiMSSL, log, **settings)
    wMSSLctiresults = sh.measureRefinedEllipticity()

    sh = shape.shapeMeasurement(ctiThibault, log, **settings)
    wThibautctiresults = sh.measureRefinedEllipticity()

    fileIO.writeFITS(ctiMSSL, 'tmp1.fits', int=False)
    fileIO.writeFITS(ctiThibault, 'tmp2.fits', int=False)
    fileIO.writeFITS(wcti / ctiMSSL, 'tmp3.fits', int=False)

    for key in wctiresults:
        tmp1 = wctiresults[key] - wMSSLctiresults[key]
        tmp2 = wctiresults[key] - wThibautctiresults[key]
        if 'Gaussian' in key:
            print key, np.max(np.abs(tmp1)), np.max(np.abs(tmp2))
        else:
            print key, tmp1, tmp2
def test(log, file="data/psf1x.fits", oversample=1.0, sigma=0.75, scale=1e2, level=10, covering=1.4, single=False): # read in PSF and renormalize it to norm data = pf.getdata(file) data /= np.max(data) # derive reference values from clean PSF settings = dict(sampling=1.0 / oversample, sigma=sigma, iterations=10) scaled = data.copy() * scale sh = shape.shapeMeasurement(scaled.copy(), log, **settings) reference = sh.measureRefinedEllipticity() print "Reference:" pprint.pprint(reference) cosmics = cosmicrays.cosmicrays(log, np.zeros((2, 2))) crInfo = cosmics._readCosmicrayInformation() print "Deposited Energy of Cosmic Rays: %i electrons" % level # add cosmic rays to the scaled image cosmics = cosmicrays.cosmicrays(log, scaled, crInfo=crInfo) if single: # only one cosmic with a given energy level, length drawn from a distribution newdata = cosmics.addSingleEvent(limit=level) else: # x cosmic ray events to reach a covering fraction, say 1.4 per cent newdata = cosmics.addUpToFraction(covering, limit=level) # write out new data for inspection fileIO.writeFITS(newdata, "example.fits", int=False) # measure e and R2 from the postage stamp image sh = shape.shapeMeasurement(newdata.copy(), log, **settings) results = sh.measureRefinedEllipticity() print "Results:" pprint.pprint(results) print "delta e_1: ", results["e1"] - reference["e1"] print "delta e_2: ", results["e2"] - reference["e2"] print "delta e: ", results["ellipticity"] - reference["ellipticity"] print "delta R**2: ", results["R2"] - reference["R2"]
def testCTIinclusion(log, sigma=0.75, iterations=3, weighted=True, fixedPosition=False, fixedX=None, fixedY=None): #reset settings settings = dict(sigma=sigma, iterations=iterations, weighted=weighted, fixedX=fixedX, fixedY=fixedY, fixedPosition=fixedPosition) #Thibauts data folder = '//Users/sammy/EUCLID/CTItesting/Reconciliation/' wcti = pf.getdata(folder + 'damaged_image_parallel.fits') wocti = pf.getdata(folder + 'galaxy_100mas_dist2_q=0.9568_re=22.2670_theta=-1.30527_norm=1000_dx=0.2274_dy=0.2352.fits') wocti /= np.max(wocti) wocti *= 420. sh = shape.shapeMeasurement(wcti, log, **settings) wctiresults = sh.measureRefinedEllipticity() #include CTI with my recipe ctiMSSL = addCTI(wocti.copy()).T ctiThibault = addCTI(wocti.copy(), thibautCDM03=True).T sh = shape.shapeMeasurement(ctiMSSL, log, **settings) wMSSLctiresults = sh.measureRefinedEllipticity() sh = shape.shapeMeasurement(ctiThibault, log, **settings) wThibautctiresults = sh.measureRefinedEllipticity() fileIO.writeFITS(ctiThibault, 'tmp2.fits', int=False) fileIO.writeFITS(wcti/ctiThibault, 'tmp3.fits', int=False) for key in wctiresults: tmp1 = wctiresults[key] - wMSSLctiresults[key] tmp2 = wctiresults[key] - wThibautctiresults[key] if 'Gaussian' in key: print key, np.max(np.abs(tmp1)), np.max(np.abs(tmp2)) else: print key, tmp1, tmp2
def measureChars(data, info, log): """ Measure ellipticity, R2, FWHM etc. """ #settings = dict(pixelSize=info['pixsize'], sampling=info['pixsize']/12.) settings = dict(sampling=info['pixsize'] / 12.) sh = shape.shapeMeasurement(data.copy(), log, **settings) results = sh.measureRefinedEllipticity() out = dict(ellipticity=results['ellipticity'], e1=results['e1'], e2=results['e2'], R2=results['R2']) return out
def calculateShapes(log, files, catalogue, cutout=85, iterations=50, sigma=0.75):
    """
    Calculate the shape and size of the PSFs in the files at the positions of the catalogue.

    :param log: logger instance
    :type log: instance
    :param files: names of the FITS files to process
    :type files: list
    :param catalogue: name of the input catalogue with object positions
    :type catalogue: str
    :param cutout: size of the cutout region [centre-cutout:centre+cutout+1]
    :type cutout: int
    :param iterations: number of iterations in the shape measurement
    :type iterations: int
    :param sigma: size of the gaussian weighting function
    :type sigma: float

    :return: [resolution, xcentre, ycentre, shape dictionary]
    :rtype: list
    """
    #shape measurement settings
    settings = dict(iterations=iterations, sigma=sigma)

    cat = np.loadtxt(catalogue)
    x = cat[:, 0]
    y = cat[:, 1]

    results = []
    for xc, yc in zip(x, y):
        for f in files:
            fh = pf.open(f, memmap=True)
            data = fh[1].data[yc - cutout:yc + cutout + 1, xc - cutout:xc + cutout + 1]
            reso = fh[1].header['PSFOVER']
            fh.close()

            sh = shape.shapeMeasurement(data, log, **settings)
            r = sh.measureRefinedEllipticity()

            results.append([reso, xc, yc, r])

    return results
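
#A minimal usage sketch for calculateShapes: measure the PSF shapes in a set of FITS files
#at the positions listed in a catalogue. Both the file pattern and the catalogue name are
#illustrative assumptions.
def exampleCalculateShapes():
    import glob as g
    log = lg.setUpLogger('psfShapes.log')
    files = sorted(g.glob('PSF*.fits'))
    results = calculateShapes(log, files, 'catalogue.dat')
    for reso, xc, yc, r in results:
        print reso, xc, yc, r['ellipticity'], r['R2']
    return results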
def animate(i):
    #relies on variables (sigma, data, l, text, ax2, e) defined in the enclosing scope
    settings = dict(sigma=sigma, iterations=i + 1)
    #settings = dict(sigma=sigma, iterations=i+1, fixedPosition=True, fixedX=85.0, fixedY=85.)
    sh = shape.shapeMeasurement(data, l, **settings)
    results = sh.measureRefinedEllipticity()
    text.set_text(r'%i iterations, $e \sim %.4f$' % (i + 1, results['ellipticity']))
    ax2.imshow(results['GaussianWeighted'], origin='lower')
    ang = 0.5 * np.arctan(results['e2'] / results['e1'])
    e.center = (results['centreX'] - 1, results['centreY'] - 1)
    e.width = results['a']
    e.height = results['b']
    e.angle = ang
    return ax2, text, e
def plotEllipticityDependency(data, ellipticity, log):
    """
    Generate a simple plot: size of the Gaussian weighting function vs. derived ellipticity.
    """
    x = []
    y = []
    for sigma in range(1, 50):
        settings = dict(sigma=sigma)
        sh = shape.shapeMeasurement(data, log, **settings)
        results = sh.measureRefinedEllipticity()
        x.append(sigma)
        y.append(results['ellipticity'])

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, 'bo-')
    ax.plot([min(x), max(x)], [ellipticity, ellipticity], 'k--')
    ax.set_xlabel(r'Gaussian Weighting $\sigma$ [arcseconds]')
    ax.set_ylabel('Measured Ellipticity')
    ax.set_ylim(0, 1.01)
    plt.savefig('EvsSigma.pdf')
    plt.close()
def testCentroidingImpactSingleDirection(log, psf='/Users/sammy/EUCLID/vissim-python/data/psf12x.fits',
                                         canvas=16, ran=13, zoom=12, iterations=50, sigma=0.75,
                                         gaussian=False, interpolation=False, save=True):
    """
    :return:
    """
    settings = dict(sampling=zoom / 12.0, iterations=iterations, sigma=sigma)

    if gaussian:
        data = Gaussian2D(256., 256., 25, 25)['Gaussian']
        data *= 1e5
    else:
        data = pf.getdata(psf)

    xres = []
    print 'X shifts'
    for x in range(ran):
        tmp = data.copy()[canvas:-canvas + 1, canvas - x:-canvas - x + 1]

        if interpolation:
            if gaussian:
                psf = frebin(tmp, 40, nlout=40)
            else:
                size = tmp.shape[0] / 12
                psf = frebin(tmp, size, nlout=size, total=True)
        else:
            print tmp.shape
            psf = ndimage.zoom(tmp, 1.0 / zoom, order=0)

        if save:
            out = 'PSFx%i' % x
            if gaussian:
                out += 'Gaussian'
            if interpolation:
                out += 'Interpolated'
            out += '.fits'
            fileIO.writeFITS(psf, out, int=False)

        sh = shape.shapeMeasurement(psf, log, **settings)
        results = sh.measureRefinedEllipticity()

        xres.append([x, results])

        print x, psf.shape, np.sum(psf), np.max(psf), results['e1'], results['e2'], results['ellipticity']

    yres = []
    print 'Y shifts'
    for y in range(ran):
        tmp = data.copy()[canvas - y:-canvas - y + 1, canvas:-canvas + 1]

        if interpolation:
            if gaussian:
                psf = frebin(tmp, 40, nlout=40)
            else:
                size = tmp.shape[0] / 12
                psf = frebin(tmp, size, nlout=size, total=True)
        else:
            psf = ndimage.zoom(tmp, 1.0 / zoom, order=0)

        sh = shape.shapeMeasurement(psf, log, **settings)
        results = sh.measureRefinedEllipticity()

        yres.append([y, results])

        print y, psf.shape, np.sum(psf), np.max(psf), results['e1'], results['e2'], results['ellipticity']

    return xres, yres
def ghostContributionElectrons(log, filename='data/psf1x.fits', magnitude=24.5, zp=25.5, exptime=565.,
                               exposures=3, iterations=100, sigma=0.75, centered=False, offset=9,
                               verbose=False):
    #set sampling etc. for shape measurement
    settings = dict(iterations=iterations, sigma=sigma, debug=True)

    #read in PSF
    data = pf.getdata(filename)

    #place it a larger canvas with zero padding around
    canvas = np.pad(data, 100, mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #normalize canvas, scale it to magnitude and save it
    canvas /= np.max(canvas)
    intscale = 10.0**(-0.4 * (magnitude - zp)) * exptime * exposures
    canvas *= intscale
    fileIO.writeFITS(canvas, 'originalPSF.fits', int=False)

    #reference values
    sh = shape.shapeMeasurement(canvas, log, **settings)
    reference = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk')

    if verbose:
        print 'Reference:'
        pprint.pprint(reference)

    #load ghost
    ghostModel = pf.getdata('data/ghost800nm.fits')[355:423, 70:131]
    ghostModel /= np.max(ghostModel)  #peak is 1 now
    ys, xs = ghostModel.shape
    yd = int(np.round(ys / 2., 0))
    xd = int(np.round(xs / 2., 0))
    fileIO.writeFITS(ghostModel, 'ghostImage.fits', int=False)

    #ghost levels
    scales = np.logspace(-4, 2, 21)
    result = {}
    for scale in scales:
        scaled = ghostModel.copy() * scale
        #fileIO.writeFITS(scaled, 'ghostImage.fits', int=False)

        tmp = canvas.copy()
        if centered:
            tmp[ycen - yd:ycen + yd, xcen - xd:xcen + xd + 1] += scaled
        else:
            tmp[ycen - yd + offset:ycen + yd + offset, xcen - xd + offset:xcen + xd + 1 + offset] += scaled
        #tmp[ycen: ycen + 2*yd, xcen:xcen + 2*xd + 1] += scaled
        #fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False)

        #measure e and R2 from the postage stamp image
        sh = shape.shapeMeasurement(tmp, log, **settings)
        results = sh.measureRefinedEllipticity()

        de1 = results['e1'] - reference['e1']
        de2 = results['e2'] - reference['e2']
        de = np.sqrt(de1**2 + de2**2)
        dR2 = (results['R2'] - reference['R2']) / reference['R2']

        if verbose:
            print '\n\nscale=', scale
            print 'Delta: with ghost - reference'
            print 'e1', de1
            print 'e2', de2
            print 'e', de
            print 'R2', dR2

        result[scale] = [de1, de2, de, dR2, results['e1'], results['e2'], results['ellipticity'], results['R2']]

    return result
def analyseInFocusImpact(log, filename='data/psf4x.fits', psfscale=100000, maxdistance=100,
                         oversample=4.0, psfs=1000, iterations=6, sigma=0.75):
    """
    Calculates PSF size and ellipticity when including another PSF scaled to a given level (requirement = 5e-5).

    :param log:
    :param filename: name of the PSF file to analyse
    :param psfscale: level to which the original PSF is scaled to
    :param maxdistance: maximum distance the ghost image can be from the original PSF (centre to centre)
    :param oversample: oversampling factor
    :param psfs: number of PSFs to analyse (number of ghosts in random locations)
    :param iterations: number of iterations in the shape measurement
    :param sigma: size of the Gaussian weighting function

    :return: results
    :rtype: dict
    """
    #read in PSF and renormalize it
    data = pf.getdata(filename)
    data /= np.max(data)

    #place it a larger canvas with zero padding around
    ys, xs = data.shape
    yd = int(np.round(ys / 2., 0))
    xd = int(np.round(xs / 2., 0))
    canvas = np.pad(data, xs + maxdistance, mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #print canvas.shape
    #print canvas.flags

    canvas /= np.max(canvas)
    canvas *= float(psfscale)

    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma)

    #positions
    x = np.round((np.random.rand(psfs) - 0.5) * maxdistance, 0).astype(np.int)
    y = np.round((np.random.rand(psfs) - 0.5) * maxdistance, 0).astype(np.int)

    #ghost level
    ghosts = np.logspace(-7, -4, 10)[::-1]  #largest first
    tot = ghosts.size

    res = {}
    for i, scale in enumerate(ghosts):
        print 'ghost levels: %i / %i' % (i + 1, tot)

        R2 = []
        e1 = []
        e2 = []
        e = []

        scaled = data.copy() * (scale * psfscale)

        #loop over the ghost positions
        for xc, yc in zip(x, y):
            tmp = canvas.copy()
            xm = xcen + xc
            ym = ycen + yc

            try:
                tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1] += scaled
            except:
                try:
                    tmp[ym - yd:ym + yd, xm - xd:xm + xd] += scaled
                except:
                    print scaled.shape
                    print tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1].shape
                    print 'ERROR -- cannot place the ghost to the image!!'
                    continue

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()

            #save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[scale] = out

    return res
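
#A minimal usage sketch for analyseInFocusImpact: run a reduced number of random ghost
#positions and print the mean measured shape per ghost level. The PSF file is the function
#default and is assumed to exist.
def exampleInFocusGhosts(psfs=100):
    log = lg.setUpLogger('ghostImpact.log')
    res = analyseInFocusImpact(log, psfs=psfs)
    for level in sorted(res.keys()):
        print level, np.mean(res[level]['e']), np.mean(res[level]['R2'])
    return res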
def testFlatCalibration(log, flats, surfaces=10, file='data/psf1x.fits', psfs=5000, sigma=0.75,
                        iterations=7, weighting=True, plot=False, debug=False):
    """
    Derive the PSF ellipticities for a given number of random surfaces with random PSF positions
    and a given number of flat fields median combined.

    This function is to derive the actual values so that the knowledge (variance) can be studied.
    """
    #read in PSF and rescale to avoid rounding or truncation errors
    data = pf.getdata(file)
    data /= np.max(data)
    data *= 300.  #SNR about 10 for star...

    #derive reference values
    settings = dict(sigma=sigma, iterations=iterations, weighted=weighting)
    sh = shape.shapeMeasurement(data.copy(), log, **settings)
    reference = sh.measureRefinedEllipticity()

    #random positions for the PSFs, these positions are the lower corners
    #assume that this is done on quadrant level thus the xmax and ymax are 2065 and 2047, respectively
    xpositions = np.random.random_integers(0, 2047 - data.shape[1], psfs)
    ypositions = np.random.random_integers(0, 2065 - data.shape[0], psfs)

    out = {}
    #number of biases to median combine
    for a in flats:
        print 'Number of Flats to combine: %i / %i' % (a, flats[-1])

        #data storage
        de1 = []
        de2 = []
        de = []
        R2 = []
        dR2 = []
        e1 = []
        e2 = []
        e = []

        for b in xrange(surfaces):
            print 'Random Realisations: %i / %i' % (b + 1, surfaces)

            residual = generateResidualFlatField(combine=a, plots=plot, debug=debug)

            print 'Average residual = %e' % (np.mean(residual) - 1.)

            # generate 2D plot
            if b == 0 and plot:
                im = plt.imshow(residual, extent=(0, 2066, 2048, 0))
                plt.scatter(xpositions + (data.shape[1] / 2), ypositions + (data.shape[0] / 2), color='white')
                c1 = plt.colorbar(im)
                c1.set_label('Residual Flat Field')
                plt.xlim(0, 2066)
                plt.ylim(0, 2048)
                plt.xlabel('Y [pixels]')
                plt.ylabel('X [pixels]')
                plt.savefig('residualFlat2D%i.png' % a)
                plt.close()

            #loop over the PSFs
            for xpos, ypos in zip(xpositions, ypositions):
                tmp = data.copy()

                #get the underlying residual surface and multiply with the PSF
                small = residual[ypos:ypos + data.shape[0], xpos:xpos + data.shape[1]].copy()
                #small += 1.
                small *= tmp
                #small *= tmp  # depends on the residual generation

                #measure e and R2 from the postage stamp image
                sh = shape.shapeMeasurement(small.copy(), log, **settings)
                results = sh.measureRefinedEllipticity()

                #save values
                e1.append(results['e1'])
                e2.append(results['e2'])
                e.append(results['ellipticity'])
                R2.append(results['R2'])
                de1.append(results['e1'] - reference['e1'])
                de2.append(results['e2'] - reference['e2'])
                de.append(results['ellipticity'] - reference['ellipticity'])
                dR2.append(results['R2'] - reference['R2'])

        out[a + 1] = [e1, e2, e, R2, de1, de2, de, dR2]

    return out, reference
def plotfile(filename='/Users/sammy/EUCLID/vissim-python/data/psf1x.fits', sigma=0.75, iterations=4,
             out='test.pdf', scale=False, log=False, zoom=30):
    """
    Calculate ellipticity from a given input file using quadrupole moments and plot the data.
    """
    settings = dict(sigma=sigma, iterations=iterations)

    l = lg.setUpLogger('CTItesting.log')

    data = pf.getdata(filename)
    if scale:
        data /= np.max(data)
        data *= 1.e5

    sh = shape.shapeMeasurement(data, l, **settings)
    results = sh.measureRefinedEllipticity()

    fig, axarr = plt.subplots(1, 2, sharey=True)
    ax1 = axarr[0]
    ax2 = axarr[1]
    fig.subplots_adjust(wspace=0)

    if log:
        ax1.set_title(r'$\log_{10}$(Image)')
        ax2.set_title(r'$\log_{10}$(Gaussian Weighted)')
    else:
        ax1.set_title(r'Image')
        ax2.set_title(r'Gaussian Weighted')

    #no ticks on the right hand side plot
    plt.setp(ax2.get_yticklabels(), visible=False)

    if log:
        im1 = ax1.imshow(np.log10(data), origin='lower')
        im2 = ax2.imshow(np.log10(results['GaussianWeighted']), origin='lower')
    else:
        im1 = ax1.imshow(data, origin='lower')
        im2 = ax2.imshow(results['GaussianWeighted'], origin='lower')

    ang = 0.5 * np.arctan(results['e2'] / results['e1'])
    e = Ellipse(xy=(results['centreX'] - 1, results['centreY'] - 1),
                width=results['a'], height=results['b'], angle=ang,
                facecolor='none', ec='k', lw=2)
    fig.gca().add_artist(e)

    if zoom is not None:
        ax1.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax2.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax1.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)
        ax2.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)

    plt.savefig(out)
    plt.close()
def ghostContributionToStar(log, filename='data/psf12x.fits', psfscale=2e5, distance=750,
                            inner=8, outer=60, oversample=12, iterations=20, sigma=0.75,
                            scale=5e-5, fixedPosition=True):
    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma, debug=True)

    #read in PSF
    data = pf.getdata(filename)

    #place it a larger canvas with zero padding around
    canvas = np.pad(data, int(distance * oversample + outer + 1),
                    mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #normalize canvas and save it
    canvas /= np.max(canvas)
    canvas *= float(psfscale)
    fileIO.writeFITS(canvas, 'originalPSF.fits', int=False)

    #reference values
    sh = shape.shapeMeasurement(canvas, log, **settings)
    reference = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk')

    print 'Reference:'
    pprint.pprint(reference)

    #make out of focus image, a simple doughnut
    img, xd, yd = drawDoughnut(inner, outer, oversample=oversample)

    #positions (shift with respect to the centring of the star)
    xc = 0
    yc = distance * oversample

    #indices range
    xm = xcen + xc
    ym = ycen + yc

    #ghost level
    #scale the doughnut pixel values, note that all pixels have the same value...
    img /= np.max(img)
    scaled = img.copy() * scale * psfscale
    fileIO.writeFITS(scaled, 'ghostImage.fits', int=False)

    tmp = canvas.copy()
    if oversample % 2 == 0:
        tmp[ym - yd:ym + yd, xm - xd:xm + xd] += scaled
    else:
        tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1] += scaled
    fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False)

    #use fixed positions
    if fixedPosition:
        settings['fixedPosition'] = True
        settings['fixedX'] = reference['centreX']
        settings['fixedY'] = reference['centreY']

    #measure e and R2 from the postage stamp image
    sh = shape.shapeMeasurement(tmp, log, **settings)
    results = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(results, 'ghostStarContribution.pk')

    #save values
    print '\nWith Doughnut:'
    pprint.pprint(results)

    print '\nDelta: with ghost - reference'
    print 'e1', results['e1'] - reference['e1']
    print 'e2', results['e2'] - reference['e2']
    print 'e', results['ellipticity'] - reference['ellipticity']
    print 'R2', results['R2'] - reference['R2']
    print 'Xcen', results['centreX'] - reference['centreX']
    print 'Ycen', results['centreY'] - reference['centreY']

    return results
def testCTIcorrectionNonoise(log, files, output, sigma=0.75, iterations=4):
    """
    Calculates PSF properties such as ellipticity and size from data w/ and w/o CTI.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int

    :return: ellipticity and size
    :rtype: dict
    """
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []

    fh = open(output.replace('pk', 'csv'), 'w')
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')
    for f in files:
        print 'Processing: ', f

        #reset settings
        settings = dict(sigma=sigma, iterations=iterations)

        #load no cti data
        nocti = pf.getdata(f.replace('CUT', 'CUTnoctinonoise'))
        #load CTI data
        CTI = pf.getdata(f)

        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])

        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()

        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])

        #fixed centroid
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()

        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])

        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f,
                                           results['ellipticity'] - results2['ellipticity'],
                                           results['e1'] - results2['e1'],
                                           results['e2'] - results2['e2'],
                                           results['R2'] - results2['R2'],
                                           results['centreX'] - results2['centreX'],
                                           results['centreY'] - results2['centreY'])
        fh.write(text)
        print text

    fh.close()

    results = {'eclean': np.asarray(eclean),
               'e1clean': np.asarray(e1clean),
               'e2clean': np.asarray(e2clean),
               'R2clean': np.asarray(R2clean),
               'xclean': np.asarray(xclean),
               'yclean': np.asarray(yclean),
               'eCTI': np.asarray(eCTI),
               'e1CTI': np.asarray(e1CTI),
               'e2CTI': np.asarray(e2CTI),
               'R2CTI': np.asarray(R2CTI),
               'xCTI': np.asarray(xCTI),
               'yCTI': np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, output)

    return results
def testCTIcorrection(log, files, sigma=0.75, iterations=4, xcen=1900, ycen=1900, side=20):
    """
    Calculates PSF properties such as ellipticity and size from data without CTI and from CTI data.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int
    :param xcen: x-coordinate of the object centre
    :type xcen: int
    :param ycen: y-coordinate of the object centre
    :type ycen: int
    :param side: size of the cutout around the centre (+/- side)
    :type side: int

    :return: ellipticity and size
    :rtype: dict
    """
    settings = dict(sigma=sigma, iterations=iterations)

    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    for file in files:
        #load no cti data
        nocti = pf.getdata(file.replace('CTI', 'nocti'))[ycen - side:ycen + side, xcen - side:xcen + side]
        #subtract background
        nocti -= 27.765714285714285
        nocti[nocti < 0.] = 0.  #remove negative numbers

        #load CTI data
        CTI = pf.getdata(file)[ycen - side:ycen + side, xcen - side:xcen + side]
        CTI[CTI < 0.] = 0.  #remove negative numbers

        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])

        sh = shape.shapeMeasurement(CTI, log, **settings)
        results = sh.measureRefinedEllipticity()

        eCTI.append(results['ellipticity'])
        e1CTI.append(results['e1'])
        e2CTI.append(results['e2'])
        R2CTI.append(results['R2'])

    results = {'eclean': np.asarray(eclean),
               'e1clean': np.asarray(e1clean),
               'e2clean': np.asarray(e2clean),
               'R2clean': np.asarray(R2clean),
               'eCTI': np.asarray(eCTI),
               'e1CTI': np.asarray(e1CTI),
               'e2CTI': np.asarray(e2CTI),
               'R2CTI': np.asarray(R2CTI)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, 'results.pk')

    return results
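
#A minimal usage sketch for testCTIcorrection: compare shapes measured from cutouts with and
#without CTI and print the mean differences. The file pattern is an illustrative assumption;
#each CTI file is expected to have a matching 'nocti' counterpart on disk.
def exampleCTIcorrection():
    import glob as g
    log = lg.setUpLogger('CTIcorrection.log')
    files = sorted(g.glob('*CTI*.fits'))
    results = testCTIcorrection(log, files)
    print 'mean delta e :', np.mean(results['eCTI'] - results['eclean'])
    print 'mean delta R2:', np.mean(results['R2CTI'] - results['R2clean'])
    return results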
#calculate ellipticity from Sigmas
e = ellipticityFromSigmas(sigmax, sigmay)

#generate a 2D gaussian with given properties...
gaussian2d = Gaussian2D(xcen, ycen, xsize, ysize, sigmax, sigmay)

#plot
plot3D(gaussian2d)

#write FITS file
files.writeFITS(gaussian2d['Gaussian'], 'gaussian.fits')

#calculate shape and printout results
settings = dict(sigma=15., weighted=False)
sh = shape.shapeMeasurement(gaussian2d['Gaussian'], log, **settings)
results = sh.measureRefinedEllipticity()
print
pprint.pprint(results)
print e, (e - results['ellipticity']) / e * 100.

#generate a plot sigma vs ellipticity for a given Gaussian
plotEllipticityDependency(gaussian2d['Gaussian'], e, log)

#measureGaussianR2
measureGaussianR2(log)

#derive FWHM - R2 relation... not really working
#size()

#test many files
def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,
                    datadir='/Users/smn2/EUCLID/CTItesting/uniform/',
                    thibautCDM03=False, beta=False, serial=1, parallel=1):
    """
    Test the impact of CTI in case of no noise and no correction.

    :param log: logger instance
    :param bcgr: background in electrons for the CTI modelling
    :param sigma: size of the weighting function for the quadrupole moment
    :param iterations: number of iterations in the quadrupole moments estimation
    :param loc: location to which the galaxy will be placed [default=1900]
    :param galaxies: number of galaxies to use (< 10000)
    :param datadir: directory pointing to the galaxy images

    :return:
    """
    files = g.glob(datadir + '*.fits')
    #pick randomly
    files = np.random.choice(files, galaxies, replace=False)

    #trap parameters: parallel
    if thibautCDM03:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'
        params = ThibautsCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
    else:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'
        params = MSSLCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))

    if beta:
        params.update(dict(beta_p=0.6, beta_s=0.6))

    print f1, f2

    #store shapes
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []

    fh = open(output.replace('.pk', '.csv'), 'w')
    fh.write('#files: %s and %s\n' % (f1, f2))
    for key in params:
        print key, params[key]
        fh.write('# %s = %s\n' % (key, str(params[key])))
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')
    for f in files:
        print 'Processing: ', f

        #load data
        nocti = pf.getdata(f)

        #scale to SNR about 10 (average galaxy, a single exposure)
        nocti /= np.sum(nocti)
        nocti *= 1500.

        #place it on canvas
        tmp = np.zeros((2066, 2048))
        ysize, xsize = nocti.shape
        ysize /= 2
        xsize /= 2
        tmp[loc - ysize:loc + ysize, loc - xsize:loc + xsize] = nocti.copy()

        #add background
        tmp += bcgr

        #run CDM03
        c = CTI.CDM03bidir(params, [])
        tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()

        #remove background and make a cutout
        CTIdata = tmp[loc - ysize:loc + ysize, loc - xsize:loc + xsize]
        CTIdata -= bcgr
        CTIdata[CTIdata < 0.] = 0.

        #write files
        #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)
        #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)

        #reset settings
        settings = dict(sigma=sigma, iterations=iterations)

        #calculate shapes
        sh = shape.shapeMeasurement(nocti.copy(), log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])

        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()

        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])

        #fixed centroid
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()

        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])

        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f,
                                           results['ellipticity'] - results2['ellipticity'],
                                           results['e1'] - results2['e1'],
                                           results['e2'] - results2['e2'],
                                           results['R2'] - results2['R2'],
                                           results['centreX'] - results2['centreX'],
                                           results['centreY'] - results2['centreY'])
        fh.write(text)
        print text

    fh.close()

    results = {'eclean': np.asarray(eclean),
               'e1clean': np.asarray(e1clean),
               'e2clean': np.asarray(e2clean),
               'R2clean': np.asarray(R2clean),
               'xclean': np.asarray(xclean),
               'yclean': np.asarray(yclean),
               'eCTI': np.asarray(eCTI),
               'e1CTI': np.asarray(e1CTI),
               'e2CTI': np.asarray(e2CTI),
               'R2CTI': np.asarray(R2CTI),
               'xCTI': np.asarray(xCTI),
               'yCTI': np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, output)

    return results
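
#A minimal usage sketch for useThibautsData: process a small subset of the galaxy images with
#the default (MSSL) trap parameters and pickle the shape results. The data directory and trap
#files follow the function defaults and are assumed to exist; the output name is illustrative.
def exampleThibautsData(galaxies=50):
    log = lg.setUpLogger('thibautCTI.log')
    results = useThibautsData(log, 'thibautCTIresults.pk', galaxies=galaxies)
    print 'mean delta e :', np.mean(results['eCTI'] - results['eclean'])
    print 'mean delta R2:', np.mean(results['R2CTI'] - results['R2clean'])
    return results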
def doAperturePhotometry(self, pixel_based=True):
    """
    Perform aperture photometry and calculate the shape of the object based on quadrupole moments.
    This method also calculates a refined centroid for each object.

    .. Warning:: Results are rather sensitive to the background subtraction, while the errors depend
                 strongly on the noise estimate from the background. Thus, great care should be
                 exercised when applying this method.

    :param pixel_based: whether to do a global or pixel based background subtraction
    :type pixel_based: boolean

    :return: photometry, error_in_photometry, ellipticity, refined_x_pos, refined_y_pos
    :rtype: ndarray, ndarray, ndarray, ndarray, ndarray
    """
    if not hasattr(self, 'xcms'):
        self.getCenterOfMass()

    #box around the source, make it larger than the aperture to allow recentroiding
    size = np.ceil(self.settings['aperture'] * 1.5)

    #area that the aperture covers
    area = np.pi * self.settings['aperture']**2

    #background
    if pixel_based:
        bcg = self.background
    else:
        bcg = self.background * area

    photom = []
    ell = []
    refx = []
    refy = []
    error = []
    for x, y in zip(self.xcms, self.ycms):
        xint = int(x)
        yint = int(y)

        if x - size < 0 or y - size < 0 or x + size > self.origimage.shape[1] or y + size > self.origimage.shape[0]:
            #too close to the edge for aperture photometry
            photom.append(-999)
            ell.append(-999)
            refx.append(-999)
            refy.append(-999)
            error.append(-999)
        else:
            #cut out a small region and subtract the sky
            if pixel_based:
                small = self.origimage[yint - size:yint + size + 1,
                                       xint - size:xint + size + 1].copy().astype(np.float64) - bcg
            else:
                small = self.origimage[yint - size:yint + size + 1,
                                       xint - size:xint + size + 1].copy().astype(np.float64)

            if self.settings['oversample'] < 1.5:
                oversampled = small
            else:
                sum = np.sum(small)
                oversampled = interpolation.zoom(small, self.settings['oversample'], order=0)
                oversampled = oversampled / np.sum(oversampled) * sum

            #indices of the oversampled image
            yind, xind = np.indices(oversampled.shape)

            #assume that centre is the same as the peak pixel (zero indexed)
            #ycen1, xcen1 = ndimage.measurements.maximum_position(oversampled)

            #calculate centre and shape
            settings = dict(sampling=1. / self.settings['oversample'])
            sh = shape.shapeMeasurement(oversampled.copy(), self.log, **settings)
            results = sh.measureRefinedEllipticity()
            xcen = results['centreX'] - 1.
            ycen = results['centreY'] - 1.
            ell.append(results['ellipticity'])

            #refined x and y positions
            refx.append(xint - size + (xcen / self.settings['oversample']))
            refy.append(yint - size + (ycen / self.settings['oversample']))

            #change the peak to be 0, 0 and calculate radius
            xind -= xcen
            yind -= ycen
            rad = np.sqrt(xind**2 + yind**2)

            #calculate flux in the apertures
            mask = rad <= (self.settings['oversample'] * self.settings['aperture'])
            counts = oversampled[np.where(mask)].sum()

            #global background subtraction
            if not pixel_based:
                counts -= bcg

            #calculate the error in magnitudes
            err = 1.0857 * np.sqrt(area * self.background_std**2 + (counts / self.settings['gain'])) / counts

            #convert to electrons
            counts *= self.settings['gain']

            photom.append(counts)
            error.append(err)

    self.photometry = np.asarray(photom)
    self.ellipticity = np.asarray(ell)
    self.refx = np.asarray(refx)
    self.refy = np.asarray(refy)
    self.error = np.asarray(error)

    return self.photometry, self.error, self.ellipticity, self.refx, self.refy
def ghostContributionElectrons(log, filename='data/psf1x.fits', magnitude=24.5, zp=25.5, exptime=565., exposures=3, iterations=100, sigma=0.75, centered=False, offset=9, verbose=False): #set sampling etc. for shape measurement settings = dict(itereations=iterations, sigma=sigma, debug=True) #read in PSF data = pf.getdata(filename) #place it a larger canvas with zero padding around canvas = np.pad(data, 100, mode='constant', constant_values=0) #requires numpy >= 1.7.0 ys, xs = canvas.shape xcen = int(np.round(xs / 2., 0)) ycen = int(np.round(ys / 2., 0)) #normalize canvas, scale it to magnitude and save it canvas /= np.max(canvas) intscale = 10.0**(-0.4 * (magnitude - zp)) * exptime * exposures canvas *= intscale fileIO.writeFITS(canvas, 'originalPSF.fits', int=False) #reference values sh = shape.shapeMeasurement(canvas, log, **settings) reference = sh.measureRefinedEllipticity() fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk') if verbose: print 'Reference:' pprint.pprint(reference) #load ghost ghostModel = pf.getdata('data/ghost800nm.fits')[355:423, 70:131] ghostModel /= np.max(ghostModel) #peak is 1 now ys, xs = ghostModel.shape yd = int(np.round(ys / 2., 0)) xd = int(np.round(xs / 2., 0)) fileIO.writeFITS(ghostModel, 'ghostImage.fits', int=False) #ghost levels scales = np.logspace(-4, 2, 21) result = {} for scale in scales: scaled = ghostModel.copy() * scale #fileIO.writeFITS(scaled, 'ghostImage.fits', int=False) tmp = canvas.copy() if centered: tmp[ycen - yd:ycen + yd, xcen - xd:xcen + xd + 1] += scaled else: tmp[ycen - yd + offset:ycen + yd + offset, xcen - xd + offset:xcen + xd + 1 + offset] += scaled #tmp[ycen: ycen + 2*yd, xcen:xcen + 2*xd + 1] += scaled #fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False) #measure e and R2 from the postage stamp image sh = shape.shapeMeasurement(tmp, log, **settings) results = sh.measureRefinedEllipticity() de1 = results['e1'] - reference['e1'] de2 = results['e2'] - reference['e2'] de = np.sqrt(de1**2 + de2**2) dR2 = (results['R2'] - reference['R2']) / reference['R2'] if verbose: print '\n\nscale=', scale print 'Delta: with ghost - reference' print 'e1', de1 print 'e2', de2 print 'e', de print 'R2', dR2 result[scale] = [ de1, de2, de, dR2, results['e1'], results['e2'], results['ellipticity'], results['R2'] ] return result
def ghostContributionToStar(log, filename='data/psf12x.fits', psfscale=2e5, distance=750, inner=8, outer=60,
                            oversample=12, iterations=20, sigma=0.75, scale=5e-5, fixedPosition=True):
    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma, debug=True)

    #read in PSF
    data = pf.getdata(filename)

    #place it on a larger canvas with zero padding around
    canvas = np.pad(data, int(distance*oversample + outer + 1), mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))

    #normalize canvas and save it
    canvas /= np.max(canvas)
    canvas *= float(psfscale)
    fileIO.writeFITS(canvas, 'originalPSF.fits', int=False)

    #reference values
    sh = shape.shapeMeasurement(canvas, log, **settings)
    reference = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(reference, 'ghostStarContribution.pk')

    print 'Reference:'
    pprint.pprint(reference)

    #make out of focus image, a simple doughnut
    img, xd, yd = drawDoughnut(inner, outer, oversample=oversample)

    #positions (shift with respect to the centring of the star)
    xc = 0
    yc = distance * oversample

    #indices range
    xm = xcen + xc
    ym = ycen + yc

    #ghost level
    #scale the doughnut pixel values, note that all pixels have the same value...
    img /= np.max(img)
    scaled = img.copy() * scale * psfscale
    fileIO.writeFITS(scaled, 'ghostImage.fits', int=False)

    tmp = canvas.copy()
    if oversample % 2 == 0:
        tmp[ym - yd:ym + yd, xm - xd:xm + xd] += scaled
    else:
        tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1] += scaled
    fileIO.writeFITS(tmp, 'originalPlusGhost.fits', int=False)

    #use fixed positions
    if fixedPosition:
        settings['fixedPosition'] = True
        settings['fixedX'] = reference['centreX']
        settings['fixedY'] = reference['centreY']

    #measure e and R2 from the postage stamp image
    sh = shape.shapeMeasurement(tmp, log, **settings)
    results = sh.measureRefinedEllipticity()
    fileIO.cPickleDumpDictionary(results, 'ghostStarContribution.pk')

    #save values
    print '\nWith Doughnut:'
    pprint.pprint(results)

    print '\nDelta: with ghost - reference'
    print 'e1', results['e1'] - reference['e1']
    print 'e2', results['e2'] - reference['e2']
    print 'e', results['ellipticity'] - reference['ellipticity']
    print 'R2', results['R2'] - reference['R2']
    print 'Xcen', results['centreX'] - reference['centreX']
    print 'Ycen', results['centreY'] - reference['centreY']

    return results
def analyseInFocusImpact(log, filename='data/psf4x.fits', psfscale=100000, maxdistance=100,
                         oversample=4.0, psfs=1000, iterations=6, sigma=0.75):
    """
    Calculates PSF size and ellipticity when including another PSF scaled to a given level (requirement = 5e-5).

    :param log: logger instance
    :param filename: name of the PSF file to analyse
    :param psfscale: level to which the original PSF is scaled to
    :param maxdistance: maximum distance the ghost image can be from the original PSF (centre to centre)
    :param oversample: oversampling factor
    :param psfs: number of PSFs to analyse (number of ghosts in random locations)
    :param iterations: number of iterations in the shape measurement
    :param sigma: size of the Gaussian weighting function

    :return: results
    :rtype: dict
    """
    #read in PSF and renormalize it
    data = pf.getdata(filename)
    data /= np.max(data)

    #place it on a larger canvas with zero padding around
    ys, xs = data.shape
    yd = int(np.round(ys / 2., 0))
    xd = int(np.round(xs / 2., 0))
    canvas = np.pad(data, xs + maxdistance, mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs / 2., 0))
    ycen = int(np.round(ys / 2., 0))
    #print canvas.shape
    #print canvas.flags
    canvas /= np.max(canvas)
    canvas *= float(psfscale)

    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma)

    #positions
    x = np.round((np.random.rand(psfs) - 0.5) * maxdistance, 0).astype(np.int)
    y = np.round((np.random.rand(psfs) - 0.5) * maxdistance, 0).astype(np.int)

    #ghost level
    ghosts = np.logspace(-7, -4, 10)[::-1]  #largest first
    tot = ghosts.size

    res = {}
    for i, scale in enumerate(ghosts):
        print 'ghost levels: %i / %i' % (i + 1, tot)

        R2 = []
        e1 = []
        e2 = []
        e = []

        scaled = data.copy() * (scale * psfscale)

        #loop over the ghost positions
        for xc, yc in zip(x, y):
            tmp = canvas.copy()
            xm = xcen + xc
            ym = ycen + yc
            try:
                tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1] += scaled
            except:
                try:
                    tmp[ym - yd:ym + yd, xm - xd:xm + xd] += scaled
                except:
                    print scaled.shape
                    print tmp[ym - yd:ym + yd + 1, xm - xd:xm + xd + 1].shape
                    print 'ERROR -- cannot place the ghost to the image!!'
                    continue

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()

            #save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[scale] = out

    return res
def analyseOutofFocusImpact(log, filename='data/psf4x.fits', psfscale=100000, maxdistance=100, inner=8, outer=60,
                            oversample=4.0, psfs=5000, iterations=5, sigma=0.75, lowghost=-7, highghost=-2, samples=9):
    """
    Calculates PSF size and ellipticity when including an out-of-focus doughnut of a given contrast level.
    The doughnut pixel values are all scaled to a given scaling value (requirement 5e-5).

    :param log: logger instance
    :param filename: name of the PSF file to analyse
    :param psfscale: level to which the original PSF is scaled to
    :param maxdistance: maximum distance the ghost image can be from the original PSF (centre to centre)
    :param inner: inner radius of the out-of-focus doughnut
    :param outer: outer radius of the out-of-focus doughnut
    :param oversample: oversampling factor
    :param psfs: number of PSFs to analyse (number of ghosts in random locations)
    :param iterations: number of iterations in the shape measurement
    :param sigma: size of the Gaussian weighting function
    :param lowghost: log of the lowest ghost contrast ratio to study
    :param highghost: log of the highest ghost contrast ratio to study
    :param samples: number of points for the contrast ratio to study

    :return: results
    :rtype: dict
    """
    #read in PSF and renormalize it
    data = pf.getdata(filename)

    #place it on a larger canvas with zero padding around
    ys, xs = data.shape
    canvas = np.pad(data, int(maxdistance*oversample + xs + outer), mode='constant', constant_values=0)  #requires numpy >= 1.7.0
    ys, xs = canvas.shape
    xcen = int(np.round(xs/2., 0))
    ycen = int(np.round(ys/2., 0))
    #print canvas.shape
    #print canvas.flags
    canvas /= np.max(canvas)
    canvas *= float(psfscale)

    #make out of focus image, a simple doughnut
    img, xd, yd = drawDoughnut(inner, outer, oversample=oversample)

    #set sampling etc. for shape measurement
    settings = dict(sampling=1.0 / oversample, iterations=iterations, sigma=sigma)

    #positions
    x = np.round((np.random.rand(psfs)-0.5)*maxdistance*oversample, 0).astype(np.int)
    y = np.round((np.random.rand(psfs)-0.5)*maxdistance*oversample, 0).astype(np.int)

    #ghost level
    ghosts = np.logspace(lowghost, highghost, samples)[::-1]  #largest first
    tot = ghosts.size

    res = {}
    for i, scale in enumerate(ghosts):
        print 'ghost levels: %i / %i' % (i + 1, tot)

        R2 = []
        e1 = []
        e2 = []
        e = []

        #scale the doughnut pixel values, note that all pixels have the same value...
        scaled = img.copy() * (scale * psfscale)

        #loop over the ghost positions
        for xc, yc in zip(x, y):
            tmp = canvas.copy()
            xm = xcen + xc
            ym = ycen + yc
            try:
                tmp[ym-yd:ym+yd+1, xm-xd:xm+xd+1] += scaled
            except:
                try:
                    tmp[ym-yd:ym+yd, xm-xd:xm+xd] += scaled
                except:
                    print scaled.shape
                    print tmp[ym-yd:ym+yd+1, xm-xd:xm+xd+1].shape
                    print 'ERROR -- cannot place the ghost to the image!!'
                    continue

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()

            #save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[scale] = out

    return res
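#A minimal sketch (not part of the original analysis run) of how the two ghost-impact functions above
#might be driven and summarised. The reduced psfs/samples values are placeholders chosen only to keep
#the example fast; the production runs use the defaults.
def _exampleGhostImpactRun(log):
    """Illustrative only: run small in- and out-of-focus ghost analyses and print the R2 scatter."""
    infocus = analyseInFocusImpact(log, filename='data/psf4x.fits', psfs=50)
    outfocus = analyseOutofFocusImpact(log, filename='data/psf4x.fits', psfs=50, samples=3)
    for label, res in [('in-focus', infocus), ('out-of-focus', outfocus)]:
        for scale in sorted(res.keys()):
            r2 = res[scale]['R2']
            print '%s ghost contrast %.1e: <R2>=%.4f, std(R2)=%.4f' % (label, scale, np.mean(r2), np.std(r2))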
def analyseSpotsFitting(files, gaussian=False, pixelvalues=False, bessel=True, maxfev=10000):
    """
    Analyse spot measurements using different fitting methods.

    :param files: names of the FITS files to analyse (should match the IDs)
    :param gaussian: whether or not to do a simple Gaussian fitting analysis
    :param pixelvalues: whether or not to plot pixel values on a grid
    :param bessel: whether or not to do a Bessel + Gaussian convolution analysis
    :param maxfev: maximum number of iterations in the least squares fitting

    :return: None
    """
    log = lg.setUpLogger('spots.log')
    log.info('Starting...')

    over = 24
    settings = dict(iterations=8)

    ids = fileIDs()

    d = {}
    for filename in files:
        tmp = readData(filename, crop=False)
        f = filename.replace('small.fits', '')
        d[f] = tmp

    if pixelvalues:
        #plot different pixel values
        plotPixelValues(d, ids)

    if gaussian:
        #fit simple Gaussians
        Gaussians = {}
        for f, im in d.iteritems():
            #horizontal direction
            sumH = np.sum(im, axis=0)
            Hfit = gaussianFit(sumH, initials=[np.max(sumH) - np.median(sumH), 8., 0.4, np.median(sumH)])
            plotLineFits(sumH, Hfit, f)

            #vertical direction
            sumV = np.sum(im, axis=1)
            Vfit = gaussianFit(sumV, initials=[np.max(sumV) - np.median(sumV), 8., 0.4, np.median(sumV)])
            plotLineFits(sumV, Vfit, f, horizontal=False)

            #2D gaussian
            tmp = im.copy() - np.median(im)
            twoD = fit.Gaussian2D(tmp, initials=[np.max(tmp), 7, 7, 0.4, 0.4])

            print f, Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3], int(np.max(im))

            Gaussians[f] = [Hfit['sigma'], twoD[4], Vfit['sigma'], twoD[3]]

        fileIO.cPickleDumpDictionary(Gaussians, 'SpotmeasurementsGaussian.pk')

        plotGaussianResults(Gaussians, ids, output='line')
        plotGaussianResults(Gaussians, ids, output='twoD', vals=[1, 3])

    if bessel:
        Gaussians = {}
        #Bessel + Gaussian
        hf = 8 * over
        for f, im in d.iteritems():
            #if '21_59_31s' not in f:
            #    continue

            #over sample the data, needed for convolution
            oversampled = ndimage.zoom(im.copy(), over, order=0)
            fileIO.writeFITS(oversampled, f+'block.fits', int=False)

            #find the centre in oversampled frame, needed for bessel and gives a starting point for fitting
            tmp = oversampled.copy() - np.median(oversampled)
            sh = shape.shapeMeasurement(tmp, log, **settings)
            results = sh.measureRefinedEllipticity()
            midx = results['centreX'] - 1.
            midy = results['centreY'] - 1.

            #generate 2D bessel and re-centre using the above centroid, normalize to the maximum image value and
            #save to a FITS file.
bes = generateBessel(radius=0.45, oversample=over, size=16*over) shiftx = -midx + hf shifty = -midy + hf bes = ndimage.interpolation.shift(bes, [-shifty, -shiftx], order=0) bes /= np.max(bes) fileIO.writeFITS(bes, f+'bessel.fits', int=False) #check the residual with only the bessel and save to a FITS file t = ndimage.zoom(bes.copy(), 1./over, order=0) t /= np.max(t) fileIO.writeFITS(im.copy() - np.median(oversampled) - t*np.max(tmp), f+'residual.fits', int=False) fileIO.writeFITS(oversampled - bes.copy()*np.max(tmp), f+'residualOversampled.fits', int=False) #best guesses for fitting parameters params = [1., results['centreX'], results['centreY'], 0.5, 0.5] biassubtracted = im.copy() - np.median(oversampled) #error function is a convolution between a bessel function and 2D gaussian - data #note that the error function must be on low-res grid because it is the pixel values we try to match errfunc = lambda p: np.ravel(ndimage.zoom(signal.fftconvolve(fitf(*p)(*np.indices(tmp.shape)), bes.copy(), mode='same'), 1./over, order=0)*np.max(tmp) - biassubtracted.copy()) #fit res = sp.optimize.leastsq(errfunc, params, full_output=True, maxfev=maxfev) #save the fitted residuals t = signal.fftconvolve(fitf(*res[0])(*np.indices(tmp.shape)), bes.copy(), mode='same') fileIO.writeFITS(res[2]['fvec'].reshape(im.shape), f+'residualFit.fits', int=False) fileIO.writeFITS(fitf(*res[0])(*np.indices(tmp.shape)), f+'gaussian.fits', int=False) fileIO.writeFITS(t, f+'BesselGausOversampled.fits', int=False) fileIO.writeFITS(ndimage.zoom(t, 1./over, order=0), f+'BesselGaus.fits', int=False) #print out the results and save to a dictionary print results['centreX'], results['centreY'], res[2]['nfev'], res[0] #sigmas are symmetric as the width of the fitting function is later squared... sigma1 = np.abs(res[0][3]) sigma2 = np.abs(res[0][4]) Gaussians[f] = [sigma1, sigma2] fileIO.cPickleDumpDictionary(Gaussians, 'SpotmeasurementsBesselGaussian.pk') #plot the findings plotGaussianResults(Gaussians, ids, output='Bessel', vals=[0, 1])
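#The fit above minimises, on the native pixel grid, the difference between the data and an oversampled
#2D Gaussian convolved with a Bessel (diffraction) kernel that has been rebinned back to detector pixels.
#The sketch below is illustrative only: it uses a plain Gaussian stand-in instead of the module's fitf and
#generateBessel helpers, and relies on the same np/signal/ndimage imports used elsewhere in this file.
def _exampleBesselGaussianModel(sigma=0.5, over=24, size=16):
    """Illustrative only: evaluate a Gaussian convolved with a kernel on an oversampled grid, then rebin."""
    y, x = np.indices((size * over, size * over)).astype(np.float64)
    cen = size * over / 2.
    #oversampled Gaussian profile
    gauss = np.exp(-0.5 * (((x - cen) / (sigma * over))**2 + ((y - cen) / (sigma * over))**2))
    #stand-in for the Bessel kernel: here simply a narrow, peak-normalised Gaussian
    kernel = np.exp(-0.5 * (((x - cen) / over)**2 + ((y - cen) / over)**2))
    kernel /= np.max(kernel)
    model = signal.fftconvolve(gauss, kernel, mode='same')
    #rebin to the native pixel grid, as done in the error function of the fit above
    return ndimage.zoom(model, 1. / over, order=0)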
def shapeMovie(filename='/Users/sammy/EUCLID/CTItesting/Reconciliation/damaged_image_parallel.fits',
               sigma=0.75, scale=False, zoom=30, frames=20, subtractMedian=False):
    settings = dict(sigma=sigma, iterations=1)
    #settings = dict(sigma=sigma, iterations=1, fixedPosition=True, fixedX=85.0, fixedY=85.)

    l = lg.setUpLogger('CTItesting.log')

    data = pf.getdata(filename)
    if scale:
        data /= np.max(data)
        data *= 1.e5
    if subtractMedian:
        data -= np.median(data)
        data = data[78:90, 78:90]  #also limit the area

    sh = shape.shapeMeasurement(data, l, **settings)
    results = sh.measureRefinedEllipticity()
    #position angle of the ellipse; matplotlib's Ellipse expects the angle in degrees
    ang = np.degrees(0.5 * np.arctan(results['e2'] / results['e1']))

    fig, axarr = plt.subplots(1, 2, sharey=True)
    ax1 = axarr[0]
    ax2 = axarr[1]
    fig.subplots_adjust(wspace=0)

    ax1.set_title(r'Image w/ CTI')
    ax2.set_title(r'Gaussian Weighted')

    #no ticks on the right hand side plot
    plt.setp(ax2.get_yticklabels(), visible=False)

    ax1.imshow(data, origin='lower')
    ax2.imshow(results['GaussianWeighted'], origin='lower')

    if zoom is not None:
        ax1.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax2.set_xlim(results['centreX'] - zoom - 1, results['centreX'] + zoom)
        ax1.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)
        ax2.set_ylim(results['centreY'] - zoom - 1, results['centreY'] + zoom)

    text = ax2.text(0.02, 0.95, '', transform=ax2.transAxes, color='white')
    e = Ellipse(xy=(results['centreX'] - 1, results['centreY'] - 1), width=results['a'], height=results['b'],
                angle=ang, facecolor='none', ec='white', lw=2)

    def init():
        # initialization function: plot the background of each frame
        ax2.imshow([[], []])
        fig.gca().add_artist(e)
        text.set_text(' ')
        return ax2, text, e

    def animate(i):
        settings = dict(sigma=sigma, iterations=i + 1)
        #settings = dict(sigma=sigma, iterations=i+1, fixedPosition=True, fixedX=85.0, fixedY=85.)
        sh = shape.shapeMeasurement(data, l, **settings)
        results = sh.measureRefinedEllipticity()
        text.set_text(r'%i iterations, $e \sim %.4f$' % (i + 1, results['ellipticity']))
        ax2.imshow(results['GaussianWeighted'], origin='lower')
        #again, convert the position angle to degrees for the Ellipse patch
        ang = np.degrees(0.5 * np.arctan(results['e2'] / results['e1']))
        e.center = (results['centreX'] - 1, results['centreY'] - 1)
        e.width = results['a']
        e.height = results['b']
        e.angle = ang
        return ax2, text, e

    #note that frames defines the number of times the animate function is called
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=frames, interval=2, blit=True)
    anim.save('shapeMovie.mp4', fps=0.7)
def testNoFlatfieldingEffects(log, file='data/psf1x.fits', oversample=1.0, psfs=500):
    """
    Calculate ellipticity and size variance and error in case of no pixel-to-pixel flat field correction.
    """
    #read in PSF and renormalize it
    data = pf.getdata(file)
    data /= np.max(data)
    data *= 1e5

    #derive reference values
    settings = dict(sampling=1.0/oversample)
    sh = shape.shapeMeasurement(data.copy(), log, **settings)
    reference = sh.measureRefinedEllipticity()
    print reference

    #residual
    residual = pf.getdata('data/VISFlatField2percent.fits')  #'data/VISFlatField1percent.fits'
    if oversample == 4.0:
        residual = zoom(zoom(residual, 2, order=0), 2, order=0)
    elif oversample == 1.0:
        pass
    else:
        print 'ERROR--cannot do arbitrary oversampling...'

    #random positions for the PSFs, these positions are the lower corners
    xpositions = np.random.random_integers(0, residual.shape[1] - data.shape[1], psfs)
    ypositions = np.random.random_integers(0, residual.shape[0] - data.shape[0], psfs)

    #data storage
    out = {}
    de1 = []
    de2 = []
    de = []
    R2 = []
    dR2 = []
    e1 = []
    e2 = []
    e = []

    rnd = 1
    tot = xpositions.size
    #loop over the PSFs
    for xpos, ypos in zip(xpositions, ypositions):
        print '%i / %i' % (rnd, tot)
        rnd += 1

        #make a copy of the PSF
        tmp = data.copy()

        #get the underlying residual surface and multiply the PSF by the surface
        small = residual[ypos:ypos+data.shape[0], xpos:xpos+data.shape[1]].copy()
        small *= tmp

        #measure e and R2 from the postage stamp image
        sh = shape.shapeMeasurement(small.copy(), log, **settings)
        results = sh.measureRefinedEllipticity()

        #save values
        e1.append(results['e1'])
        e2.append(results['e2'])
        e.append(results['ellipticity'])
        R2.append(results['R2'])
        de1.append(results['e1'] - reference['e1'])
        de2.append(results['e2'] - reference['e2'])
        de.append(results['ellipticity'] - reference['ellipticity'])
        dR2.append(results['R2'] - reference['R2'])

    out[1] = [e1, e2, e, R2, de1, de2, de, dR2]
    return out, reference
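#A minimal sketch (not part of the original module) showing how the Monte Carlo output of
#testNoFlatfieldingEffects might be condensed into the quantities of interest, namely the scatter of
#the ellipticity and size deltas with respect to the reference PSF.
def _exampleSummariseFlatFieldRun(out, reference):
    """Illustrative only: print the standard deviation of delta-e and delta-R2/R2."""
    e1, e2, e, R2, de1, de2, de, dR2 = [np.asarray(x) for x in out[1]]
    print 'std(de1) = %e' % np.std(de1)
    print 'std(de2) = %e' % np.std(de2)
    print 'std(de)  = %e' % np.std(de)
    print 'std(dR2/R2) = %e' % np.std(dR2 / reference['R2'])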
def testNonlinearityModelTransfer(log, file='data/psf12x.fits', oversample=12.0, sigma=0.75, psfs=5000, amps=12,
                                  multiplier=1.5, minerror=-5., maxerror=-1., lowflux=500, highflux=180000,
                                  linspace=False):
    """
    Function to study the error in the non-linearity correction on the knowledge of the PSF ellipticity and size.

    The error has been assumed to follow a sinusoidal curve with random phase and a given number of angular
    frequencies (defined by the multiplier). The amplitudes being studied, i.e. the size of the maximum deviation,
    can be spaced either linearly or logarithmically.

    :param log: logger instance
    :type log: instance
    :param file: name of the PSF FITS files to use [default=data/psf12x.fits]
    :type file: str
    :param oversample: the PSF oversampling factor, which needs to match the input file [default=12]
    :type oversample: float
    :param sigma: 1sigma radius of the Gaussian weighting function for shape measurements
    :type sigma: float
    :param psfs: the number of PSFs to use (i.e. the number of random phases to Monte Carlo)
    :type psfs: int
    :param amps: the number of individual samplings used when covering the error space
    :type amps: int
    :param multiplier: the number of angular frequencies to be used
    :type multiplier: int or float
    :param minerror: the minimum error to be covered, given in log10(min_error) [default=-5 i.e. 0.001%]
    :type minerror: float
    :param maxerror: the maximum error to be covered, given in log10(max_error) [default=-1 i.e. 10%]
    :type maxerror: float
    :param lowflux: the low flux level (in electrons) to which the PSF is scaled
    :type lowflux: int or float
    :param highflux: the high flux level (in electrons) to which the PSF is scaled
    :type highflux: int or float
    :param linspace: whether the amplitudes of the error curves should be linearly or logarithmically spaced
    :type linspace: boolean

    :return: reference value and results dictionaries
    :rtype: list
    """
    #read in PSF and renormalize it to norm
    data = pf.getdata(file)
    data /= np.max(data)

    #derive reference values from clean PSF
    settings = dict(sampling=1.0/oversample, sigma=sigma)
    sh = shape.shapeMeasurement(data.copy()*175000., log, **settings)
    reference = sh.measureRefinedEllipticity()

    #range of amplitudes to study
    if linspace:
        amplitudes = np.linspace(10**minerror, 10**maxerror, amps)[::-1]  #flip so that the largest is first
    else:
        amplitudes = np.logspace(minerror, maxerror, amps)[::-1]

    out = {}
    #loop over all the amplitudes to be studied
    for i, amp in enumerate(amplitudes):
        print 'Run %i / %i with amplitude=%e' % (i+1, amps, amp)

        R2 = []
        e1 = []
        e2 = []
        e = []

        #random phases to Monte Carlo
        ph = np.random.random(psfs)
        for phase in ph:
            #apply nonlinearity model to the PSF scaled to the high flux level
            scaled = data.copy() * highflux
            newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(scaled, amp, phase=phase, multi=multiplier)

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(newdata, log, **settings)
            resultsH = sh.measureRefinedEllipticity()

            #apply nonlinearity model to the PSF scaled to the low flux level
            scaled = data.copy() * lowflux
            newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(scaled, amp, phase=phase, multi=multiplier)

            #measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(newdata, log, **settings)
            resultsL = sh.measureRefinedEllipticity()

            #save values
            e1.append(resultsH['e1'] - resultsL['e1'])
            e2.append(resultsH['e2'] - resultsL['e2'])
            e.append(resultsH['ellipticity'] - resultsL['ellipticity'])
            R2.append(resultsH['R2'] - resultsL['R2'])

        out[amp] = [e1, e2, e, R2]

    return reference, out
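#A minimal sketch (not part of the original module) of how the per-amplitude Monte Carlo output of
#testNonlinearityModelTransfer might be summarised: for each error amplitude the relevant numbers are
#the scatter of the high-flux minus low-flux ellipticity and size differences.
def _exampleSummariseNonlinearityTransfer(out):
    """Illustrative only: print the std of delta-e1, delta-e2, delta-e and delta-R2 per amplitude."""
    for amp in sorted(out.keys()):
        e1, e2, e, R2 = [np.asarray(x) for x in out[amp]]
        print 'amplitude %.2e: std(de1)=%.2e std(de2)=%.2e std(de)=%.2e std(dR2)=%.2e' % \
              (amp, np.std(e1), np.std(e2), np.std(e), np.std(R2))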
#scipy centre-of-mass #midy, midx = scipy.ndimage.measurements.center_of_mass(data) #peak location, take a small cutout around this region #midy, midx = scipy.ndimage.measurements.maximum_position(data) #data = data[midy-1000:midy+1001, midx-1000:midx+1001] #data centre pixels ceny, cenx = data.shape ceny /= 2 cenx /= 2 #second order moments centroid finding settings = dict(sampling=1./12., iterations=9, sigma=0.09) res = shape.shapeMeasurement(data, log, **settings).measureRefinedEllipticity() midx = res['centreX'] - 1 midy = res['centreY'] - 1 #interpolate to new location #note however that sinc interpolation should probably be used instead of spline... shiftx = -midx + cenx shifty = -midy + ceny cutout = interpolation.shift(data, [shifty, shiftx], order=3) #take a cutout to match size my, mx = cutout.shape mx /= 2 my /= 2 cutout = cutout[my - cut:my + cut, mx - cut:mx + cut]
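#The recentring above relies on the scipy convention that interpolation.shift moves the image by the
#given (y, x) offsets, so an offset of (ceny - midy, cenx - midx) places the measured centroid at the
#array centre. A toy check of that convention (illustrative only; it uses the same interpolation import
#as the rest of this module):
def _exampleRecentring():
    """Illustrative only: shift a delta-function peak to the centre of an 11x11 array."""
    toy = np.zeros((11, 11))
    toy[3, 4] = 1.0                    #peak at (y, x) = (3, 4)
    ceny, cenx = 5, 5                  #target centre
    shifted = interpolation.shift(toy, [ceny - 3, cenx - 4], order=3)
    print np.unravel_index(np.argmax(shifted), shifted.shape)   #should print (5, 5)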
def testCentroidingImpactSingleDirection(log, psf='/Users/sammy/EUCLID/vissim-python/data/psf12x.fits',
                                         canvas=16, ran=13, zoom=12, iterations=50, sigma=0.75,
                                         gaussian=False, interpolation=False, save=True):
    """
    Test the impact of centroid shifts on the measured shape by shifting an oversampled PSF one
    (oversampled) pixel at a time in x and in y and rebinning it to the nominal sampling before
    the shape measurement.

    :return: shape measurement results for each x shift and for each y shift
    :rtype: list, list
    """
    settings = dict(sampling=zoom / 12.0, iterations=iterations, sigma=sigma)

    if gaussian:
        data = Gaussian2D(256., 256., 25, 25)['Gaussian']
        data *= 1e5
    else:
        data = pf.getdata(psf)

    xres = []
    print 'X shifts'
    for x in range(ran):
        tmp = data.copy()[canvas:-canvas+1, canvas-x:-canvas-x+1]

        if interpolation:
            if gaussian:
                psf = frebin(tmp, 40, nlout=40)
            else:
                size = tmp.shape[0] / 12
                psf = frebin(tmp, size, nlout=size, total=True)
        else:
            print tmp.shape
            psf = ndimage.zoom(tmp, 1.0/zoom, order=0)

        if save:
            out = 'PSFx%i' % x
            if gaussian:
                out += 'Gaussian'
            if interpolation:
                out += 'Interpolated'
            out += '.fits'
            fileIO.writeFITS(psf, out, int=False)

        sh = shape.shapeMeasurement(psf, log, **settings)
        results = sh.measureRefinedEllipticity()

        xres.append([x, results])

        print x, psf.shape, np.sum(psf), np.max(psf), results['e1'], results['e2'], results['ellipticity']

    yres = []
    print 'Y shifts'
    for y in range(ran):
        tmp = data.copy()[canvas-y:-canvas-y+1, canvas:-canvas+1]

        if interpolation:
            if gaussian:
                psf = frebin(tmp, 40, nlout=40)
            else:
                size = tmp.shape[0] / 12
                psf = frebin(tmp, size, nlout=size, total=True)
        else:
            psf = ndimage.zoom(tmp, 1.0/zoom, order=0)

        sh = shape.shapeMeasurement(psf, log, **settings)
        results = sh.measureRefinedEllipticity()

        yres.append([y, results])

        print y, psf.shape, np.sum(psf), np.max(psf), results['e1'], results['e2'], results['ellipticity']

    return xres, yres
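#A minimal sketch (not part of the original module) of how the per-shift results returned by
#testCentroidingImpactSingleDirection might be turned into ellipticity changes relative to the
#unshifted position; the shifts are in units of one oversampled pixel.
def _exampleCentroidingDeltas(xres):
    """Illustrative only: print delta-e1 and delta-e2 for each x shift relative to shift 0."""
    ref = xres[0][1]
    for x, results in xres:
        print 'shift %2i (oversampled pixels): de1=%+.5f de2=%+.5f' % (x, results['e1'] - ref['e1'],
                                                                       results['e2'] - ref['e2'])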
def testCosmicrayRejection(log, file='data/psf1x.fits', oversample=1.0, sigma=0.75, psfs=20000, scale=1e3, min=1e-5, max=50, levels=15, covering=1.4, single=False): """ This is for a single PSF. :param log: :param file: :param oversample: :param sigma: :param psfs: :param scale: :param min: :param max: :param levels: :param covering: :param single: :return: """ data = pf.getdata(file) data /= np.max(data) #derive reference values from clean PSF settings = dict(sampling=1.0 / oversample, sigma=sigma, iterations=6) scaled = data.copy() * scale sh = shape.shapeMeasurement(scaled.copy(), log, **settings) reference = sh.measureRefinedEllipticity() cosmics = cosmicrays.cosmicrays(log, np.zeros((2, 2))) crInfo = cosmics._readCosmicrayInformation() out = {} #loop over all the amplitudes to be studied for level in np.logspace(np.log10(min), np.log10(max), levels): print 'Deposited Energy of Cosmic Rays: %e electrons' % level de1 = [] de2 = [] de = [] R2 = [] dR2 = [] e1 = [] e2 = [] e = [] for i in range(psfs): print 'Run %i / %i' % (i + 1, psfs) #add cosmic rays to the scaled image cosmics = cosmicrays.cosmicrays(log, scaled.copy(), crInfo=crInfo) #newdata = cosmics.addCosmicRays(limit=level) if single: newdata = cosmics.addSingleEvent(limit=level) else: newdata = cosmics.addUpToFraction(covering, limit=level) #measure e and R2 from the postage stamp image sh = shape.shapeMeasurement(newdata.copy(), log, **settings) results = sh.measureRefinedEllipticity() #save values e1.append(results['e1']) e2.append(results['e2']) e.append(results['ellipticity']) R2.append(results['R2']) de1.append(results['e1'] - reference['e1']) de2.append(results['e2'] - reference['e2']) de.append(results['ellipticity'] - reference['ellipticity']) dR2.append(results['R2'] - reference['R2']) out[level] = [e1, e2, e, R2, de1, de2, de, dR2] return reference, out
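#A minimal sketch (not part of the original module) showing how the output of testCosmicrayRejection
#might be summarised per deposited-energy level: the mean and scatter of the ellipticity and size
#deltas with respect to the clean reference PSF.
def _exampleSummariseCosmicrayRun(reference, out):
    """Illustrative only: print mean and std of delta-e and the mean fractional R2 change per level."""
    for level in sorted(out.keys()):
        e1, e2, e, R2, de1, de2, de, dR2 = [np.asarray(x) for x in out[level]]
        print 'level %.2e electrons: <de>=%+.2e std(de)=%.2e <dR2>/R2=%+.2e' % \
              (level, np.mean(de), np.std(de), np.mean(dR2) / reference['R2'])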