def _testAverageVersusCopy(self, withNaNs=False):
    """Re-run `testExampleTaskNoOverlaps` and `testExampleTaskWithOverlaps` on a more
    complex image (with random noise). Ensure that the results are identical
    (to within a small tolerance) between the 'copy' and 'average' reduceOperation.
    """
    exposure1 = self.exposure.clone()
    img = exposure1.getMaskedImage().getImage()
    afwMath.randomGaussianImage(img, afwMath.Random())
    exposure2 = exposure1.clone()

    config = AddAmountImageMapReduceConfig()
    task = ImageMapReduceTask(config)
    config.mapper.addAmount = 5.
    newExp = task.run(exposure1, addNans=withNaNs).exposure
    newMI1 = newExp.getMaskedImage()

    config.gridStepX = config.gridStepY = 8.
    config.reducer.reduceOperation = 'average'
    task = ImageMapReduceTask(config)
    newExp = task.run(exposure2, addNans=withNaNs).exposure
    newMI2 = newExp.getMaskedImage()

    newMA1 = newMI1.getImage().getArray()
    isnan = np.isnan(newMA1)
    if not withNaNs:
        self.assertEqual(np.sum(isnan), 0)
    newMA2 = newMI2.getImage().getArray()

    # Because the average uses a float accumulator, we can have differences, so set a tolerance.
    # It turns out (in practice for this test) that only 7 pixels have a small difference.
    self.assertFloatsAlmostEqual(newMA1[~isnan], newMA2[~isnan], rtol=1e-7)
def addNoise(self, mi):
    img = mi.getImage()
    seed = int(afwMath.makeStatistics(mi.getVariance(), afwMath.MEDIAN).getValue())
    rdm = afwMath.Random(afwMath.Random.MT19937, seed)
    rdmImage = img.Factory(img.getDimensions())
    afwMath.randomGaussianImage(rdmImage, rdm)
    img += rdmImage
    return afwMath.makeStatistics(rdmImage, afwMath.MEAN).getValue(afwMath.MEAN)
def makeFlatNoiseImage(mi, seedStat=afwMath.MAX):
    img = mi.getImage()
    seed = int(10.*afwMath.makeStatistics(mi.getImage(), seedStat).getValue() + 1)
    rdm = afwMath.Random(afwMath.Random.MT19937, seed)
    rdmImage = img.Factory(img.getDimensions())
    afwMath.randomGaussianImage(rdmImage, rdm)
    return rdmImage
def testChipGapHorizontalBackground(self):
    """Test that an image with a horizontal chip gap (a row of NaNs) can be matched with .Background."""
    self.matcher.config.usePolynomial = False
    self.matcher.config.binSize = 64
    chipGapHorizontal = afwImage.ExposureF(600, 600)
    im = chipGapHorizontal.getMaskedImage().getImage()
    afwMath.randomGaussianImage(im, afwMath.Random())
    im += 10
    im.getArray()[200:300, :] = np.nan  # simulate a 100-pixel horizontal chip gap
    chipGapHorizontal.getMaskedImage().getVariance().set(1.0)
    self.checkAccuracy(self.vanilla, chipGapHorizontal)
def addNoise(mi):
    sfac = 1.0
    img = mi.getImage()
    # NOTE: the original snippet used `rdm` without defining it; assume a
    # default-constructed random number generator here.
    rdm = afwMath.Random()
    rdmImage = img.Factory(img.getDimensions())
    afwMath.randomGaussianImage(rdmImage, rdm)
    rdmImage *= sfac
    img += rdmImage
    # and don't forget to add to the variance
    var = mi.getVariance()
    var += sfac
def addNoise(mi):
    img = mi.getImage()
    seed = int(afwMath.makeStatistics(mi.getVariance(), afwMath.MAX).getValue()) + 1
    rdm = afwMath.Random(afwMath.Random.MT19937, seed)
    rdmImage = img.Factory(img.getDimensions())
    afwMath.randomGaussianImage(rdmImage, rdm)
    rdmImage *= num.sqrt(seed)  # `num` is assumed to be numpy (import numpy as num) in this module
    img += rdmImage
    # and don't forget to add to the variance
    var = mi.getVariance()
    var += 1.0
def testRampApproximate(self):
    """Test basic matching of a linear gradient with Approximate."""
    self.matcher.config.binSize = 64
    testExp = afwImage.ExposureF(self.vanilla, True)
    testIm = testExp.getMaskedImage().getImage()
    afwMath.randomGaussianImage(testIm, afwMath.Random())
    nx, ny = testExp.getDimensions()
    dzdx, dzdy, z0 = 1, 2, 0.0
    for x in range(nx):
        for y in range(ny):
            z = testIm.get(x, y)
            testIm.set(x, y, z + dzdx*x + dzdy*y + z0)
    self.checkAccuracy(testExp, self.vanilla)
def testRampBackground(self):
    """Test basic matching of a linear gradient with .Background."""
    self.matcher.config.usePolynomial = False
    self.matcher.config.binSize = 64
    testExp = afwImage.ExposureF(self.vanilla, True)
    testIm = testExp.getMaskedImage().getImage()
    afwMath.randomGaussianImage(testIm, afwMath.Random())
    nx, ny = testExp.getDimensions()
    dzdx, dzdy, z0 = 1, 2, 0.0
    for x in range(nx):
        for y in range(ny):
            z = testIm[x, y, afwImage.LOCAL]
            testIm[x, y, afwImage.LOCAL] = z + dzdx*x + dzdy*y + z0
    self.checkAccuracy(testExp, self.vanilla)
def makeImage(width=500, height=1000):
    mi = afwImage.MaskedImageF(width, height)
    var = 50
    mi.set(1000, 0x0, var)
    addSaturated(mi, addCrosstalk=True)

    ralg, rseed = "MT19937", int(time.time()) if True else 1234
    noise = afwImage.ImageF(width, height)
    afwMath.randomGaussianImage(noise, afwMath.Random(ralg, rseed))
    noise *= math.sqrt(var)
    mi += noise

    return mi
def test1(self):
    task = measAlg.ReplaceWithNoiseTask()
    schema = afwTable.SourceTable.makeMinimalSchema()
    table = afwTable.SourceTable.make(schema)
    sources = afwTable.SourceCatalog(table)

    im = afwImage.ImageF(200, 50)
    seed = 42
    rand = afwMath.Random(afwMath.Random.MT19937, seed)
    afwMath.randomGaussianImage(im, rand)

    s = sources.addNew()
    s.setId(1)
    fp = afwDet.Footprint()
    y, x0, x1 = (10, 10, 190)
    im.getArray()[y, x0:x1] = 10
    fp.addSpan(y, x0, x1)
    s.setFootprint(fp)

    s = sources.addNew()
    s.setId(2)
    fp = afwDet.Footprint()
    y, x0, x1 = (40, 10, 190)
    im.getArray()[y, x0:x1] = 10
    fp.addSpan(y, x0, x1)
    s.setFootprint(fp)

    mi = afwImage.MaskedImageF(im)
    exposure = afwImage.makeExposure(mi)

    self._save(mi, 'a')
    task.begin(exposure, sources)
    self._save(mi, 'b')

    sourcei = 0
    task.insertSource(exposure, sourcei)
    self._save(mi, 'c')
    # do something
    task.removeSource(exposure, sources, sources[sourcei])
    self._save(mi, 'd')

    sourcei = 1
    task.insertSource(exposure, sourcei)
    self._save(mi, 'e')
    # do something
    task.removeSource(exposure, sources, sources[sourcei])
    self._save(mi, 'f')

    task.end(exposure, sources)
    self._save(mi, 'g')
def setUp(self):
    self.min, self.range, self.Q = 0, 5, 20  # asinh
    width, height = 85, 75
    self.images = []
    self.images.append(afwImage.ImageF(lsst.geom.ExtentI(width, height)))
    self.images.append(afwImage.ImageF(lsst.geom.ExtentI(width, height)))
    self.images.append(afwImage.ImageF(lsst.geom.ExtentI(width, height)))

    for (x, y, A, g_r, r_i) in [(15, 15, 1000, 1.0, 2.0),
                                (50, 45, 5500, -1.0, -0.5),
                                (30, 30, 600, 1.0, 2.5),
                                (45, 15, 20000, 1.0, 1.0),
                                ]:
        for i in range(len(self.images)):
            if i == B:
                amp = A
            elif i == G:
                amp = A*math.pow(10, 0.4*g_r)
            elif i == R:
                amp = A*math.pow(10, 0.4*r_i)
            self.images[i][x, y, afwImage.LOCAL] = amp

    psf = afwMath.AnalyticKernel(15, 15, afwMath.GaussianFunction2D(2.5, 1.5, 0.5))

    convolvedImage = type(self.images[0])(self.images[0].getDimensions())
    randomImage = type(self.images[0])(self.images[0].getDimensions())
    rand = afwMath.Random("MT19937", 666)
    convolutionControl = afwMath.ConvolutionControl()
    convolutionControl.setDoNormalize(True)
    convolutionControl.setDoCopyEdge(True)
    for i in range(len(self.images)):
        afwMath.convolve(convolvedImage, self.images[i], psf, convolutionControl)
        afwMath.randomGaussianImage(randomImage, rand)
        randomImage *= 2
        convolvedImage += randomImage
        self.images[i][:] = convolvedImage
    del convolvedImage
    del randomImage
def setUp(self):
    np.random.seed(1)
    # Make a few test images here
    # 1) full coverage (plain vanilla image), mean = 50 counts
    self.vanilla = afwImage.ExposureF(600, 600)
    im = self.vanilla.getMaskedImage().getImage()
    afwMath.randomGaussianImage(im, afwMath.Random('MT19937', 1))
    im += 50
    self.vanilla.getMaskedImage().getVariance().set(1.0)

    # 2) has a chip gap, mean = 10 counts
    self.chipGap = afwImage.ExposureF(600, 600)
    im = self.chipGap.getMaskedImage().getImage()
    afwMath.randomGaussianImage(im, afwMath.Random('MT19937', 2))
    im += 10
    im.getArray()[:, 200:300] = np.nan  # simulate a 100-pixel chip gap
    self.chipGap.getMaskedImage().getVariance().set(1.0)

    # 3) has low coverage, mean = 20 counts
    self.lowCover = afwImage.ExposureF(600, 600)
    im = self.lowCover.getMaskedImage().getImage()
    afwMath.randomGaussianImage(im, afwMath.Random('MT19937', 3))
    im += 20
    self.lowCover.getMaskedImage().getImage().getArray()[:, 200:] = np.nan
    self.lowCover.getMaskedImage().getVariance().set(1.0)

    # make a matchBackgrounds object
    self.matcher = MatchBackgroundsTask()
    self.matcher.config.usePolynomial = True
    self.matcher.binSize = 64
    self.matcher.debugDataIdString = 'Test Visit'

    self.sctrl = afwMath.StatisticsControl()
    self.sctrl.setNanSafe(True)
    self.sctrl.setAndMask(afwImage.Mask.getPlaneBitMask(["NO_DATA", "DETECTED",
                                                         "DETECTED_NEGATIVE", "SAT",
                                                         "BAD", "INTRP", "CR"]))
def matchBackgrounds(self, refExposure, sciExposure):
    """Match the science exposure's background level to that of the reference exposure.

    The process creates a difference image of the reference exposure minus the science
    exposure, and then generates an afw.math.Background object. It assumes (but does not
    require/check) that the mask plane already has detections set. If detections have not
    been set/masked, sources will bias the background estimation.

    The 'background' of the difference image is smoothed by spline interpolation (by the
    Background class) or by polynomial interpolation (by the Approximate class). This model
    of the difference image is added to the science exposure in memory.

    Fit diagnostics are also calculated and returned.

    @param[in] refExposure: reference exposure
    @param[in,out] sciExposure: science exposure; modified by changing the background level
        to match that of the reference exposure
    @returns a pipeBase.Struct with fields:
        - backgroundModel: an afw.math.Approximate or an afw.math.Background.
        - fitRMS: RMS of the fit, i.e. sqrt(mean(residuals**2)).
        - matchedMSE: the MSE of the reference and matched images:
            mean((refImage - matchedSciImage)**2); should be comparable to the
            difference image's mean variance.
        - diffImVar: the mean variance of the difference image.
    """
    if lsstDebug.Info(__name__).savefits:
        refExposure.writeFits(lsstDebug.Info(__name__).figpath + 'refExposure.fits')
        sciExposure.writeFits(lsstDebug.Info(__name__).figpath + 'sciExposure.fits')

    # Check Configs for polynomials:
    if self.config.usePolynomial:
        x, y = sciExposure.getDimensions()
        shortSideLength = min(x, y)
        if shortSideLength < self.config.binSize:
            raise ValueError("%d = config.binSize > shorter dimension = %d" %
                             (self.config.binSize, shortSideLength))
        npoints = shortSideLength // self.config.binSize
        if shortSideLength % self.config.binSize != 0:
            npoints += 1

        if self.config.order > npoints - 1:
            raise ValueError("%d = config.order > npoints - 1 = %d" %
                             (self.config.order, npoints - 1))

    # Check that exposures are same shape
    if (sciExposure.getDimensions() != refExposure.getDimensions()):
        wSci, hSci = sciExposure.getDimensions()
        wRef, hRef = refExposure.getDimensions()
        raise RuntimeError(
            "Exposures are different dimensions. sci:(%i, %i) vs. ref:(%i, %i)" %
            (wSci, hSci, wRef, hRef))

    statsFlag = getattr(afwMath, self.config.gridStatistic)
    self.sctrl.setNumSigmaClip(self.config.numSigmaClip)
    self.sctrl.setNumIter(self.config.numIter)

    im = refExposure.getMaskedImage()
    diffMI = im.Factory(im, True)
    diffMI -= sciExposure.getMaskedImage()

    width = diffMI.getWidth()
    height = diffMI.getHeight()
    nx = width // self.config.binSize
    if width % self.config.binSize != 0:
        nx += 1
    ny = height // self.config.binSize
    if height % self.config.binSize != 0:
        ny += 1

    bctrl = afwMath.BackgroundControl(nx, ny, self.sctrl, statsFlag)
    bctrl.setUndersampleStyle(self.config.undersampleStyle)
    bctrl.setInterpStyle(self.config.interpStyle)

    bkgd = afwMath.makeBackground(diffMI, bctrl)

    # Some config and input checks if config.usePolynomial:
    # 1) Check that order/bin size make sense
    # 2) Change binsize or order if underconstrained
    # 3) Add some tiny Gaussian noise if the image is completely uniform
    #    (change after ticket 2411)
    if self.config.usePolynomial:
        order = self.config.order
        bgX, bgY, bgZ, bgdZ = self._gridImage(diffMI, self.config.binSize, statsFlag)
        minNumberGridPoints = min(len(set(bgX)), len(set(bgY)))
        if len(bgZ) == 0:
            raise ValueError("No overlap with reference. Nothing to match")
        elif minNumberGridPoints <= self.config.order:
            # must either lower the order, raise the number of bins, or throw an exception
            if self.config.undersampleStyle == "THROW_EXCEPTION":
                raise ValueError("Image does not cover enough of ref image for order and binsize")
            elif self.config.undersampleStyle == "REDUCE_INTERP_ORDER":
                self.log.warn("Reducing order to %d" % (minNumberGridPoints - 1))
                order = minNumberGridPoints - 1
            elif self.config.undersampleStyle == "INCREASE_NXNYSAMPLE":
                newBinSize = (minNumberGridPoints*self.config.binSize)//(self.config.order + 1)
                bctrl.setNxSample(newBinSize)
                bctrl.setNySample(newBinSize)
                bkgd = afwMath.makeBackground(diffMI, bctrl)  # do over
                self.log.warn("Decreasing binsize to %d" % (newBinSize))

        if not any(dZ > 1e-8 for dZ in bgdZ) and not any(bgZ):  # uniform image
            gaussianNoiseIm = afwImage.ImageF(diffMI.getImage(), True)
            afwMath.randomGaussianImage(gaussianNoiseIm, afwMath.Random(1))
            gaussianNoiseIm *= 1e-8
            diffMI += gaussianNoiseIm
            bkgd = afwMath.makeBackground(diffMI, bctrl)

    # Add offset to sciExposure
    try:
        if self.config.usePolynomial:
            actrl = afwMath.ApproximateControl(afwMath.ApproximateControl.CHEBYSHEV,
                                               order, order)
            undersampleStyle = getattr(afwMath, self.config.undersampleStyle)
            approx = bkgd.getApproximate(actrl, undersampleStyle)
            bkgdImage = approx.getImage()
        else:
            bkgdImage = bkgd.getImageF()
    except Exception, e:
        raise RuntimeError("Background/Approximation failed to interp image %s: %s" %
                           (self.debugDataIdString, e))
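# --- Usage sketch (not from the original test suite) ---------------------------------
# A minimal, hedged example of driving the method above.  It assumes `refExposure`
# and `sciExposure` are overlapping afwImage.ExposureF objects whose detection mask
# planes are already set; the result fields follow the matchBackgrounds() docstring.
matcher = MatchBackgroundsTask()
matcher.config.usePolynomial = True   # fit a Chebyshev model via Approximate
matcher.config.binSize = 64
result = matcher.matchBackgrounds(refExposure, sciExposure)  # sciExposure is modified in place
fitRms, mse, diffVar = result.fitRMS, result.matchedMSE, result.diffImVar  # fit diagnostics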
proc.measurement.plotmasks = False

conf.measurement.noiseSource = 'meta'
conf.validate()
proc.measurement.prefix = 'meta-'
proc.measurement.run(res.exposure, res.sources)

print
print 'Running with "variance"'
conf.measurement.noiseSource = 'variance'
conf.measurement.noiseOffset = 5.
conf.validate()
proc.measurement.prefix = 'var-'
proc.measurement.run(res.exposure, res.sources)

print
print 'Running with "noiseim"'
proc.measurement.prefix = 'noiseim-'
rand = afwMath.Random()
exp = res.exposure
nim = afwImage.ImageF(exp.getWidth(), exp.getHeight())
afwMath.randomGaussianImage(nim, rand)
nim *= 500.
nim += 200.
proc.measurement.run(res.exposure, res.sources, noiseImage=nim)

print
print 'Running with "setnoise"'
proc.measurement.prefix = 'setnoise-'
proc.measurement.run(res.exposure, res.sources, noiseMeanVar=(50., 500))
def test2(self): # Check that doReplaceWithNoise works with deblended source # hierarchies. seed = 42 rand = afwMath.Random(afwMath.Random.MT19937, seed) psf = self.getpsf() im = afwImage.ImageF(200, 50) skystd = 100 afwMath.randomGaussianImage(im, rand) im *= skystd imorig = afwImage.ImageF(im, True) noiseim = imorig mi = afwImage.MaskedImageF(im) mi.getVariance().set(skystd**2) exposure = afwImage.makeExposure(mi) exposure.setPsf(psf) detconf = measAlg.SourceDetectionConfig() detconf.returnOriginalFootprints = True detconf.reEstimateBackground = False measconf = measAlg.SourceMeasurementConfig() measconf.doReplaceWithNoise = True measconf.replaceWithNoise.noiseSeed = 42 schema = afwTable.SourceTable.makeMinimalSchema() detect = measAlg.SourceDetectionTask(config=detconf, schema=schema) measure = MySourceMeasurementTask(config=measconf, schema=schema, doplot=plots) table = afwTable.SourceTable.make(schema) table.preallocate(10) # We're going to fake up a perfect deblend hierarchy here, by # creating individual images containing single sources and # measuring them, and then creating a deblend hierarchy where # the children have the correct HeavyFootprints. We want to # find that the measurements on the deblend hierarchy and the # blended image are equal to the individual images. # # Note that in the normal setup we don't expect the # measurements to be *identical* because of the faint wings of # the objects; when measuring a deblended child, we pick up # the wings of the other objects. # # In order to get exactly equal measurements, we'll fake some # sources that have no wings -- we'll copy just the source # pixels within the footprint. This means that all the # footprints are the same, and the pixels inside the footprint # are the same. fullim = None sources = None # "normal" measurements xx0,yy0,vx0,vy0 = [],[],[],[] # "no-wing" measurements xx1,yy1,vx1,vy1 = [],[],[],[] y = 25 for i in range(5): # no-noise source image sim = afwImage.ImageF(imorig.getWidth(), imorig.getHeight()) # Put all four sources in the parent (i==0), and one # source in each child (i=[1 to 4]) if i in [0,1]: addPsf(sim, psf, 20, y, 1000) if i in [0,2]: addGaussian(sim, 40, y, 10, 3, 2e5) if i in [0,3]: addGaussian(sim, 75, y, 10, 3, 2e5) if i in [0,4]: addPsf(sim, psf, 95, y, 1000) imcopy = afwImage.ImageF(imorig, True) imcopy += sim # copy the pixels into the exposure object im <<= imcopy if i == 0: detected = detect.makeSourceCatalog(table, exposure) sources = detected.sources print 'detected', len(sources), 'sources' self.assertEqual(len(sources), 1) else: fpSets = detect.detectFootprints(exposure) print 'detected', fpSets.numPos, 'sources' fpSets.positive.makeSources(sources) self.assertEqual(fpSets.numPos, 1) print len(sources), 'sources total' measure.plotpat = 'single-%i.png' % i measure.run(exposure, sources[-1:]) s = sources[-1] fp = s.getFootprint() if i == 0: # This is the blended image fullim = imcopy else: print 'Creating heavy footprint...' heavy = afwDet.makeHeavyFootprint(fp, mi) s.setFootprint(heavy) # Record the single-source measurements. 
xx0.append(s.getX()) yy0.append(s.getY()) vx0.append(s.getIxx()) vy0.append(s.getIyy()) # "no-wings": add just the source pixels within the footprint im <<= sim h = afwDet.makeHeavyFootprint(fp, mi) sim2 = afwImage.ImageF(imorig.getWidth(), imorig.getHeight()) h.insert(sim2) imcopy = afwImage.ImageF(imorig, True) imcopy += sim2 im <<= imcopy measure.plotpat = 'single2-%i.png' % i measure.run(exposure, sources[i:i+1], noiseImage=noiseim) s = sources[i] xx1.append(s.getX()) yy1.append(s.getY()) vx1.append(s.getIxx()) vy1.append(s.getIyy()) if i == 0: fullim2 = imcopy # Now we'll build the fake deblended hierarchy. parent = sources[0] kids = sources[1:] # Ensure that the parent footprint contains all the child footprints pfp = parent.getFootprint() for s in kids: for span in s.getFootprint().getSpans(): pfp.addSpan(span) pfp.normalize() #parent.setFootprint(pfp) # The parent-child relationship is established through the IDs parentid = parent.getId() for s in kids: s.setParent(parentid) # Reset all the measurements shkey = sources.getTable().getShapeKey() ckey = sources.getTable().getCentroidKey() for s in sources: sh = s.get(shkey) sh.setIxx(np.nan) sh.setIyy(np.nan) sh.setIxy(np.nan) s.set(shkey, sh) c = s.get(ckey) c.setX(np.nan) c.setY(np.nan) s.set(ckey, c) # Measure the "deblended" normal sources im <<= fullim measure.plotpat = 'joint-%(sourcenum)i.png' measure.run(exposure, sources) xx2,yy2,vx2,vy2 = [],[],[],[] for s in sources: xx2.append(s.getX()) yy2.append(s.getY()) vx2.append(s.getIxx()) vy2.append(s.getIyy()) # Measure the "deblended" no-wings sources im <<= fullim2 measure.plotpat = 'joint2-%(sourcenum)i.png' measure.run(exposure, sources, noiseImage=noiseim) xx3,yy3,vx3,vy3 = [],[],[],[] for s in sources: xx3.append(s.getX()) yy3.append(s.getY()) vx3.append(s.getIxx()) vy3.append(s.getIyy()) print 'Normal:' print 'xx ', xx0 print ' vs', xx2 print 'yy ', yy0 print ' vs', yy2 print 'vx ', vx0 print ' vs', vx2 print 'vy ', vy0 print ' vs', vy2 print 'No wings:' print 'xx ', xx1 print ' vs', xx3 print 'yy ', yy1 print ' vs', yy3 print 'vx ', vx1 print ' vs', vx3 print 'vy ', vy1 print ' vs', vy3 # These "normal" tests are not very stringent. # 0.1-pixel centroids self.assertTrue(all([abs(v1-v2) < 0.1 for v1,v2 in zip(xx0,xx2)])) self.assertTrue(all([abs(v1-v2) < 0.1 for v1,v2 in zip(yy0,yy2)])) # 10% variances self.assertTrue(all([abs(v1-v2)/((v1+v2)/2.) < 0.1 for v1,v2 in zip(vx0,vx2)])) self.assertTrue(all([abs(v1-v2)/((v1+v2)/2.) < 0.1 for v1,v2 in zip(vy0,vy2)])) # The "no-wings" tests should be exact. self.assertTrue(xx1 == xx3) self.assertTrue(yy1 == yy3) self.assertTrue(vx1 == vx3) self.assertTrue(vy1 == vy3) # Reset sources for s in sources: sh = s.get(shkey) sh.setIxx(np.nan) sh.setIyy(np.nan) sh.setIxy(np.nan) s.set(shkey, sh) c = s.get(ckey) c.setX(np.nan) c.setY(np.nan) s.set(ckey, c) # Test that the parent/child order is unimportant. im <<= fullim2 measure.doplot = False sources2 = sources.copy() perm = [2,1,0,3,4] for i,j in enumerate(perm): sources2[i] = sources[j] # I'm not convinced that HeavyFootprints get copied correctly... sources2[i].setFootprint(sources[j].getFootprint()) measure.run(exposure, sources2, noiseImage=noiseim) # "measure.run" reorders the sources! 
xx3,yy3,vx3,vy3 = [],[],[],[] for s in sources: xx3.append(s.getX()) yy3.append(s.getY()) vx3.append(s.getIxx()) vy3.append(s.getIyy()) self.assertTrue(xx1 == xx3) self.assertTrue(yy1 == yy3) self.assertTrue(vx1 == vx3) self.assertTrue(vy1 == vy3) # Reset sources for s in sources: sh = s.get(shkey) sh.setIxx(np.nan) sh.setIyy(np.nan) sh.setIxy(np.nan) s.set(shkey, sh) c = s.get(ckey) c.setX(np.nan) c.setY(np.nan) s.set(ckey, c) # Test that it still works when the parent ID falls in the middle of # the child IDs. im <<= fullim2 measure.doplot = False sources2 = sources.copy() parentid = 3 ids = [parentid, 1,2,4,5] for i,s in enumerate(sources2): s.setId(ids[i]) if i != 0: s.setParent(parentid) s.setFootprint(sources[i].getFootprint()) measure.run(exposure, sources2, noiseImage=noiseim) # The sources get reordered! xx3,yy3,vx3,vy3 = [],[],[],[] xx3,yy3,vx3,vy3 = [0]*5,[0]*5,[0]*5,[0]*5 for i,j in enumerate(ids): xx3[i] = sources2[j-1].getX() yy3[i] = sources2[j-1].getY() vx3[i] = sources2[j-1].getIxx() vy3[i] = sources2[j-1].getIyy() self.assertTrue(xx1 == xx3) self.assertTrue(yy1 == yy3) self.assertTrue(vx1 == vx3) self.assertTrue(vy1 == vy3)
def testRandomGaussianImage(self):
    afwMath.randomGaussianImage(self.image, self.rand)
def showPsfCandidates(exposure, psfCellSet, psf=None, frame=None, normalize=True, showBadCandidates=True, fitBasisComponents=False, variance=None, chi=None): """Display the PSF candidates. If psf is provided include PSF model and residuals; if normalize is true normalize the PSFs (and residuals) If chi is True, generate a plot of residuals/sqrt(variance), i.e. chi If fitBasisComponents is true, also find the best linear combination of the PSF's components (if they exist) """ if chi is None: if variance is not None: # old name for chi chi = variance # # Show us the ccandidates # mos = displayUtils.Mosaic() # candidateCenters = [] candidateCentersBad = [] candidateIndex = 0 for cell in psfCellSet.getCellList(): for cand in cell.begin(False): # include bad candidates cand = algorithmsLib.cast_PsfCandidateF(cand) rchi2 = cand.getChi2() if rchi2 > 1e100: rchi2 = numpy.nan if not showBadCandidates and cand.isBad(): continue if psf: im_resid = displayUtils.Mosaic(gutter=0, background=-5, mode="x") try: im = cand.getMaskedImage() # copy of this object's image xc, yc = cand.getXCenter(), cand.getYCenter() margin = 0 if True else 5 w, h = im.getDimensions() bbox = afwGeom.BoxI(afwGeom.PointI(margin, margin), im.getDimensions()) if margin > 0: bim = im.Factory(w + 2*margin, h + 2*margin) stdev = numpy.sqrt(afwMath.makeStatistics(im.getVariance(), afwMath.MEAN).getValue()) afwMath.randomGaussianImage(bim.getImage(), afwMath.Random()) bim *= stdev var = bim.getVariance(); var.set(stdev**2); del var sbim = im.Factory(bim, bbox) sbim <<= im del sbim im = bim xc += margin; yc += margin im = im.Factory(im, True) im.setXY0(cand.getMaskedImage().getXY0()) except: continue if not variance: im_resid.append(im.Factory(im, True)) if True: # tweak up centroids mi = im psfIm = mi.getImage() config = measAlg.SourceMeasurementConfig() config.centroider.name = "centroid.sdss" config.slots.centroid = config.centroider.name schema = afwTable.SourceTable.makeMinimalSchema() measureSources = config.makeMeasureSources(schema) catalog = afwTable.SourceCatalog(schema) config.slots.setupTable(catalog.table) extra = 10 # enough margin to run the sdss centroider miBig = mi.Factory(im.getWidth() + 2*extra, im.getHeight() + 2*extra) miBig[extra:-extra, extra:-extra] = mi miBig.setXY0(mi.getX0() - extra, mi.getY0() - extra) mi = miBig; del miBig exp = afwImage.makeExposure(mi) exp.setPsf(psf) footprintSet = afwDet.FootprintSet(mi, afwDet.Threshold(0.5*numpy.max(psfIm.getArray())), "DETECTED") footprintSet.makeSources(catalog) if len(catalog) == 0: raise RuntimeError("Failed to detect any objects") elif len(catalog) == 1: source = catalog[0] else: # more than one source; find the once closest to (xc, yc) for i, s in enumerate(catalog): d = numpy.hypot(xc - s.getX(), yc - s.getY()) if i == 0 or d < dmin: source, dmin = s, d measureSources.applyWithPeak(source, exp) xc, yc = source.getCentroid() # residuals using spatial model try: chi2 = algorithmsLib.subtractPsf(psf, im, xc, yc) except: chi2 = numpy.nan continue resid = im if variance: resid = resid.getImage() var = im.getVariance() var = var.Factory(var, True) numpy.sqrt(var.getArray(), var.getArray()) # inplace sqrt resid /= var im_resid.append(resid) # Fit the PSF components directly to the data (i.e. 
ignoring the spatial model) if fitBasisComponents: im = cand.getMaskedImage() im = im.Factory(im, True) im.setXY0(cand.getMaskedImage().getXY0()) noSpatialKernel = afwMath.cast_LinearCombinationKernel(psf.getKernel()) candCenter = afwGeom.PointD(cand.getXCenter(), cand.getYCenter()) fit = algorithmsLib.fitKernelParamsToImage(noSpatialKernel, im, candCenter) params = fit[0] kernels = afwMath.KernelList(fit[1]) outputKernel = afwMath.LinearCombinationKernel(kernels, params) outImage = afwImage.ImageD(outputKernel.getDimensions()) outputKernel.computeImage(outImage, False) im -= outImage.convertF() resid = im if margin > 0: bim = im.Factory(w + 2*margin, h + 2*margin) afwMath.randomGaussianImage(bim.getImage(), afwMath.Random()) bim *= stdev sbim = im.Factory(bim, bbox) sbim <<= resid del sbim resid = bim if variance: resid = resid.getImage() resid /= var im_resid.append(resid) im = im_resid.makeMosaic() else: im = cand.getMaskedImage() if normalize: im /= afwMath.makeStatistics(im, afwMath.MAX).getValue() objId = splitId(cand.getSource().getId(), True)["objId"] if psf: lab = "%d chi^2 %.1f" % (objId, rchi2) ctype = ds9.RED if cand.isBad() else ds9.GREEN else: lab = "%d flux %8.3g" % (objId, cand.getSource().getPsfFlux()) ctype = ds9.GREEN mos.append(im, lab, ctype) if False and numpy.isnan(rchi2): ds9.mtv(cand.getMaskedImage().getImage(), title="candidate", frame=1) print "amp", cand.getAmplitude() im = cand.getMaskedImage() center = (candidateIndex, xc - im.getX0(), yc - im.getY0()) candidateIndex += 1 if cand.isBad(): candidateCentersBad.append(center) else: candidateCenters.append(center) if variance: title = "chi(Psf fit)" else: title = "Stars & residuals" mosaicImage = mos.makeMosaic(frame=frame, title=title) with ds9.Buffering(): for centers, color in ((candidateCenters, ds9.GREEN), (candidateCentersBad, ds9.RED)): for cen in centers: bbox = mos.getBBox(cen[0]) ds9.dot("+", cen[1] + bbox.getMinX(), cen[2] + bbox.getMinY(), frame=frame, ctype=color) return mosaicImage
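# --- Usage sketch (not from the original code) ----------------------------------------
# A minimal, hedged example of calling the display helper above.  `exposure`,
# `psfCellSet`, and `psf` are assumed to come from a PSF-determination run elsewhere;
# none of them are defined in this section.
mosaic = showPsfCandidates(exposure, psfCellSet, psf=psf, frame=1,
                           normalize=True, showBadCandidates=False,
                           fitBasisComponents=True)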
def runone(self, kk, rand):
    psf = self.getpsf()

    im = afwImage.ImageF(120, 200)
    skystd = 100
    afwMath.randomGaussianImage(im, rand)
    im *= skystd

    # The SDSS adaptive moments code seems sometimes to latch onto
    # an incorrect answer (maybe from a noise spike or something).
    # None of the flags seem to be set.  The results are variance
    # measurements a bit bigger than the PSF.  With different
    # noise draws the source values here will show this effect
    # (hence the loop in "test1" to try "runone" with different
    # noise draws).

    # The real point of this test case, though, is to show that
    # replacing other detections by noise results in better
    # measurements.  We do this by constructing a fake image
    # containing six rows.  In the top three rows, we have a
    # galaxy flanked by two stars that are far enough away that
    # they don't confuse the SDSS adaptive moments code.  In the
    # bottom three rows, they're close enough that the detections
    # don't merge, but the stars cause the variance of the galaxy
    # to be mis-estimated.  We want to show that with the
    # "doReplaceWithNoise" option, the measurements on the
    # bottom three improve.

    # If you love ASCII art (and who doesn't, really), the
    # synthetic image is going to look like this:
    #
    #   *   GGG   *
    #   *   GGG   *
    #   *   GGG   *
    #   *   GGG   *
    #   *   GGG   *
    #   *   GGG   *

    # We have three of each to work around the instability
    # mentioned above.

    x = 60
    y0 = 16
    ystep = 33
    for i in range(6):
        dx = [28, 29, 30, 35, 36, 37][i]
        y = y0 + i*ystep
        #                x  y  sx sy flux
        addGaussian(im, x, y, 10, 3, 2e5)
        addPsf(im, psf, x + dx, y, 1000)
        addPsf(im, psf, x - dx, y, 1000)

    # im.writeFits('im.fits')

    mi = afwImage.MaskedImageF(im)
    var = mi.getVariance()
    var.set(skystd**2)
    exposure = afwImage.makeExposure(mi)
    exposure.setPsf(psf)

    detconf = measAlg.SourceDetectionConfig()
    detconf.returnOriginalFootprints = True
    detconf.reEstimateBackground = False

    measconf = measAlg.SourceMeasurementConfig()
    measconf.doReplaceWithNoise = False

    # newalgs = ['shape.hsm.ksb', 'shape.hsm.bj', 'shape.hsm.linear']
    # measconf.algorithms = list(measconf.algorithms.names) + newalgs

    schema = afwTable.SourceTable.makeMinimalSchema()
    detect = measAlg.SourceDetectionTask(config=detconf, schema=schema)
    measure = measAlg.SourceMeasurementTask(config=measconf, schema=schema)

    print 'Running detection...'
    table = afwTable.SourceTable.make(schema)
    detected = detect.makeSourceCatalog(table, exposure)
    sources = detected.sources

    # We don't want the sources to be close enough that their
    # detection masks touch.
    self.assertEqual(len(sources), 18)

    # Run measurement with and without "doReplaceWithNoise"...
    for jj in range(2):

        print 'Running measurement...'
        measure.run(exposure, sources)

        # fields = schema.getNames()
        # print 'Fields:', fields
        fields = ['centroid.sdss', 'shape.sdss',
                  # 'shape.hsm.bj.moments',
                  # 'shape.hsm.ksb.moments',
                  # 'shape.hsm.linear.moments',
                  # 'shape.sdss.flags.maxiter',
                  'shape.sdss.flags.shift',
                  # 'shape.sdss.flags.unweighted',
                  'shape.sdss.flags.unweightedbad',
                  ]
        keys = [schema.find(f).key for f in fields]
        xx, yy, vx, vy = [], [], [], []
        for source in sources:
            # print '  ', source
            # for f, k in zip(fields, keys):
            #     val = source.get(k)
            #     print '  ', f, val
            xx.append(source.getX())
            yy.append(source.getY())
            vx.append(source.getIxx())
            vy.append(source.getIyy())
        if plots:
            plotSources(im, sources, schema)
            plt.savefig('%i%s.png' % (kk, chr(ord('a') + jj)))

        # Now we want to find the galaxy variance measurements...
        # Sort, first vertically then horizontally
        # iy ~ row number
        iy = [int(round((y - y0) / float(ystep))) for y in yy]
        iy = np.array(iy)
        xx = np.array(xx)
        vx = np.array(vx)
        vy = np.array(vy)
        I = np.argsort(iy * 1000 + xx)
        vx = vx[I]
        vy = vy[I]
        # The "left" stars will be indices 0, 3, 6, ...
        # The galaxies will be 1, 4, 7, ...
        vx = vx[slice(1, 18, 3)]

        # Bottom three galaxies may be contaminated by the stars
        bad = vx[:3]
        # Top three should be clean
        good = vx[3:]

        # When SdssShape fails, we get variance ~ 11

        I = np.flatnonzero(bad > 50.)
        # Hope that we got at least one valid measurement
        self.assertTrue(len(I) > 0)
        bad = bad[I]
        I = np.flatnonzero(good > 50.)
        self.assertTrue(len(I) > 0)
        good = good[I]

        print 'bad:', bad
        print 'good:', good

        # Typical:
        # bad:  [ 209.78476672  192.35271583  176.76274525]
        # good: [  99.40557099  110.5701382 ]

        oklo, okhi = 80, 120
        self.assertTrue(all((good > oklo) * (good < okhi)))
        if jj == 0:
            # Without "doReplaceWithNoise", we expect to find the variances
            # overestimated.
            self.assertTrue(all(bad > okhi))
        else:
            # With "doReplaceWithNoise", no problem!
            self.assertTrue(all((bad > oklo) * (bad < okhi)))

        # Set "doReplaceWithNoise" for the second time through the loop...
        measconf.doReplaceWithNoise = True
def getRandomImage(self, bb):
    # Create an Image and fill it with Gaussian noise.
    rim = afwImage.ImageF(bb.getWidth(), bb.getHeight())
    rim.setXY0(bb.getMinX(), bb.getMinY())
    afwMath.randomGaussianImage(rim, self.rand)
    return rim
def setUp(self): width, height = 110, 301 self.mi = afwImage.MaskedImageF(afwGeom.ExtentI(width, height)) self.mi.set(0) sd = 3 # standard deviation of image self.mi.getVariance().set(sd*sd) self.mi.getMask().addMaskPlane("DETECTED") self.FWHM = 5 self.ksize = 31 # size of desired kernel sigma1 = 1.75 sigma2 = 2*sigma1 self.exposure = afwImage.makeExposure(self.mi) self.exposure.setPsf(measAlg.DoubleGaussianPsf(self.ksize, self.ksize, 1.5*sigma1, 1, 0.1)) crval = afwCoord.makeCoord(afwCoord.ICRS, 0.0*afwGeom.degrees, 0.0*afwGeom.degrees) wcs = afwImage.makeWcs(crval, afwGeom.PointD(0, 0), 1.0, 0, 0, 1.0) self.exposure.setWcs(wcs) ccd = cameraGeom.Ccd(cameraGeom.Id(1)) ccd.addAmp(cameraGeom.Amp(cameraGeom.Id(0), afwGeom.BoxI(afwGeom.PointI(0,0), self.exposure.getDimensions()), afwGeom.BoxI(afwGeom.PointI(0,0), afwGeom.ExtentI(0,0)), afwGeom.BoxI(afwGeom.PointI(0,0), self.exposure.getDimensions()), cameraGeom.ElectronicParams(1.0, 100.0, 65535))) self.exposure.setDetector(ccd) self.exposure.getDetector().setDistortion(None) # # Make a kernel with the exactly correct basis functions. Useful for debugging # basisKernelList = afwMath.KernelList() for sigma in (sigma1, sigma2): basisKernel = afwMath.AnalyticKernel(self.ksize, self.ksize, afwMath.GaussianFunction2D(sigma, sigma)) basisImage = afwImage.ImageD(basisKernel.getDimensions()) basisKernel.computeImage(basisImage, True) basisImage /= np.sum(basisImage.getArray()) if sigma == sigma1: basisImage0 = basisImage else: basisImage -= basisImage0 basisKernelList.append(afwMath.FixedKernel(basisImage)) order = 1 # 1 => up to linear spFunc = afwMath.PolynomialFunction2D(order) exactKernel = afwMath.LinearCombinationKernel(basisKernelList, spFunc) exactKernel.setSpatialParameters([[1.0, 0, 0], [0.0, 0.5*1e-2, 0.2e-2]]) rand = afwMath.Random() # make these tests repeatable by setting seed addNoise = True if addNoise: im = self.mi.getImage() afwMath.randomGaussianImage(im, rand) # N(0, 1) im *= sd # N(0, sd^2) del im xarr, yarr = [], [] for x, y in [(20, 20), (60, 20), (30, 35), (50, 50), (20, 90), (70, 160), (25, 265), (75, 275), (85, 30), (50, 120), (70, 80), (60, 210), (20, 210), ]: xarr.append(x) yarr.append(y) for x, y in zip(xarr, yarr): dx = rand.uniform() - 0.5 # random (centered) offsets dy = rand.uniform() - 0.5 k = exactKernel.getSpatialFunction(1)(x, y) # functional variation of Kernel ... b = (k*sigma1**2/((1 - k)*sigma2**2)) # ... converted double Gaussian's "b" #flux = 80000 - 20*x - 10*(y/float(height))**2 flux = 80000*(1 + 0.1*(rand.uniform() - 0.5)) I0 = flux*(1 + b)/(2*np.pi*(sigma1**2 + b*sigma2**2)) for iy in range(y - self.ksize//2, y + self.ksize//2 + 1): if iy < 0 or iy >= self.mi.getHeight(): continue for ix in range(x - self.ksize//2, x + self.ksize//2 + 1): if ix < 0 or ix >= self.mi.getWidth(): continue I = I0*psfVal(ix, iy, x + dx, y + dy, sigma1, sigma2, b) Isample = rand.poisson(I) if addNoise else I self.mi.getImage().set(ix, iy, self.mi.getImage().get(ix, iy) + Isample) self.mi.getVariance().set(ix, iy, self.mi.getVariance().get(ix, iy) + I) # bbox = afwGeom.BoxI(afwGeom.PointI(0,0), afwGeom.ExtentI(width, height)) self.cellSet = afwMath.SpatialCellSet(bbox, 100) self.footprintSet = afwDetection.FootprintSet(self.mi, afwDetection.Threshold(100), "DETECTED") self.catalog = SpatialModelPsfTestCase.measure(self.footprintSet, self.exposure) for source in self.catalog: try: cand = measAlg.makePsfCandidate(source, self.exposure) self.cellSet.insertCandidate(cand) except Exception, e: print e continue
def setUp(self):
    self.schema = afwTable.SourceTable.makeMinimalSchema()
    config = measBase.SingleFrameMeasurementConfig()
    config.algorithms.names = ["base_PixelFlags",
                               "base_SdssCentroid",
                               "base_GaussianFlux",
                               "base_SdssShape",
                               "base_CircularApertureFlux",
                               "base_PsfFlux",
                               ]
    config.algorithms["base_CircularApertureFlux"].radii = [3.0]
    config.slots.centroid = "base_SdssCentroid"
    config.slots.psfFlux = "base_PsfFlux"
    config.slots.apFlux = "base_CircularApertureFlux_3_0"
    config.slots.modelFlux = None
    config.slots.gaussianFlux = None
    config.slots.calibFlux = None
    config.slots.shape = "base_SdssShape"

    self.measureTask = measBase.SingleFrameMeasurementTask(self.schema, config=config)

    width, height = 110, 301
    self.mi = afwImage.MaskedImageF(lsst.geom.ExtentI(width, height))
    self.mi.set(0)
    sd = 3  # standard deviation of image
    self.mi.getVariance().set(sd*sd)
    self.mi.getMask().addMaskPlane("DETECTED")

    self.FWHM = 5
    self.ksize = 31  # size of desired kernel

    sigma1 = 1.75
    sigma2 = 2*sigma1

    self.exposure = afwImage.makeExposure(self.mi)
    self.exposure.setPsf(measAlg.DoubleGaussianPsf(self.ksize, self.ksize, 1.5*sigma1, 1, 0.1))
    self.exposure.setDetector(DetectorWrapper().detector)

    #
    # Make a kernel with the exactly correct basis functions.  Useful for debugging
    #
    basisKernelList = []
    for sigma in (sigma1, sigma2):
        basisKernel = afwMath.AnalyticKernel(self.ksize, self.ksize,
                                             afwMath.GaussianFunction2D(sigma, sigma))
        basisImage = afwImage.ImageD(basisKernel.getDimensions())
        basisKernel.computeImage(basisImage, True)
        basisImage /= np.sum(basisImage.getArray())

        if sigma == sigma1:
            basisImage0 = basisImage
        else:
            basisImage -= basisImage0

        basisKernelList.append(afwMath.FixedKernel(basisImage))

    order = 1  # 1 => up to linear
    spFunc = afwMath.PolynomialFunction2D(order)

    exactKernel = afwMath.LinearCombinationKernel(basisKernelList, spFunc)
    exactKernel.setSpatialParameters([[1.0, 0, 0], [0.0, 0.5*1e-2, 0.2e-2]])
    self.exactPsf = measAlg.PcaPsf(exactKernel)

    rand = afwMath.Random()  # make these tests repeatable by setting seed

    addNoise = True
    if addNoise:
        im = self.mi.getImage()
        afwMath.randomGaussianImage(im, rand)  # N(0, 1)
        im *= sd                               # N(0, sd^2)
        del im

    xarr, yarr = [], []

    for x, y in [(20, 20), (60, 20), (30, 35), (50, 50),
                 (20, 90), (70, 160), (25, 265), (75, 275), (85, 30),
                 (50, 120), (70, 80), (60, 210), (20, 210),
                 ]:
        xarr.append(x)
        yarr.append(y)

    for x, y in zip(xarr, yarr):
        dx = rand.uniform() - 0.5  # random (centered) offsets
        dy = rand.uniform() - 0.5

        k = exactKernel.getSpatialFunction(1)(x, y)  # functional variation of Kernel ...
        b = (k*sigma1**2/((1 - k)*sigma2**2))        # ... converted double Gaussian's "b"

        # flux = 80000 - 20*x - 10*(y/float(height))**2
        flux = 80000*(1 + 0.1*(rand.uniform() - 0.5))
        I0 = flux*(1 + b)/(2*np.pi*(sigma1**2 + b*sigma2**2))
        for iy in range(y - self.ksize//2, y + self.ksize//2 + 1):
            if iy < 0 or iy >= self.mi.getHeight():
                continue

            for ix in range(x - self.ksize//2, x + self.ksize//2 + 1):
                if ix < 0 or ix >= self.mi.getWidth():
                    continue

                intensity = I0*psfVal(ix, iy, x + dx, y + dy, sigma1, sigma2, b)
                Isample = rand.poisson(intensity) if addNoise else intensity
                self.mi.image[ix, iy, afwImage.LOCAL] += Isample
                self.mi.variance[ix, iy, afwImage.LOCAL] += intensity
    #
    bbox = lsst.geom.BoxI(lsst.geom.PointI(0, 0), lsst.geom.ExtentI(width, height))
    self.cellSet = afwMath.SpatialCellSet(bbox, 100)

    self.footprintSet = afwDetection.FootprintSet(self.mi, afwDetection.Threshold(100), "DETECTED")

    self.catalog = self.measure(self.footprintSet, self.exposure)

    for source in self.catalog:
        try:
            cand = measAlg.makePsfCandidate(source, self.exposure)
            self.cellSet.insertCandidate(cand)
        except Exception as e:
            print(e)
            continue