Code Example #1
    def testIdentity(self):
        nobj = 1000
        for i in range(nobj):
            s = self.ss1.addNew()
            s.setId(i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  (10 + 0.001 * i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  (10 + 0.001 * i) * afwGeom.degrees)

            s = self.ss2.addNew()
            s.setId(2 * nobj + i)
            # Give slight offsets for Coord testing of matches to/from catalog in checkMatchToFromCatalog()
            # Chosen such that the maximum offset (nobj*1E-7 deg = 0.36 arcsec) is within the maximum
            # distance (1 arcsec) in afwTable.matchRaDec.
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  (10 + 0.0010001 * i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  (10 + 0.0010001 * i) * afwGeom.degrees)

        # Old API (pre DM-855)
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds,
                                  False)
        self.assertEqual(len(mat), nobj)
        # New API
        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds,
                                  mc)
        self.assertEqual(len(mat), nobj)

        cat = afwTable.packMatches(mat)

        mat2 = afwTable.unpackMatches(cat, self.ss1, self.ss2)

        for m1, m2, c in zip(mat, mat2, cat):
            self.assertEqual(m1.first, m2.first)
            self.assertEqual(m1.second, m2.second)
            self.assertEqual(m1.distance, m2.distance)
            self.assertEqual(m1.first.getId(), c["first"])
            self.assertEqual(m1.second.getId(), c["second"])
            self.assertEqual(m1.distance, c["distance"])

        self.checkPickle(mat, checkSlots=False)
        self.checkPickle(mat2, checkSlots=False)

        self.checkMatchToFromCatalog(mat, cat)

        if False:
            s0 = mat[0][0]
            s1 = mat[0][1]
            print(s0.getRa(), s1.getRa(), s0.getId(), s1.getId())
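As a quick reference distilled from the test above (and not part of it), the post-DM-855 calling pattern looks like the sketch below. It assumes two coordinate-populated afwTable.SourceCatalog objects named cat1 and cat2, and uses the newer lsst.geom unit spelling that appears in later examples; older code spells the same units via afwGeom.

import lsst.geom
import lsst.afw.table as afwTable

# cat1/cat2: pre-built SourceCatalogs with valid coord columns (assumed).
mc = afwTable.MatchControl()
mc.findOnlyClosest = False                 # keep every pair inside the match radius
matches = afwTable.matchRaDec(cat1, cat2, 1.0*lsst.geom.arcseconds, mc)

packed = afwTable.packMatches(matches)     # catalog with "first", "second", "distance" columns
restored = afwTable.unpackMatches(packed, cat1, cat2)
assert len(restored) == len(matches)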
Code Example #2
    def testMismatches(self):
        """ Chech that matchRaDec works as expected when using
            the includeMismatches option
        """
        cat1 = afwTable.SourceCatalog(self.table)
        cat2 = afwTable.SourceCatalog(self.table)
        nobj = 100
        for i in range(nobj):
            s1 = cat1.addNew()
            s2 = cat2.addNew()
            s1.setId(i)
            s2.setId(i)
            s1.set(afwTable.SourceTable.getCoordKey().getRa(),
                   (10 + 0.0001 * i) * afwGeom.degrees)
            s2.set(afwTable.SourceTable.getCoordKey().getRa(),
                   (10.005 + 0.0001 * i) * afwGeom.degrees)
            s1.set(afwTable.SourceTable.getCoordKey().getDec(),
                   (10 + 0.0001 * i) * afwGeom.degrees)
            s2.set(afwTable.SourceTable.getCoordKey().getDec(),
                   (10.005 + 0.0001 * i) * afwGeom.degrees)

        for closest in (True, False):
            mc = afwTable.MatchControl()
            mc.findOnlyClosest = closest
            mc.includeMismatches = False
            matches = afwTable.matchRaDec(cat1, cat2, 1.0 * afwGeom.arcseconds,
                                          mc)
            mc.includeMismatches = True
            matchesMismatches = afwTable.matchRaDec(cat1, cat2,
                                                    1.0 * afwGeom.arcseconds,
                                                    mc)

            catMatches = afwTable.SourceCatalog(self.table)
            catMismatches = afwTable.SourceCatalog(self.table)
            for m in matchesMismatches:
                if m[1] is not None:
                    if not any(x == m[0] for x in catMatches):
                        catMatches.append(m[0])
                else:
                    catMismatches.append(m[0])
            if closest:
                self.assertEqual(len(catMatches), len(matches))
            matches2 = afwTable.matchRaDec(catMatches, cat2,
                                           1.0 * afwGeom.arcseconds, mc)
            self.assertEqual(len(matches), len(matches2))
            mc.includeMismatches = False
            noMatches = afwTable.matchRaDec(catMismatches, cat2,
                                            1.0 * afwGeom.arcseconds, mc)
            self.assertEqual(len(noMatches), 0)
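The property this test relies on is that, with includeMismatches enabled, matchRaDec also returns an entry for each record of the first catalog that found no counterpart, with None in the second slot. A minimal sketch of that pattern, under the same assumptions about cat1 and cat2 as in the test:

mc = afwTable.MatchControl()
mc.findOnlyClosest = True
mc.includeMismatches = True
pairs = afwTable.matchRaDec(cat1, cat2, 1.0*afwGeom.arcseconds, mc)

matched = [m for m in pairs if m[1] is not None]    # genuine matches
unmatched = [m[0] for m in pairs if m[1] is None]   # cat1 records with no counterpart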
Code Example #3
    def testMismatches(self):
        """ Chech that matchRaDec works as expected when using
            the includeMismatches option
        """
        cat1 = afwTable.SourceCatalog(self.table)
        cat2 = afwTable.SourceCatalog(self.table)
        nobj = 100
        for i in range(nobj):
            s1 = cat1.addNew()
            s2 = cat2.addNew()
            s1.setId(i)
            s2.setId(i)
            s1.set(afwTable.SourceTable.getCoordKey().getRa(),
                   (10 + 0.0001*i) * lsst.geom.degrees)
            s2.set(afwTable.SourceTable.getCoordKey().getRa(),
                   (10.005 + 0.0001*i) * lsst.geom.degrees)
            s1.set(afwTable.SourceTable.getCoordKey().getDec(),
                   (10 + 0.0001*i) * lsst.geom.degrees)
            s2.set(afwTable.SourceTable.getCoordKey().getDec(),
                   (10.005 + 0.0001*i) * lsst.geom.degrees)

        for closest in (True, False):
            mc = afwTable.MatchControl()
            mc.findOnlyClosest = closest
            mc.includeMismatches = False
            matches = afwTable.matchRaDec(
                cat1, cat2, 1.0*lsst.geom.arcseconds, mc)
            mc.includeMismatches = True
            matchesMismatches = afwTable.matchRaDec(
                cat1, cat2, 1.0*lsst.geom.arcseconds, mc)

            catMatches = afwTable.SourceCatalog(self.table)
            catMismatches = afwTable.SourceCatalog(self.table)
            for m in matchesMismatches:
                if m[1] is not None:
                    if not any(x == m[0] for x in catMatches):
                        catMatches.append(m[0])
                else:
                    catMismatches.append(m[0])
            if closest:
                self.assertEqual(len(catMatches), len(matches))
            matches2 = afwTable.matchRaDec(
                catMatches, cat2, 1.0*lsst.geom.arcseconds, mc)
            self.assertEqual(len(matches), len(matches2))
            mc.includeMismatches = False
            noMatches = afwTable.matchRaDec(
                catMismatches, cat2, 1.0*lsst.geom.arcseconds, mc)
            self.assertEqual(len(noMatches), 0)
Code Example #4
    def testNaNPositions(self):
        ss1 = afwTable.SourceCatalog(self.table)
        ss2 = afwTable.SourceCatalog(self.table)
        for ss in (ss1, ss2):
            ss.addNew().set(afwTable.SourceTable.getCoordKey().getRa(),
                            float('nan') * lsst.geom.radians)

            ss.addNew().set(afwTable.SourceTable.getCoordKey().getDec(),
                            float('nan') * lsst.geom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  0.0 * lsst.geom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  0.0 * lsst.geom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  float('nan') * lsst.geom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  float('nan') * lsst.geom.radians)

        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False
        mat = afwTable.matchRaDec(ss1, ss2, 1.0*lsst.geom.arcseconds, mc)
        self.assertEqual(len(mat), 1)
Code Example #5
    def testDistancePrecision(self):
        """Test for precision of the calculated distance

        Check that the distance produced by matchRaDec is the same
        as the distance produced from calculating the separation
        between the matched coordinates.

        Based on DM-13891.
        """
        num = 1000  # Number of points
        radius = 0.5*lsst.geom.arcseconds  # Matching radius
        tol = 1.0e-10  # Absolute tolerance
        rng = np.random.RandomState(12345)  # I have the same combination on my luggage
        coordKey = afwTable.SourceTable.getCoordKey()
        raKey = coordKey.getRa()
        decKey = coordKey.getDec()
        for ii in range(num):
            src1 = self.ss1.addNew()
            src1.setId(ii)
            src1.set(raKey, (10 + 0.001*ii) * lsst.geom.degrees)
            src1.set(decKey, (10 + 0.001*ii) * lsst.geom.degrees)

            src2 = self.ss2.addNew()
            src2.setId(2*num + ii)
            src2.set(coordKey,
                     src1.getCoord().offset(rng.uniform(high=360)*lsst.geom.degrees,
                                            rng.uniform(high=radius.asArcseconds())*lsst.geom.arcseconds))

        matches = afwTable.matchRaDec(self.ss1, self.ss2, radius)
        dist1 = np.array([(mm.distance*lsst.geom.radians).asArcseconds() for mm in matches])
        dist2 = np.array([mm.first.getCoord().separation(mm.second.getCoord()).asArcseconds()
                         for mm in matches])
        diff = dist1 - dist2
        self.assertLess(diff.std(), tol)  # I get 4e-12
        self.assertFloatsAlmostEqual(dist1, dist2, atol=tol)
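A detail worth pulling out of this test: Match.distance is a plain float in radians, so multiplying by lsst.geom.radians turns it back into an Angle that can be expressed in arcseconds. A short sketch, assuming `matches` came from one of the matchRaDec calls above:

# Separation as reported by the matcher (float radians -> Angle -> arcsec).
sepMatcher = [(m.distance*lsst.geom.radians).asArcseconds() for m in matches]
# The same separations recomputed directly from the matched coordinates.
sepDirect = [m.first.getCoord().separation(m.second.getCoord()).asArcseconds()
             for m in matches]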
Code Example #6
    def testNaNPositions(self):
        ss1 = afwTable.SourceCatalog(self.table)
        ss2 = afwTable.SourceCatalog(self.table)
        for ss in (ss1, ss2):
            ss.addNew().set(afwTable.SourceTable.getCoordKey().getRa(),
                            float('nan') * afwGeom.radians)

            ss.addNew().set(afwTable.SourceTable.getCoordKey().getDec(),
                            float('nan') * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  0.0 * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  0.0 * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  float('nan') * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  float('nan') * afwGeom.radians)

        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False
        mat = afwTable.matchRaDec(ss1, ss2, 1.0 * afwGeom.arcseconds, mc)
        self.assertEqual(len(mat), 1)
Code Example #7
    def makeMatches(self, refCat, srcCat, nSrc):
        for i in range(nSrc):

            refSrc = refCat.addNew()
            srcSrc = srcCat.addNew()

            raDeg, decDeg = np.random.randn(2)
            coord = afwGeom.SpherePoint(raDeg, decDeg, afwGeom.degrees)

            refSrc.set("g_flux", 10**(-0.4*18))
            refSrc.set("r_flux", 10**(-0.4*18))
            refSrc.set("resolved", False)
            refSrc.set("photometric", True)
            refSrc.setCoord(coord)

            srcSrc.setCoord(coord)
            srcSrc.set("slot_PsfFlux_instFlux", 10.)
            srcSrc.set("slot_PsfFlux_instFluxErr", 1.)
            for flag in self.sourceSelector.config.badFlags:
                srcSrc.set(flag, False)

        mc = afwTable.MatchControl()
        mc.symmetricMatch = False
        mat = afwTable.matchRaDec(refCat, srcCat, 1.0 * afwGeom.arcseconds, mc)
        self.assertEqual(len(mat), nSrc)
        return mat
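The examples in this listing exercise three MatchControl switches; the annotated sketch below collects them in one place. The comments describe the behaviour as it is used in these examples (refCat and srcCat stand in for any pair of coordinate-populated catalogs, as in makeMatches above).

mc = afwTable.MatchControl()
mc.findOnlyClosest = True      # keep only the nearest counterpart per record; False keeps every pair in radius
mc.includeMismatches = False   # True also emits (record, None, distance) entries for unmatched records
mc.symmetricMatch = False      # as set in makeMatches() above; see the MatchControl docs for exact semantics
matches = afwTable.matchRaDec(refCat, srcCat, 1.0*afwGeom.arcseconds, mc)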
Code Example #8
    def run(self, catalog, filterName=None):
        """!Load reference objects and match to them

        @param[in] catalog  Catalog to match to (lsst.afw.table.SourceCatalog)
        @param[in] filterName  Name of filter, for loading fluxes (str)
        @return Struct with matches (lsst.afw.table.SourceMatchVector) and
            matchMeta (lsst.meas.astrom.MatchMetadata)
        """
        circle = self.calculateCircle(catalog)
        matchMeta = self.refObjLoader.getMetadataCircle(circle.center, circle.radius, filterName)
        emptyResult = Struct(matches=[], matchMeta=matchMeta)
        sourceSelection = self.sourceSelection.run(catalog)
        if len(sourceSelection.sourceCat) == 0:
            self.log.warn("No objects selected from %d objects in source catalog", len(catalog))
            return emptyResult
        refData = self.refObjLoader.loadSkyCircle(circle.center, circle.radius, filterName)
        refCat = refData.refCat
        refSelection = self.referenceSelection.run(refCat)
        if len(refSelection.sourceCat) == 0:
            self.log.warn("No objects selected from %d objects in reference catalog", len(refCat))
            return emptyResult
        matches = afwTable.matchRaDec(refSelection.sourceCat, sourceSelection.sourceCat,
                                      self.config.matchRadius*arcseconds)
        self.log.info("Matched %d from %d/%d input and %d/%d reference sources" %
                      (len(matches), len(sourceSelection.sourceCat), len(catalog),
                       len(refSelection.sourceCat), len(refCat)))
        return Struct(matches=matches, matchMeta=matchMeta, refCat=refCat, sourceSelection=sourceSelection,
                      refSelection=refSelection)
Code Example #9
File: sourceMatch.py  Project: ziggyman/afw_old
    def testNaNPositions(self):
        ss1 = afwTable.SourceCatalog(self.table)
        ss2 = afwTable.SourceCatalog(self.table)
        for ss in (ss1, ss2):
            ss.addNew().set(afwTable.SourceTable.getCoordKey().getRa(),
                            float('nan') * afwGeom.radians)

            ss.addNew().set(afwTable.SourceTable.getCoordKey().getDec(),
                            float('nan') * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  0.0 * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  0.0 * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  float('nan') * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  float('nan') * afwGeom.radians)

        mat = afwTable.matchRaDec(ss1, ss2, 1.0 * afwGeom.arcseconds, False)
        self.assertEqual(len(mat), 1)
        self.checkPickle(mat)
Code Example #10
    def makeMatches(self, refCat, srcCat, nSrc):
        for i in range(nSrc):

            refSrc = refCat.addNew()
            srcSrc = srcCat.addNew()

            raDeg, decDeg = np.random.randn(2)
            coord = afwGeom.SpherePoint(raDeg, decDeg, afwGeom.degrees)

            refSrc.set("g_flux", 10**(-0.4*18))
            refSrc.set("r_flux", 10**(-0.4*18))
            refSrc.set("resolved", False)
            refSrc.set("photometric", True)
            refSrc.setCoord(coord)

            srcSrc.setCoord(coord)
            srcSrc.set(srcSrc.getTable().getPsfFluxKey(), 10.)
            srcSrc.set(srcSrc.getTable().getPsfFluxErrKey(), 1.)
            for flag in self.sourceSelector.config.badFlags:
                srcSrc.set(flag, False)

        mc = afwTable.MatchControl()
        mc.symmetricMatch = False
        mat = afwTable.matchRaDec(refCat, srcCat, 1.0 * afwGeom.arcseconds, mc)
        self.assertEqual(len(mat), nSrc)
        return mat
Code Example #11
    def testIdentity(self):
        nobj = 1000
        for i in range(nobj):
            s = self.ss1.addNew()
            s.setId(i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(), (10 + 0.001*i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), (10 + 0.001*i) * afwGeom.degrees)

            s = self.ss2.addNew()
            s.setId(2*nobj + i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(), (10 + 0.001*i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), (10 + 0.001*i) * afwGeom.degrees)

        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds, False)

        self.assertEqual(len(mat), nobj)

        cat = afwTable.packMatches(mat)
            
        mat2 = afwTable.unpackMatches(cat, self.ss1, self.ss2)
        
        for m1, m2, c in zip(mat, mat2, cat):
            self.assertEqual(m1.first, m2.first)
            self.assertEqual(m1.second, m2.second)
            self.assertEqual(m1.distance, m2.distance)
            self.assertEqual(m1.first.getId(), c["first"])
            self.assertEqual(m1.second.getId(), c["second"])
            self.assertEqual(m1.distance, c["distance"])

        if False:
            s0 = mat[0][0]
            s1 = mat[0][1]
            print(s0.getRa(), s1.getRa(), s0.getId(), s1.getId())
Code Example #12
    def testIdentity(self):
        nobj = 1000
        for i in range(nobj):
            s = self.ss1.addNew()
            s.setId(i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(), (10 + 0.001*i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), (10 + 0.001*i) * afwGeom.degrees)

            s = self.ss2.addNew()
            s.setId(2*nobj + i)
            # Give slight offsets for Coord testing of matches to/from catalog in checkMatchToFromCatalog()
            # Chosen such that the maximum offset (nobj*1E-7 deg = 0.36 arcsec) is within the maximum
            # distance (1 arcsec) in afwTable.matchRaDec.
            s.set(afwTable.SourceTable.getCoordKey().getRa(), (10 + 0.0010001*i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), (10 + 0.0010001*i) * afwGeom.degrees)

        # Old API (pre DM-855)
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds, False)
        self.assertEqual(len(mat), nobj)
        # New API
        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0*afwGeom.arcseconds, mc)
        self.assertEqual(len(mat), nobj)

        cat = afwTable.packMatches(mat)

        mat2 = afwTable.unpackMatches(cat, self.ss1, self.ss2)

        for m1, m2, c in zip(mat, mat2, cat):
            self.assertEqual(m1.first, m2.first)
            self.assertEqual(m1.second, m2.second)
            self.assertEqual(m1.distance, m2.distance)
            self.assertEqual(m1.first.getId(), c["first"])
            self.assertEqual(m1.second.getId(), c["second"])
            self.assertEqual(m1.distance, c["distance"])

        self.checkPickle(mat, checkSlots=False)
        self.checkPickle(mat2, checkSlots=False)

        self.checkMatchToFromCatalog(mat, cat)

        if False:
            s0 = mat[0][0]
            s1 = mat[0][1]
            print(s0.getRa(), s1.getRa(), s0.getId(), s1.getId())
Code Example #13
File: sourceMatch.py  Project: dr-guangtou/hs_hsc
    def testIdentity(self):
        nobj = 1000
        for i in range(nobj):
            s = self.ss1.addNew()
            s.setId(i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  (10 + 0.001 * i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  (10 + 0.001 * i) * afwGeom.degrees)

            s = self.ss2.addNew()
            s.setId(2 * nobj + i)
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  (10 + 0.001 * i) * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  (10 + 0.001 * i) * afwGeom.degrees)
        # Old API (pre DM-855)
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds,
                                  False)
        self.assertEqual(len(mat), nobj)
        # New API
        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False
        mat = afwTable.matchRaDec(self.ss1, self.ss2, 1.0 * afwGeom.arcseconds,
                                  mc)
        self.assertEqual(len(mat), nobj)

        cat = afwTable.packMatches(mat)

        mat2 = afwTable.unpackMatches(cat, self.ss1, self.ss2)

        for m1, m2, c in zip(mat, mat2, cat):
            self.assertEqual(m1.first, m2.first)
            self.assertEqual(m1.second, m2.second)
            self.assertEqual(m1.distance, m2.distance)
            self.assertEqual(m1.first.getId(), c["first"])
            self.assertEqual(m1.second.getId(), c["second"])
            self.assertEqual(m1.distance, c["distance"])

        self.checkPickle(mat, checkSlots=False)
        self.checkPickle(mat2, checkSlots=False)

        if False:
            s0 = mat[0][0]
            s1 = mat[0][1]
            print(s0.getRa(), s1.getRa(), s0.getId(), s1.getId())
Code Example #14
    def run(self, butler, coaddSources, ccdInputs, coaddWcs):
        """
        """

        if len(self.config.flags) == 0:
            return

        flags = self._keys.keys()
        visitKey = ccdInputs.schema.find("visit").key
        ccdKey = ccdInputs.schema.find("ccd").key
        radius = self.config.matchRadius * afwGeom.arcseconds

        self.log.info("Propagating flags %s from inputs" % (flags, ))

        counts = dict(
            (f, numpy.zeros(len(coaddSources), dtype=int)) for f in flags)
        indices = numpy.array([s.getId() for s in coaddSources
                               ])  # Allowing for non-contiguous data

        # Accumulate counts of flags being set
        for ccdRecord in ccdInputs:
            v = ccdRecord.get(visitKey)
            c = ccdRecord.get(ccdKey)
            ccdSources = butler.get("src",
                                    run=int(v),
                                    ccd=int(c),
                                    immediate=True)
            for sourceRecord in ccdSources:
                sourceRecord.updateCoord(ccdRecord.getWcs())
            for flag in flags:
                # We assume that the flags will be relatively rare, so it is more efficient to match
                # against a subset of the input catalog for each flag than it is to match once against
                # the entire catalog.  It would be best to have built a kd-tree on coaddSources and
                # keep reusing that for the matching, but we don't have a suitable implementation.
                mc = afwTable.MatchControl()
                mc.findOnlyClosest = False
                matches = afwTable.matchRaDec(coaddSources,
                                              ccdSources[ccdSources.get(flag)],
                                              radius, mc)
                for m in matches:
                    index = (numpy.where(indices == m.first.getId()))[0][0]
                    counts[flag][index] += 1

        # Apply threshold
        for f in flags:
            key = self._keys[f]
            for s, num in zip(coaddSources, counts[f]):
                numOverlaps = len(
                    ccdInputs.subsetContaining(s.getCentroid(), coaddWcs,
                                               True))
                s.setFlag(key, bool(num > numOverlaps * self.config.flags[f]))
            self.log.info("Propagated %d sources with flag %s" %
                          (sum(s.get(key) for s in coaddSources), f))
Code Example #15
def getNoMatchCat(butler,
                  dataType,
                  filters=['HSC-G', 'HSC-R', 'HSC-I', 'HSC-Z', 'HSC-Y'],
                  selectSG="/tigress/garmilla/data/cosmos_sg_all.fits",
                  matchRadius=1 * afwGeom.arcseconds,
                  mode='hsc',
                  **kargs):

    mc = afwTable.MatchControl()
    mc.includeMismatches = True
    mc.findOnlyClosest = True

    sgTable = afwTable.SimpleCatalog.readFits(selectSG)
    sgTable["coord.ra"][:] = np.radians(sgTable["coord.ra"])
    sgTable["coord.dec"][:] = np.radians(sgTable["coord.dec"])

    outputCats = []
    for f in filters:
        cat = butler.fetchDataset(dataType, filterSuffix=f, filter=f, **kargs)
        if mode == 'hsc':
            schema = cat.getSchema()
        elif mode == 'hst':
            schema = sgTable.getSchema()
        outputCat = afwTable.SimpleCatalog(schema)
        suffix = _getFilterSuffix(f)
        if mode == 'hsc':
            matched = afwTable.matchRaDec(cat, sgTable, matchRadius, mc)
        elif mode == 'hst':
            matched = afwTable.matchRaDec(sgTable, cat, matchRadius, mc)
        for m1, m2, d in matched:
            if m2 is None:
                record = outputCat.addNew()
                record.assign(m1)
        outputCats.append(outputCat)

    result = outputCats[0]
    for i in range(1, len(outputCats)):
        result.extend(outputCats[i], deep=False)

    return result
Code Example #16
File: processCcd.py  Project: laurenam/pipe_tasks
    def propagateCalibFlags(self, icSources, sources, matchRadius=1):
        """Match the icSources and sources, and propagate Interesting Flags (e.g. PSF star) to the sources
        """

        if icSources is None:
            self.log.warn("Not matching icSource and Source catalogs, because the former is None")
            return
        if sources is None:
            self.log.warn("Not matching icSource and Source catalogs, because the latter is None")
            return

        self.log.info("Matching icSource and Source catalogs to propagate flags.")

        closest = False                 # return all matched objects
        matched = afwTable.matchRaDec(icSources, sources, matchRadius*afwGeom.arcseconds, closest)
        if self.config.doDeblend:
            matched = [m for m in matched if m[1].get("deblend.nchild") == 0] # if deblended, keep children
        #
        # Because we had to allow multiple matches to handle parents, we now need to
        # prune to the best matches
        #
        bestMatches = {}
        for m0, m1, d in matched:
            id0 = m0.getId()
            if id0 in bestMatches:
                if d > bestMatches[id0][2]:
                    continue

            bestMatches[id0] = (m0, m1, d)

        matched = bestMatches.values()
        #
        # Check that we got it right
        #
        if len(set(m[0].getId() for m in matched)) != len(matched):
            self.log.warn("At least one icSource is matched to more than one Source")

        if len(matched) == 0:
            self.log.warn("No matches found between icSource and Source catalogs.")

        #
        # Copy over the desired flags
        #
        for ics, s, d in matched:
            s.setFlag(self.calibSourceKey, True)
            # We don't want to overwrite s's footprint with ics's; DM-407
            icsFootprint = ics.getFootprint()
            try:
                ics.setFootprint(s.getFootprint())
                s.assign(ics, self.schemaMapper)
            finally:
                ics.setFootprint(icsFootprint)
Code Example #17
 def processCcd(ccdSources, wcsUpdate):
     for sourceRecord in ccdSources:
         sourceRecord.updateCoord(wcsUpdate)
     for flag in flags:
         # We assume that the flags will be relatively rare, so it is more efficient to match
         # against a subset of the input catalog for each flag than it is to match once against
         # the entire catalog.  It would be best to have built a kd-tree on coaddSources and
         # keep reusing that for the matching, but we don't have a suitable implementation.
         mc = afwTable.MatchControl()
         mc.findOnlyClosest = False
         matches = afwTable.matchRaDec(coaddSources, ccdSources[ccdSources.get(flag)], radius, mc)
         for m in matches:
             index = (numpy.where(indices == m.first.getId()))[0][0]
             counts[flag][index] += 1
Code Example #18
def match_meas_fluxes(butler,
                      visit,
                      star_truth_summary_file,
                      flux_type='base_PsfFlux',
                      max_offset=0.1):
    flux_col = f'{flux_type}_instFlux'
    conn = sqlite3.connect(star_truth_summary_file)
    radius = lsst.geom.Angle(max_offset, lsst.geom.arcseconds)
    dfs = []
    datarefs = butler.subset('src', visit=visit)
    for i, dataref in enumerate(list(datarefs)):
        print(i)
        calib = dataref.get('calexp').getPhotoCalib()
        src = scp.get_point_sources(dataref.get('src'))
        ras = np.degrees(src.get('coord_ra'))
        decs = np.degrees(src.get('coord_dec'))
        ra_min, ra_max = min(ras), max(ras)
        dec_min, dec_max = min(decs), max(decs)
        query = f'''select * from truth_summary where
                    {ra_min} <= ra and ra <= {ra_max} and
                    {dec_min} <= dec and dec <= {dec_max}'''
        truth_df = pd.read_sql(query, conn)
        truth_cat = make_SourceCatalog(truth_df)

        matches = afw_table.matchRaDec(truth_cat, src, radius)
        num_matches = len(matches)

        ids = np.zeros(num_matches, dtype=int)
        offsets = np.zeros(num_matches, dtype=float)
        true_fluxes = np.zeros(num_matches, dtype=float)
        meas_fluxes = np.zeros(num_matches, dtype=float)
        meas_fluxerrs = np.zeros(num_matches, dtype=float)

        for i, match in enumerate(matches):
            ids[i] = match.first['id']
            offsets[i] = np.degrees(match.distance) * 3600 * 1000.
            true_fluxes[i] = match.first[f'flux_{band}']
            meas_fluxes[i] = calib.instFluxToNanojansky(match.second[flux_col])
            meas_fluxerrs[i] \
                = calib.instFluxToNanojansky(match.second[flux_col + 'Err'])

        dfs.append(
            pd.DataFrame(data=dict(id=ids,
                                   offset=offsets,
                                   true_flux=true_fluxes,
                                   meas_flux=meas_fluxes,
                                   meas_fluxerr=meas_fluxerrs)))
    df = pd.concat(dfs)
    return df
Code Example #19
    def run(self, catalog, filterName=None):
        """!Load reference objects and match to them

        @param[in] catalog  Catalog to match to (lsst.afw.table.SourceCatalog)
        @param[in] filterName  Name of filter, for loading fluxes (str)
        @return Struct with matches (lsst.afw.table.SourceMatchVector) and
            matchMeta (lsst.meas.astrom.MatchMetadata)
        """
        circle = self.calculateCircle(catalog)
        matchMeta = MatchMetadata(circle.center, circle.radius, filterName)
        refData = self.refObjLoader.loadSkyCircle(circle.center, circle.radius, filterName)
        matches = afwTable.matchRaDec(refData.refCat, catalog, self.config.matchRadius*arcseconds)
        self.log.info("Matched %d from %d input and %d reference sources" %
                      (len(matches), len(catalog), len(refData.refCat)))
        return Struct(matches=matches, matchMeta=matchMeta)
Code Example #20
File: directMatch.py  Project: lsst/meas_astrom
    def run(self, catalog, filterName=None, epoch=None):
        """Load reference objects and match to them.

        Parameters
        ----------
        catalog : `lsst.afw.table.SourceCatalog`
            Catalog to match.
        filterName : `str`
            Name of filter loading fluxes.
        epoch : `astropy.time.Time` or `None`
            Epoch to which to correct proper motion and parallax, or `None` to
            not apply such corrections.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Result struct with components:

            ``matches``
                Matched sources with associated reference
                (`lsst.afw.table.SourceMatchVector`).
            ``matchMeta``
                Match metadata (`lsst.meas.astrom.MatchMetadata`).
        """
        if self.refObjLoader is None:
            raise RuntimeError("Running matcher task with no refObjLoader set in __ini__ or setRefObjLoader")
        circle = self.calculateCircle(catalog)
        matchMeta = self.refObjLoader.getMetadataCircle(circle.center, circle.radius, filterName, epoch=epoch)
        emptyResult = Struct(matches=[], matchMeta=matchMeta)
        sourceSelection = self.sourceSelection.run(catalog)
        if len(sourceSelection.sourceCat) == 0:
            self.log.warning("No objects selected from %d objects in source catalog", len(catalog))
            return emptyResult
        refData = self.refObjLoader.loadSkyCircle(circle.center, circle.radius, filterName, epoch=epoch)
        refCat = refData.refCat
        refSelection = self.referenceSelection.run(refCat)
        if len(refSelection.sourceCat) == 0:
            self.log.warning("No objects selected from %d objects in reference catalog", len(refCat))
            return emptyResult
        matches = afwTable.matchRaDec(refSelection.sourceCat, sourceSelection.sourceCat,
                                      self.config.matchRadius*arcseconds)
        self.log.info("Matched %d from %d/%d input and %d/%d reference sources",
                      len(matches), len(sourceSelection.sourceCat), len(catalog),
                      len(refSelection.sourceCat), len(refCat))
        return Struct(matches=matches, matchMeta=matchMeta, refCat=refCat, sourceSelection=sourceSelection,
                      refSelection=refSelection)
Code Example #21
    def matchSources(self, inputSources, templateSources):
        """Match sources between the input and template

        The order of the input arguments matters (because the later Wcs
        fitting assumes a particular order).

        @param inputSources: Source catalog of the input frame
        @param templateSources: Source catalog of the template frame
        @return Match list
        """
        matches = afwTable.matchRaDec(templateSources, inputSources,
                                      self.config.matchRadius*afwGeom.arcseconds)
        self.log.info("Matching within %.1f arcsec: %d matches" % (self.config.matchRadius, len(matches)))
        self.metadata.set("MATCH_NUM", len(matches))
        if len(matches) == 0:
            raise RuntimeError("Unable to match source catalogs")
        return matches
Code Example #22
    def run(self, catalog, filterName=None, epoch=None):
        """Load reference objects and match to them.

        Parameters
        ----------
        catalog : `lsst.afw.table.SourceCatalog`
            Catalog to match.
        filterName : `str`
            Name of filter loading fluxes.
        epoch : `astropy.time.Time` or `None`
            Epoch to which to correct proper motion and parallax, or `None` to
            not apply such corrections.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Result struct with components:

            ``matches``
                Matched sources with associated reference
                (`lsst.afw.table.SourceMatchVector`).
            ``matchMeta``
                Match metadata (`lsst.meas.astrom.MatchMetadata`).
        """
        if self.refObjLoader is None:
            raise RuntimeError("Running matcher task with no refObjLoader set in __ini__ or setRefObjLoader")
        circle = self.calculateCircle(catalog)
        matchMeta = self.refObjLoader.getMetadataCircle(circle.center, circle.radius, filterName, epoch=epoch)
        emptyResult = Struct(matches=[], matchMeta=matchMeta)
        sourceSelection = self.sourceSelection.run(catalog)
        if len(sourceSelection.sourceCat) == 0:
            self.log.warn("No objects selected from %d objects in source catalog", len(catalog))
            return emptyResult
        refData = self.refObjLoader.loadSkyCircle(circle.center, circle.radius, filterName, epoch=epoch)
        refCat = refData.refCat
        refSelection = self.referenceSelection.run(refCat)
        if len(refSelection.sourceCat) == 0:
            self.log.warn("No objects selected from %d objects in reference catalog", len(refCat))
            return emptyResult
        matches = afwTable.matchRaDec(refSelection.sourceCat, sourceSelection.sourceCat,
                                      self.config.matchRadius*arcseconds)
        self.log.info("Matched %d from %d/%d input and %d/%d reference sources" %
                      (len(matches), len(sourceSelection.sourceCat), len(catalog),
                       len(refSelection.sourceCat), len(refCat)))
        return Struct(matches=matches, matchMeta=matchMeta, refCat=refCat, sourceSelection=sourceSelection,
                      refSelection=refSelection)
Code Example #23
def main():

    # write some fake data
    writeFakeData()

    # read the fake data back in
    scat1 = loadSourceCatalog("coord1.dat")
    scat2 = loadSourceCatalog("coord2.dat")

    # match fake catalogs 1 and 2
    distance = 1.0*afwGeom.arcseconds
    matches = afwTable.matchRaDec(scat1, scat2, distance)

    # print the matched IDs
    for match in matches:
        s1, s2, dist = match
        print(s1.getId(), s2.getId(), (dist*afwGeom.radians).asArcseconds())
Code Example #24
    def matchSources(self, inputSources, templateSources):
        """Match sources between the input and template

        The order of the input arguments matters (because the later Wcs
        fitting assumes a particular order).

        @param inputSources: Source catalog of the input frame
        @param templateSources: Source catalog of the template frame
        @return Match list
        """
        matches = afwTable.matchRaDec(templateSources, inputSources,
                                      self.config.matchRadius*afwGeom.arcseconds)
        self.log.info("Matching within %.1f arcsec: %d matches" % (self.config.matchRadius, len(matches)))
        self.metadata.set("MATCH_NUM", len(matches))
        if len(matches) == 0:
            raise RuntimeError("Unable to match source catalogs")
        return matches
Code Example #25
def main():

    # write some fake data
    writeFakeData()

    # read in the fake data
    scat1 = loadSourceCatalog("coord1.dat")
    scat2 = loadSourceCatalog("coord2.dat")
    
    # do the match
    distance = 1.0*afwGeom.arcseconds
    matches = afwTable.matchRaDec(scat1, scat2, distance)    

    # print the IDs
    for match in matches:
        s1, s2, dist = match
        print(s1.getId(), s2.getId(), (dist*afwGeom.radians).asArcseconds())
Code Example #26
    def testNaNPositions(self):
        ss1 = afwTable.SourceCatalog(self.table)
        ss2 = afwTable.SourceCatalog(self.table)
        for ss in (ss1, ss2):
            ss.addNew().set(afwTable.SourceTable.getCoordKey().getRa(), float('nan') * afwGeom.radians)

            ss.addNew().set(afwTable.SourceTable.getCoordKey().getDec(), float('nan') * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(), 0.0 * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), 0.0 * afwGeom.radians)

            s = ss.addNew()
            s.set(afwTable.SourceTable.getCoordKey().getRa(), float('nan') * afwGeom.radians)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), float('nan') * afwGeom.radians)

        mat = afwTable.matchRaDec(ss1, ss2, 1.0 * afwGeom.arcseconds, False)
        self.assertEqual(len(mat), 1)
Code Example #27
    def testDistancePrecision(self):
        """Test for precision of the calculated distance

        Check that the distance produced by matchRaDec is the same
        as the distance produced from calculating the separation
        between the matched coordinates.

        Based on DM-13891.
        """
        num = 1000  # Number of points
        radius = 0.5 * afwGeom.arcseconds  # Matching radius
        tol = 1.0e-10  # Absolute tolerance
        rng = np.random.RandomState(
            12345)  # I have the same combination on my luggage
        coordKey = afwTable.SourceTable.getCoordKey()
        raKey = coordKey.getRa()
        decKey = coordKey.getDec()
        for ii in range(num):
            src1 = self.ss1.addNew()
            src1.setId(ii)
            src1.set(raKey, (10 + 0.001 * ii) * afwGeom.degrees)
            src1.set(decKey, (10 + 0.001 * ii) * afwGeom.degrees)

            src2 = self.ss2.addNew()
            src2.setId(2 * num + ii)
            src2.set(
                coordKey,
                src1.getCoord().offset(
                    rng.uniform(high=360) * afwGeom.degrees,
                    rng.uniform(high=radius.asArcseconds()) *
                    afwGeom.arcseconds))

        matches = afwTable.matchRaDec(self.ss1, self.ss2, radius)
        dist1 = np.array([(mm.distance * afwGeom.radians).asArcseconds()
                          for mm in matches])
        dist2 = np.array([
            mm.first.getCoord().separation(
                mm.second.getCoord()).asArcseconds() for mm in matches
        ])
        diff = dist1 - dist2
        self.assertLess(diff.std(), tol)  # I get 4e-12
        self.assertFloatsAlmostEqual(dist1, dist2, atol=tol)
Code Example #28
    def makeMatches(self, refCat, srcCat, nSrc):
        for i in range(nSrc):

            refSrc = refCat.addNew()
            srcSrc = srcCat.addNew()
            
            coord  = afwCoord.Coord(afwGeom.Point2D(*np.random.randn(2)), afwGeom.degrees)
            
            refSrc.set("g_flux", 10**(-0.4*18))
            refSrc.set("r_flux", 10**(-0.4*18))
            refSrc.set("resolved", False)
            refSrc.set("photometric", True)
            refSrc.setCoord(coord)

            srcSrc.setCoord(coord)
            srcSrc.set(srcSrc.getTable().getPsfFluxKey(), 10.)
            srcSrc.set(srcSrc.getTable().getPsfFluxErrKey(), 1.)
            for flag in self.sourceSelector.config.badFlags:
                srcSrc.set(flag, False)

        mat = afwTable.matchRaDec(refCat, srcCat, 1.0 * afwGeom.arcseconds, False)
        self.assertEqual(len(mat), nSrc)
        return mat
Code Example #29
File: forcedPhot.py  Project: mjuric/lsst-pipe_tasks
 def correctReferences(self, dataRef, references):
     self.log.info("Correcting reference positions...")
     sources = dataRef.get("src")
     matches = afwTable.matchRaDec(sources, references, self.config.radius * afwGeom.arcseconds)
     num = len(matches)
     self.log.info("%d matches between source and reference catalogs" % num)
     stats = afwMath.StatisticsControl()
     # XXX statistics parameters?
     dra, ddec = afwMath.vectorF(), afwMath.vectorF()
     dra.reserve(num)
     ddec.reserve(num)
     units = afwGeom.arcseconds
     # XXX errors in positions?
     for m in matches:
         src = m.first
         if src.getPsfFlux() < self.config.minFlux:
             continue
         ref = m.second
         offset = ref.getCoord().getTangentPlaneOffset(src.getCoord())
         dra.push_back(offset[0].asAngularUnits(units))
         ddec.push_back(offset[1].asAngularUnits(units))
     num = len(dra)
     draStats = afwMath.makeStatistics(dra, afwMath.MEANCLIP | afwMath.STDEVCLIP, stats)
     ddecStats = afwMath.makeStatistics(ddec, afwMath.MEANCLIP | afwMath.STDEVCLIP, stats)
     draMean = draStats.getValue(afwMath.MEANCLIP)
     ddecMean = ddecStats.getValue(afwMath.MEANCLIP)
     self.log.info("Offset from %d sources is dRA = %f +/- %f arcsec, dDec = %f +/- %f arcsec" %
                   (num, draMean, draStats.getValue(afwMath.STDEVCLIP), ddecMean,
                    ddecStats.getValue(afwMath.STDEVCLIP)))
     angle = math.atan2(ddecMean, draMean)*afwGeom.radians
     distance = math.hypot(draMean, ddecMean)*units
     for ref in references:
         coord = ref.getCoord()
         coord.offset(angle, distance)
         ref.setCoord(coord)
     return references
Code Example #30
def propagateCalibFlags(keysToCopy, calibSources, sources, matchRadius=1):
    """Match the calibSources and sources, and propagate Interesting Flags (e.g. PSF star) to the sources
    """
    if calibSources is None or sources is None:
        return

    closest = False                 # return all matched objects
    matched = afwTable.matchRaDec(calibSources, sources, matchRadius*afwGeom.arcseconds, closest)
    #
    # Because we had to allow multiple matches to handle parents, we now need to
    # prune to the best matches
    #
    bestMatches = {}
    for m0, m1, d in matched:
        id0 = m0.getId()
        if id0 in bestMatches:
            if d > bestMatches[id0][2]:
                continue

        bestMatches[id0] = (m0, m1, d)

    matched = bestMatches.values()
    #
    # Check that we got it right
    #
    if len(set(m[0].getId() for m in matched)) != len(matched):
        print("At least one calibSource is matched to more than one Source")
    #
    # Copy over the desired flags
    #
    for cs, s, d in matched:
        skey, ckey = keysToCopy[0]
        s.setFlag(skey, True)

        for skey, ckey in keysToCopy[1:]:
            s.set(skey, cs.get(ckey))
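The pruning idiom used here and in the earlier propagateCalibFlags method (keep only the closest counterpart per calibSource id after a non-closest match) can be written compactly; a sketch, where `matched` is the match list returned by matchRaDec:

best = {}
for m0, m1, d in matched:
    key = m0.getId()
    if key not in best or d < best[key][2]:
        best[key] = (m0, m1, d)
matched = list(best.values())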
Code Example #31
def buildXY(hscCat,
            sgTable,
            matchRadius=1 * afwGeom.arcseconds,
            includeMismatches=True,
            multiMeas=False):

    mc = afwTable.MatchControl()
    mc.includeMismatches = includeMismatches
    mc.findOnlyClosest = True

    print "Matching with HST catalog"
    matchedSG = afwTable.matchRaDec(hscCat, sgTable, matchRadius, mc)
    print "Found {0} matches with HST objects".format(len(matchedSG))

    # Build truth table
    stellar = {}
    classKey = sgTable.getSchema().find('mu.class').key
    magAutoKey = sgTable.getSchema().find('mag.auto').key
    noMatch = []
    for m1, m2, d in matchedSG:
        if m2 is None:
            noMatch.append(m1.getId())
        else:
            if not multiMeas:
                id = m2.getId()
                isStar = (m2.get(classKey) == 2)
                magAuto = m2.get(magAutoKey)
                if id not in stellar:
                    stellar[id] = [isStar, magAuto, d, m1]
                else:
                    if d < stellar[id][2]:
                        stellar[id] = [isStar, magAuto, d,
                                       m1]  # Only keep closest for now
            else:
                id = m1.getId()
                isStar = (m2.get(classKey) == 2)
                magAuto = m2.get(magAutoKey)
                stellar[id] = [isStar, magAuto, d, m1]

    if includeMismatches:
        print "{0} objects from {1} in the HSC catalog had no match in the HST catalog.".format(
            len(noMatch), len(hscCat))
        print "{0} objects from the HSC catalog with a match in the HST catalog were not the closest match.".format(
            len(matchedSG) - len(noMatch) - len(stellar))

    print "Of which I picked {0}".format(len(stellar))

    scm = createSchemaMapper(hscCat, withStellar=True)
    schema = scm.getOutputSchema()
    cat = afwTable.SourceCatalog(schema)
    cat.reserve(len(stellar))
    stellarKey = schema.find('stellar').key
    magAutoKey = schema.find('mag.auto').key

    for id in stellar:
        isStar, magAuto, d, m2 = stellar[id]
        record = cat.addNew()
        record.assign(m2, scm)
        record.set(stellarKey, isStar)
        record.set(magAutoKey, magAuto)

    if includeMismatches:
        return cat, noMatch

    return cat
Code Example #32
def main(rerun, dataIds, fakes, root='/lustre/Subaru/SSP', rad=10):

    doCoadd = 'tract' in dataIds[0].keys()
    butler = dafPer.Butler(os.path.join(root, "rerun", rerun))

    #read in fits file, replace with txt file or anything else
    fits = pyfits.open(fakes)
    data = fits[1].data
    radecCat = loadRaDec(data)
    ndata = len(data)
    datamask = np.ones(ndata, dtype=bool)
    ids = data["ID"] if "ID" in data.names else range(len(data))
    idDict = dict(zip(ids, range(ndata)))

    for dataId in dataIds:
        print(dataId)
        try:
            sources = butler.get('deepCoadd_src' if doCoadd else 'src',
                                 dataId,
                                 immediate=True,
                                 flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
            cal_md = butler.get('deepCoadd_md' if doCoadd else 'calexp_md',
                                dataId,
                                immediate=True)
            calexp = butler.get('deepCoadd' if doCoadd else 'calexp',
                                dataId,
                                immediate=True)
        except Exception:
            print("skipping", dataId)
            continue

        if False:
            matches = afwTable.matchRaDec(sources, radecCat,
                                          3.3 * afwGeom.arcseconds)
            for (src, fake, d) in matches:
                datamask[idDict[fake.getId()]] = False

        msk = calexp.getMaskedImage().getMask()
        detected = msk.clone()
        detected &= msk.getPlaneBitMask("DETECTED")
        wcs = calexp.getWcs()
        count, good_count = 0, 0
        for i_d, datum in enumerate(radecCat):
            pixCoord = afwGeom.Point2I(wcs.skyToPixel(datum.getCoord()))
            pixBox = afwGeom.BoxI(pixCoord, afwGeom.Extent2I(1, 1))
            pixBox.grow(rad)
            pixBox.clip(calexp.getBBox(afwImage.PARENT))
            if pixBox.isEmpty():
                continue
            else:
                count += 1
                subMask = afwImage.MaskU(detected, pixBox, afwImage.PARENT)
                if sum(subMask.getArray().ravel()) != 0:
                    datamask[i_d] = False
                else:
                    good_count += 1
        print(count, good_count)

    newdata = data[datamask]
    print(ndata, len(newdata))
    hdu = pyfits.BinTableHDU(newdata)
    hdu.writeto('blank_sources.fits', clobber=True)
Code Example #33
    def run(self, sensorRef, templateIdList=None):
        """Subtract an image from a template coadd and measure the result

        Steps include:
        - warp template coadd to match WCS of image
        - PSF match image to warped template
        - subtract image from PSF-matched, warped template
        - persist difference image
        - detect sources
        - measure sources

        @param sensorRef: sensor-level butler data reference, used for the following data products:
        Input only:
        - calexp
        - psf
        - ccdExposureId
        - ccdExposureId_bits
        - self.config.coaddName + "Coadd_skyMap"
        - self.config.coaddName + "Coadd"
        Input or output, depending on config:
        - self.config.coaddName + "Diff_subtractedExp"
        Output, depending on config:
        - self.config.coaddName + "Diff_matchedExp"
        - self.config.coaddName + "Diff_src"

        @return pipe_base Struct containing these fields:
        - subtractedExposure: exposure after subtracting template;
            the unpersisted version if subtraction not run but detection run
            None if neither subtraction nor detection run (i.e. nothing useful done)
        - subtractRes: results of subtraction task; None if subtraction not run
        - sources: detected and possibly measured sources; None if detection not run
        """
        self.log.info("Processing %s" % (sensorRef.dataId))

        # initialize outputs and some intermediate products
        subtractedExposure = None
        subtractRes = None
        selectSources = None
        kernelSources = None
        controlSources = None
        diaSources = None

        # We make one IdFactory that will be used by both icSrc and src datasets;
        # I don't know if this is the way we ultimately want to do things, but at least
        # this ensures the source IDs are fully unique.
        expBits = sensorRef.get("ccdExposureId_bits")
        expId = int(sensorRef.get("ccdExposureId"))
        idFactory = afwTable.IdFactory.makeSource(expId, 64 - expBits)

        # Retrieve the science image we wish to analyze
        exposure = sensorRef.get("calexp", immediate=True)
        if self.config.doAddCalexpBackground:
            mi = exposure.getMaskedImage()
            mi += sensorRef.get("calexpBackground").getImage()
        if not exposure.hasPsf():
            raise pipeBase.TaskError("Exposure has no psf")
        sciencePsf = exposure.getPsf()

        subtractedExposureName = self.config.coaddName + "Diff_differenceExp"
        templateExposure = None  # Stitched coadd exposure
        templateSources = None  # Sources on the template image
        if self.config.doSubtract:
            template = self.getTemplate.run(exposure,
                                            sensorRef,
                                            templateIdList=templateIdList)
            templateExposure = template.exposure
            templateSources = template.sources

            # compute scienceSigmaOrig: sigma of PSF of science image before pre-convolution
            scienceSigmaOrig = sciencePsf.computeShape().getDeterminantRadius()

            # sigma of PSF of template image before warping
            templateSigma = templateExposure.getPsf().computeShape(
            ).getDeterminantRadius()

            # if requested, convolve the science exposure with its PSF
            # (properly, this should be a cross-correlation, but our code does not yet support that)
            # compute scienceSigmaPost: sigma of science exposure with pre-convolution, if done,
            # else sigma of original science exposure
            if self.config.doPreConvolve:
                convControl = afwMath.ConvolutionControl()
                # cannot convolve in place, so make a new MI to receive convolved image
                srcMI = exposure.getMaskedImage()
                destMI = srcMI.Factory(srcMI.getDimensions())
                srcPsf = sciencePsf
                if self.config.useGaussianForPreConvolution:
                    # convolve with a simplified PSF model: a double Gaussian
                    kWidth, kHeight = sciencePsf.getLocalKernel(
                    ).getDimensions()
                    preConvPsf = SingleGaussianPsf(kWidth, kHeight,
                                                   scienceSigmaOrig)
                else:
                    # convolve with science exposure's PSF model
                    preConvPsf = srcPsf
                afwMath.convolve(destMI, srcMI, preConvPsf.getLocalKernel(),
                                 convControl)
                exposure.setMaskedImage(destMI)
                scienceSigmaPost = scienceSigmaOrig * math.sqrt(2)
            else:
                scienceSigmaPost = scienceSigmaOrig

            # If requested, find sources in the image
            if self.config.doSelectSources:
                if not sensorRef.datasetExists("src"):
                    self.log.warn(
                        "Src product does not exist; running detection, measurement, selection"
                    )
                    # Run own detection and measurement; necessary in nightly processing
                    selectSources = self.subtract.getSelectSources(
                        exposure,
                        sigma=scienceSigmaPost,
                        doSmooth=not self.doPreConvolve,
                        idFactory=idFactory,
                    )
                else:
                    self.log.info("Source selection via src product")
                    # Sources already exist; for data release processing
                    selectSources = sensorRef.get("src")

                # Number of basis functions
                nparam = len(
                    makeKernelBasisList(
                        self.subtract.config.kernel.active,
                        referenceFwhmPix=scienceSigmaPost * FwhmPerSigma,
                        targetFwhmPix=templateSigma * FwhmPerSigma))

                if self.config.doAddMetrics:
                    # Modify the schema of all Sources
                    kcQa = KernelCandidateQa(nparam)
                    selectSources = kcQa.addToSchema(selectSources)

                if self.config.kernelSourcesFromRef:
                    # match exposure sources to reference catalog
                    astromRet = self.astrometer.loadAndMatch(
                        exposure=exposure, sourceCat=selectSources)
                    matches = astromRet.matches
                elif templateSources:
                    # match exposure sources to template sources
                    mc = afwTable.MatchControl()
                    mc.findOnlyClosest = False
                    matches = afwTable.matchRaDec(templateSources,
                                                  selectSources,
                                                  1.0 * afwGeom.arcseconds, mc)
                else:
                    raise RuntimeError(
                        "doSelectSources=True and kernelSourcesFromRef=False,"
                        +
                        "but template sources not available. Cannot match science "
                        +
                        "sources with template sources. Run process* on data from "
                        + "which templates are built.")

                kernelSources = self.sourceSelector.selectStars(
                    exposure, selectSources, matches=matches).starCat

                random.shuffle(kernelSources, random.random)
                controlSources = kernelSources[::self.config.controlStepSize]
                kernelSources = [
                    k for i, k in enumerate(kernelSources)
                    if i % self.config.controlStepSize
                ]

                if self.config.doSelectDcrCatalog:
                    redSelector = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(
                            grMin=self.sourceSelector.config.grMax,
                            grMax=99.999))
                    redSources = redSelector.selectStars(
                        exposure, selectSources, matches=matches).starCat
                    controlSources.extend(redSources)

                    blueSelector = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(
                            grMin=-99.999,
                            grMax=self.sourceSelector.config.grMin))
                    blueSources = blueSelector.selectStars(
                        exposure, selectSources, matches=matches).starCat
                    controlSources.extend(blueSources)

                if self.config.doSelectVariableCatalog:
                    varSelector = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(includeVariable=True))
                    varSources = varSelector.selectStars(
                        exposure, selectSources, matches=matches).starCat
                    controlSources.extend(varSources)

                self.log.info(
                    "Selected %d / %d sources for Psf matching (%d for control sample)"
                    % (len(kernelSources), len(selectSources),
                       len(controlSources)))
            allresids = {}
            if self.config.doUseRegister:
                self.log.info("Registering images")

                if templateSources is None:
                    # Run detection on the template, which is
                    # temporarily background-subtracted
                    templateSources = self.subtract.getSelectSources(
                        templateExposure,
                        sigma=templateSigma,
                        doSmooth=True,
                        idFactory=idFactory)

                # Third step: we need to fit the relative astrometry.
                #
                wcsResults = self.fitAstrometry(templateSources,
                                                templateExposure,
                                                selectSources)
                warpedExp = self.register.warpExposure(templateExposure,
                                                       wcsResults.wcs,
                                                       exposure.getWcs(),
                                                       exposure.getBBox())
                templateExposure = warpedExp

                # Create debugging outputs on the astrometric
                # residuals as a function of position.  Persistence
                # not yet implemented; expected on (I believe) #2636.
                if self.config.doDebugRegister:
                    # Grab matches to reference catalog
                    srcToMatch = {x.second.getId(): x.first for x in matches}

                    refCoordKey = wcsResults.matches[0].first.getTable(
                    ).getCoordKey()
                    inCentroidKey = wcsResults.matches[0].second.getTable(
                    ).getCentroidKey()
                    sids = [m.first.getId() for m in wcsResults.matches]
                    positions = [
                        m.first.get(refCoordKey) for m in wcsResults.matches
                    ]
                    residuals = [
                        m.first.get(refCoordKey).getOffsetFrom(
                            wcsResults.wcs.pixelToSky(
                                m.second.get(inCentroidKey)))
                        for m in wcsResults.matches
                    ]
                    allresids = dict(zip(sids, zip(positions, residuals)))

                    cresiduals = [
                        m.first.get(refCoordKey).getTangentPlaneOffset(
                            wcsResults.wcs.pixelToSky(
                                m.second.get(inCentroidKey)))
                        for m in wcsResults.matches
                    ]
                    colors = numpy.array([
                        -2.5 * numpy.log10(srcToMatch[x].get("g")) +
                        2.5 * numpy.log10(srcToMatch[x].get("r")) for x in sids
                        if x in srcToMatch.keys()
                    ])
                    dlong = numpy.array([
                        r[0].asArcseconds() for s, r in zip(sids, cresiduals)
                        if s in srcToMatch.keys()
                    ])
                    dlat = numpy.array([
                        r[1].asArcseconds() for s, r in zip(sids, cresiduals)
                        if s in srcToMatch.keys()
                    ])
                    idx1 = numpy.where(
                        colors < self.sourceSelector.config.grMin)
                    idx2 = numpy.where(
                        (colors >= self.sourceSelector.config.grMin)
                        & (colors <= self.sourceSelector.config.grMax))
                    idx3 = numpy.where(
                        colors > self.sourceSelector.config.grMax)
                    rms1Long = IqrToSigma * \
                        (numpy.percentile(dlong[idx1], 75)-numpy.percentile(dlong[idx1], 25))
                    rms1Lat = IqrToSigma * (numpy.percentile(dlat[idx1], 75) -
                                            numpy.percentile(dlat[idx1], 25))
                    rms2Long = IqrToSigma * \
                        (numpy.percentile(dlong[idx2], 75)-numpy.percentile(dlong[idx2], 25))
                    rms2Lat = IqrToSigma * (numpy.percentile(dlat[idx2], 75) -
                                            numpy.percentile(dlat[idx2], 25))
                    rms3Long = IqrToSigma * \
                        (numpy.percentile(dlong[idx3], 75)-numpy.percentile(dlong[idx3], 25))
                    rms3Lat = IqrToSigma * (numpy.percentile(dlat[idx3], 75) -
                                            numpy.percentile(dlat[idx3], 25))
                    self.log.info("Blue star offsets'': %.3f %.3f, %.3f %.3f" %
                                  (numpy.median(dlong[idx1]), rms1Long,
                                   numpy.median(dlat[idx1]), rms1Lat))
                    self.log.info(
                        "Green star offsets'': %.3f %.3f, %.3f %.3f" %
                        (numpy.median(dlong[idx2]), rms2Long,
                         numpy.median(dlat[idx2]), rms2Lat))
                    self.log.info("Red star offsets'': %.3f %.3f, %.3f %.3f" %
                                  (numpy.median(dlong[idx3]), rms3Long,
                                   numpy.median(dlat[idx3]), rms3Lat))

                    self.metadata.add("RegisterBlueLongOffsetMedian",
                                      numpy.median(dlong[idx1]))
                    self.metadata.add("RegisterGreenLongOffsetMedian",
                                      numpy.median(dlong[idx2]))
                    self.metadata.add("RegisterRedLongOffsetMedian",
                                      numpy.median(dlong[idx3]))
                    self.metadata.add("RegisterBlueLongOffsetStd", rms1Long)
                    self.metadata.add("RegisterGreenLongOffsetStd", rms2Long)
                    self.metadata.add("RegisterRedLongOffsetStd", rms3Long)

                    self.metadata.add("RegisterBlueLatOffsetMedian",
                                      numpy.median(dlat[idx1]))
                    self.metadata.add("RegisterGreenLatOffsetMedian",
                                      numpy.median(dlat[idx2]))
                    self.metadata.add("RegisterRedLatOffsetMedian",
                                      numpy.median(dlat[idx3]))
                    self.metadata.add("RegisterBlueLatOffsetStd", rms1Lat)
                    self.metadata.add("RegisterGreenLatOffsetStd", rms2Lat)
                    self.metadata.add("RegisterRedLatOffsetStd", rms3Lat)

            # warp template exposure to match exposure,
            # PSF match template exposure to exposure,
            # then return the difference

            # Return warped template...  Construct sourceKernelCand list after subtract
            self.log.info("Subtracting images")
            subtractRes = self.subtract.subtractExposures(
                templateExposure=templateExposure,
                scienceExposure=exposure,
                candidateList=kernelSources,
                convolveTemplate=self.config.convolveTemplate,
                doWarping=not self.config.doUseRegister)
            subtractedExposure = subtractRes.subtractedExposure

            if self.config.doWriteMatchedExp:
                sensorRef.put(subtractRes.matchedExposure,
                              self.config.coaddName + "Diff_matchedExp")

        if self.config.doDetection:
            self.log.info("Computing diffim PSF")
            if subtractedExposure is None:
                subtractedExposure = sensorRef.get(subtractedExposureName)

            # Get Psf from the appropriate input image if it doesn't exist
            if not subtractedExposure.hasPsf():
                if self.config.convolveTemplate:
                    subtractedExposure.setPsf(exposure.getPsf())
                else:
                    if templateExposure is None:
                        template = self.getTemplate.run(
                            exposure, sensorRef, templateIdList=templateIdList)
                    subtractedExposure.setPsf(template.exposure.getPsf())

        # If doSubtract is False, then subtractedExposure was fetched from disk (above) and may
        # already have been decorrelated, so we skip decorrelation in that case.
        if self.config.doDecorrelation and self.config.doSubtract:
            decorrResult = self.decorrelate.run(exposure, templateExposure,
                                                subtractedExposure,
                                                subtractRes.psfMatchingKernel)
            subtractedExposure = decorrResult.correctedExposure

        if self.config.doDetection:
            self.log.info("Running diaSource detection")
            # Erase existing detection mask planes
            mask = subtractedExposure.getMaskedImage().getMask()
            mask &= ~(mask.getPlaneBitMask("DETECTED")
                      | mask.getPlaneBitMask("DETECTED_NEGATIVE"))

            table = afwTable.SourceTable.make(self.schema, idFactory)
            table.setMetadata(self.algMetadata)
            results = self.detection.makeSourceCatalog(
                table=table,
                exposure=subtractedExposure,
                doSmooth=not self.config.doPreConvolve)

            if self.config.doMerge:
                fpSet = results.fpSets.positive
                fpSet.merge(results.fpSets.negative, self.config.growFootprint,
                            self.config.growFootprint, False)
                diaSources = afwTable.SourceCatalog(table)
                fpSet.makeSources(diaSources)
                self.log.info("Merging detections into %d sources" %
                              (len(diaSources)))
            else:
                diaSources = results.sources

            if self.config.doMeasurement:
                self.log.info("Running diaSource measurement")
                if not self.config.doDipoleFitting:
                    self.measurement.run(diaSources, subtractedExposure)
                else:
                    if self.config.doSubtract:
                        self.measurement.run(diaSources, subtractedExposure,
                                             exposure,
                                             subtractRes.matchedExposure)
                    else:
                        self.measurement.run(diaSources, subtractedExposure,
                                             exposure)

            # Match with the calexp sources if possible
            if self.config.doMatchSources:
                if sensorRef.datasetExists("src"):
                    # Create key,val pair where key=diaSourceId and val=sourceId
                    matchRadAsec = self.config.diaSourceMatchRadius
                    matchRadPixel = matchRadAsec / exposure.getWcs(
                    ).pixelScale().asArcseconds()

                    srcMatches = afwTable.matchXy(sensorRef.get("src"),
                                                  diaSources, matchRadPixel)
                    srcMatchDict = dict([(srcMatch.second.getId(),
                                          srcMatch.first.getId())
                                         for srcMatch in srcMatches])
                    self.log.info("Matched %d / %d diaSources to sources" %
                                  (len(srcMatchDict), len(diaSources)))
                else:
                    self.log.warn(
                        "Src product does not exist; cannot match with diaSources"
                    )
                    srcMatchDict = {}

                # Create key,val pair where key=diaSourceId and val=refId
                refAstromConfig = AstrometryConfig()
                refAstromConfig.matcher.maxMatchDistArcSec = matchRadAsec
                refAstrometer = AstrometryTask(refAstromConfig)
                astromRet = refAstrometer.run(exposure=exposure,
                                              sourceCat=diaSources)
                refMatches = astromRet.matches
                if refMatches is None:
                    self.log.warn(
                        "No diaSource matches with reference catalog")
                    refMatchDict = {}
                else:
                    self.log.info(
                        "Matched %d / %d diaSources to reference catalog" %
                        (len(refMatches), len(diaSources)))
                    refMatchDict = dict([(refMatch.second.getId(),
                                          refMatch.first.getId())
                                         for refMatch in refMatches])

                # Assign source Ids
                for diaSource in diaSources:
                    sid = diaSource.getId()
                    if sid in srcMatchDict:
                        diaSource.set("srcMatchId", srcMatchDict[sid])
                    if sid in refMatchDict:
                        diaSource.set("refMatchId", refMatchDict[sid])

            if diaSources is not None and self.config.doWriteSources:
                sensorRef.put(diaSources,
                              self.config.coaddName + "Diff_diaSrc")

            if self.config.doAddMetrics and self.config.doSelectSources:
                self.log.info("Evaluating metrics and control sample")

                kernelCandList = []
                for cell in subtractRes.kernelCellSet.getCellList():
                    for cand in cell.begin(False):  # include bad candidates
                        kernelCandList.append(cand)

                # Get basis list to build control sample kernels
                basisList = kernelCandList[0].getKernel(
                    KernelCandidateF.ORIG).getKernelList()

                controlCandList = \
                    diffimTools.sourceTableToCandidateList(controlSources,
                                                           subtractRes.warpedExposure, exposure,
                                                           self.config.subtract.kernel.active,
                                                           self.config.subtract.kernel.active.detectionConfig,
                                                           self.log, doBuild=True, basisList=basisList)

                kcQa.apply(kernelCandList,
                           subtractRes.psfMatchingKernel,
                           subtractRes.backgroundModel,
                           dof=nparam)
                kcQa.apply(controlCandList, subtractRes.psfMatchingKernel,
                           subtractRes.backgroundModel)

                if self.config.doDetection:
                    kcQa.aggregate(selectSources, self.metadata, allresids,
                                   diaSources)
                else:
                    kcQa.aggregate(selectSources, self.metadata, allresids)

                sensorRef.put(selectSources,
                              self.config.coaddName + "Diff_kernelSrc")

        if self.config.doWriteSubtractedExp:
            sensorRef.put(subtractedExposure, subtractedExposureName)

        self.runDebug(exposure, subtractRes, selectSources, kernelSources,
                      diaSources)
        return pipeBase.Struct(
            subtractedExposure=subtractedExposure,
            subtractRes=subtractRes,
            sources=diaSources,
        )
Code example #34
0
def matchCats(cat1,
              cat2,
              matchRadius=1 * afwGeom.arcseconds,
              includeMismatches=False,
              multiMeas=False,
              suffix='.2'):
    """
    Match two catalogs and return a catalog containing the fields of both catalogs
    """

    mc = afwTable.MatchControl()
    mc.includeMismatches = includeMismatches
    mc.findOnlyClosest = True

    matched = afwTable.matchRaDec(cat1, cat2, matchRadius, mc)

    haveCentroid = {}
    for m1, m2, d in matched:
        haveCentroid[m1.getId()] = (m1, m2, d)

    bestMatches = {}
    if includeMismatches:
        noMatch = []
    for m1, m2, d in matched:
        if m2 is None:
            noMatch.append(m1)
        else:
            if not multiMeas:
                id2 = m2.getId()
                if id2 not in bestMatches:
                    bestMatches[id2] = (m1, m2, d)
                else:
                    if d < bestMatches[id2][2]:
                        bestMatches[id2] = (m1, m2, d)
            else:
                id1 = m1.getId()
                bestMatches[id1] = (m1, m2, d)

    if includeMismatches:
        print "{0} objects from {1} in the first catalog had no match in the second catalog.".format(
            len(noMatch), len(cat1))
        print "{0} objects from the first catalog with a match in the second catalog were not the closest match.".format(
            len(matched) - len(noMatch) - len(bestMatches))

    if includeMismatches and not multiMeas:
        nMatches = len(cat1)
        print "I found {0} matches".format(len(bestMatches))
    else:
        nMatches = len(bestMatches)
        print "I found {0} matches".format(nMatches)

    schema1 = cat1.getSchema()
    schema2 = cat2.getSchema()
    names1 = cat1.schema.getNames()
    names2 = cat2.schema.getNames()

    schema = afwTable.SimpleTable.makeMinimalSchema()

    catKeys = []
    cat1Keys = []
    cat2Keys = []
    for name in names1:
        cat1Keys.append(schema1.find(name).getKey())
        if name not in ['id', 'coord']:
            catKeys.append(schema.addField(schema1.find(name).getField()))
        else:
            catKeys.append(schema.find(name).getKey())
    for name in names2:
        cat2Keys.append(schema2.find(name).getKey())
        if name not in schema1.getNames():
            catKeys.append(schema.addField(schema2.find(name).getField()))
        else:
            catKeys.append(
                schema.addField(
                    schema2.find(name).getField().copyRenamed(name + suffix)))

    cat = afwTable.SimpleCatalog(schema)
    cat.reserve(nMatches)

    if includeMismatches and not multiMeas:
        for m1 in cat1:
            id1 = m1.getId()
            record = cat.addNew()
            for i in range(len(cat1Keys)):
                record.set(catKeys[i], m1.get(cat1Keys[i]))
            if id1 in haveCentroid:
                m2 = haveCentroid[id1][1]
                if m2 is not None:
                    id2 = m2.getId()
                    if id2 in bestMatches:
                        if bestMatches[id2][0] == m1:
                            for i in range(len(cat1Keys), len(catKeys)):
                                record.set(catKeys[i],
                                           m2.get(cat2Keys[i - len(cat1Keys)]))
                    else:
                        raise RuntimeError(
                            "If an object in the second catalog has a match it has to be in bestMatches"
                        )
    else:
        for id in bestMatches:
            m1, m2, d = bestMatches[id]
            record = cat.addNew()
            for i in range(len(cat1Keys)):
                record.set(catKeys[i], m1.get(cat1Keys[i]))
            for i in range(len(cat1Keys), len(catKeys)):
                record.set(catKeys[i], m2.get(cat2Keys[i - len(cat1Keys)]))

    return cat
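
A minimal usage sketch for matchCats follows. It is only illustrative: cat1 and cat2 are assumed to be afwTable catalogs with valid coord fields, and the 0.5 arcsecond radius is an arbitrary choice, not a value taken from the original code.

# Hypothetical usage sketch for matchCats; `cat1` and `cat2` are assumed
# to be afwTable SourceCatalogs (or SimpleCatalogs) with coordinates set.
merged = matchCats(cat1, cat2,
                   matchRadius=0.5 * afwGeom.arcseconds,
                   includeMismatches=True,
                   suffix='.2')
print("{0} rows in the merged catalog".format(len(merged)))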
Code example #35
0
def strictMatch(cat1,
                cat2,
                matchRadius=1 * afwGeom.arcseconds,
                includeMismatches=True,
                multiMeas=False):
    """
    Match two catalogs using a one-to-one relation where each match is the closest
    object
    """

    mc = afwTable.MatchControl()
    mc.includeMismatches = includeMismatches
    mc.findOnlyClosest = True

    #matched = afwTable.matchRaDec(cat1, cat2, matchRadius, True)
    matched = afwTable.matchRaDec(cat1, cat2, matchRadius, mc)

    bestMatches = {}
    noMatch = []
    for m1, m2, d in matched:
        if m2 is None:
            noMatch.append(m1)
        else:
            if not multiMeas:
                id = m2.getId()
                if id not in bestMatches:
                    bestMatches[id] = (m1, m2, d)
                else:
                    if d < bestMatches[id][2]:
                        bestMatches[id] = (m1, m2, d)
            else:
                id = m1.getId()
                bestMatches[id] = (m1, m2, d)

    if includeMismatches:
        print "{0} objects from {1} in the first catalog had no match in the second catalog.".format(
            len(noMatch), len(cat1))
        print "{0} objects from the first catalog with a match in the second catalog were not the closest match.".format(
            len(matched) - len(noMatch) - len(bestMatches))

    scm = createSchemaMapper(cat1, cat2)
    schema = scm.getOutputSchema()
    cat = afwTable.SimpleCatalog(schema)
    cat.reserve(len(bestMatches))
    cat2Fields = []
    cat2Keys = []
    catKeys = []
    schema2 = cat2.getSchema()
    suffixes = getCatSuffixes(cat2)
    for suffix in suffixes:
        cat2Fields.extend(schema2.extract("*" + suffix).keys())
    for f in cat2Fields:
        cat2Keys.append(schema2.find(f).key)
        catKeys.append(schema.find(f).key)
    for id in bestMatches:
        m1, m2, d = bestMatches[id]
        record = cat.addNew()
        record.assign(m1, scm)
        for i in range(len(cat2Keys)):
            record.set(catKeys[i], m2.get(cat2Keys[i]))
    return cat
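
For comparison, a hedged sketch of calling strictMatch on the same hypothetical catalogs; it keeps only one-to-one closest matches and copies the suffixed columns of the second catalog into the output (createSchemaMapper and getCatSuffixes, used internally, must be available in the same module).

# Hypothetical usage sketch for strictMatch; assumes cat1 and cat2 as above.
best = strictMatch(cat1, cat2, matchRadius=1 * afwGeom.arcseconds)
print("{0} one-to-one matches".format(len(best)))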
Code example #36
0
    def run(self, butler, coaddSources, ccdInputs, coaddWcs):
        """!Propagate flags from individual visit measurements to coadd

        This requires matching the coadd source catalog to each of the catalogs
        from the inputs, and thresholding on the number of times a source is
        flagged in the input catalogs.  The threshold is applied to the relative
        occurrence of the flag for each source.  Flagging a source that is always
        flagged in the inputs corresponds to a threshold of 1, while flagging a
        source that is flagged in any of the inputs corresponds to a threshold of
        0.  Neither of these extremes is really useful in practice.

        Setting the threshold too high means that sources that are not consistently
        flagged (e.g., due to chip gaps) will not have the flag propagated.  Setting
        the threshold too low means that random sources which are falsely flagged in
        the inputs will start to dominate.  If in doubt, we suggest making this threshold
        relatively low, but not zero (e.g., 0.1 to 0.2 or so).  The greater the confidence
        in the quality of the flagging, the lower the threshold can be.

        The relative occurrence accounts for the edge of the field-of-view of
        the camera, but does not include chip gaps, bad or saturated pixels, etc.

        @param[in] butler  Data butler, for retrieving the input source catalogs
        @param[in,out] coaddSources  Source catalog from the coadd
        @param[in] ccdInputs  Table of CCDs that contribute to the coadd
        @param[in] coaddWcs  Wcs for coadd
        """
        if len(self.config.flags) == 0:
            return

        flags = self._keys.keys()
        visitKey = ccdInputs.schema.find("visit").key
        ccdKey = ccdInputs.schema.find("ccd").key
        radius = self.config.matchRadius*afwGeom.arcseconds

        self.log.info("Propagating flags %s from inputs" % (flags,))

        counts = dict((f, numpy.zeros(len(coaddSources), dtype=int)) for f in flags)
        indices = numpy.array([s.getId() for s in coaddSources]) # Allowing for non-contiguous data

        # Accumulate counts of flags being set
        for ccdRecord in ccdInputs:
            v = ccdRecord.get(visitKey)
            c = ccdRecord.get(ccdKey)
            ccdSources = butler.get("src", visit=int(v), ccd=int(c), immediate=True)
            for sourceRecord in ccdSources:
                sourceRecord.updateCoord(ccdRecord.getWcs())
            for flag in flags:
                # We assume that the flags will be relatively rare, so it is more efficient to match
                # against a subset of the input catalog for each flag than it is to match once against
                # the entire catalog.  It would be best to have built a kd-tree on coaddSources and
                # keep reusing that for the matching, but we don't have a suitable implementation.
                matches = afwTable.matchRaDec(coaddSources, ccdSources[ccdSources.get(flag)], radius, False)
                for m in matches:
                    index = (numpy.where(indices == m.first.getId()))[0][0]
                    counts[flag][index] += 1

        # Apply threshold
        for f in flags:
            key = self._keys[f]
            for s, num in zip(coaddSources, counts[f]):
                numOverlaps = len(ccdInputs.subsetContaining(s.getCentroid(), coaddWcs, True))
                s.setFlag(key, bool(num > numOverlaps*self.config.flags[f]))
            self.log.info("Propagated %d sources with flag %s" % (sum(s.get(key) for s in coaddSources), f))
Code example #37
0
    def run(self, butler, coaddSources, ccdInputs, coaddWcs):
        """!Propagate flags from individual visit measurements to coadd

        This requires matching the coadd source catalog to each of the catalogs
        from the inputs, and thresholding on the number of times a source is
        flagged in the input catalogs.  The threshold is applied to the relative
        occurrence of the flag for each source.  Flagging a source that is always
        flagged in the inputs corresponds to a threshold of 1, while flagging a
        source that is flagged in any of the inputs corresponds to a threshold of
        0.  Neither of these extremes is really useful in practice.

        Setting the threshold too high means that sources that are not consistently
        flagged (e.g., due to chip gaps) will not have the flag propagated.  Setting
        the threshold too low means that random sources which are falsely flagged in
        the inputs will start to dominate.  If in doubt, we suggest making this threshold
        relatively low, but not zero (e.g., 0.1 to 0.2 or so).  The greater the confidence
        in the quality of the flagging, the lower the threshold can be.

        The relative occurrence accounts for the edge of the field-of-view of
        the camera, but does not include chip gaps, bad or saturated pixels, etc.

        @param[in] butler  Data butler, for retrieving the input source catalogs
        @param[in,out] coaddSources  Source catalog from the coadd
        @param[in] ccdInputs  Table of CCDs that contribute to the coadd
        @param[in] coaddWcs  Wcs for coadd
        """
        if len(self.config.flags) == 0:
            return

        flags = self._keys.keys()
        visitKey = ccdInputs.schema.find("visit").key
        ccdKey = ccdInputs.schema.find("ccd").key
        radius = self.config.matchRadius * afwGeom.arcseconds

        self.log.info("Propagating flags %s from inputs" % (flags, ))

        counts = dict(
            (f, numpy.zeros(len(coaddSources), dtype=int)) for f in flags)
        indices = numpy.array([s.getId() for s in coaddSources
                               ])  # Allowing for non-contiguous data

        # Accumulate counts of flags being set
        for ccdRecord in ccdInputs:
            v = ccdRecord.get(visitKey)
            c = ccdRecord.get(ccdKey)
            ccdSources = butler.get("src",
                                    visit=int(v),
                                    ccd=int(c),
                                    immediate=True)
            for sourceRecord in ccdSources:
                sourceRecord.updateCoord(ccdRecord.getWcs())
            for flag in flags:
                # We assume that the flags will be relatively rare, so it is more efficient to match
                # against a subset of the input catalog for each flag than it is to match once against
                # the entire catalog.  It would be best to have built a kd-tree on coaddSources and
                # keep reusing that for the matching, but we don't have a suitable implementation.
                mc = afwTable.MatchControl()
                mc.findOnlyClosest = False
                matches = afwTable.matchRaDec(coaddSources,
                                              ccdSources[ccdSources.get(flag)],
                                              radius, mc)
                for m in matches:
                    index = (numpy.where(indices == m.first.getId()))[0][0]
                    counts[flag][index] += 1

        # Apply threshold
        for f in flags:
            key = self._keys[f]
            for s, num in zip(coaddSources, counts[f]):
                numOverlaps = len(
                    ccdInputs.subsetContaining(s.getCentroid(), coaddWcs,
                                               True))
                s.setFlag(key, bool(num > numOverlaps * self.config.flags[f]))
            self.log.info("Propagated %d sources with flag %s" %
                          (sum(s.get(key) for s in coaddSources), f))
Code example #38
0
        new_rec.set(name, record[name])
    new_rec.set('mag_{}'.format(filter_),
                calib.getMagnitude(record['modelfit_CModel_flux']))

# Read in the galaxy catalog data.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore')
    gc = GCRCatalogs.load_catalog('proto-dc2_v2.1.2_test')

# Create a SourceCatalog from the gc data, restricting to the
# tract/patch being considered.
galaxy_catalog = patch_selector(gc, band=filter_, max_mag=mag_max)

# Find positional matches within 100 milliarcseconds.
radius = afw_geom.Angle(0.1, afw_geom.arcseconds)
matches = afw_table.matchRaDec(drp_catalog, galaxy_catalog, radius)

# Compare magnitudes for matched objects.
drp_mag = np.zeros(len(matches), dtype=float)
gc_mag = np.zeros(len(matches), dtype=float)
sep = np.zeros(len(matches), dtype=float)
# Arrays for a quiver plot.
u = np.zeros(len(matches), dtype=float)
v = np.zeros(len(matches), dtype=float)
for i, match in enumerate(matches):
    drp_mag[i] = match.first['mag_{}'.format(filter_)]
    gc_mag[i] = match.second['mag_{}'.format(filter_)]
    sep[i] = np.degrees(match.distance) * 3600. * 1000.
    u[i] = match.first['coord_ra'] - match.second['coord_ra']
    v[i] = match.first['coord_dec'] - match.second['coord_dec']
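
The u, v arrays above are built for a quiver plot that the fragment itself does not show. A hedged matplotlib sketch of how they might be rendered (the plotting calls and output file name are assumptions, not part of the original script):

# Assumed follow-up plotting code; matplotlib is not imported in the
# original fragment, so this is only a sketch.
import matplotlib.pyplot as plt

ra = np.array([m.first.getRa().asDegrees() for m in matches])
dec = np.array([m.first.getDec().asDegrees() for m in matches])
plt.figure()
plt.quiver(ra, dec, u, v, angles='xy')
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.title('DRP - galaxy catalog positional offsets')
plt.savefig('astrometric_offsets_{}.png'.format(filter_))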
Code example #39
0
File: sourceMatch.py Project: ziggyman/afw_old
    def testPhotometricCalib(self):
        """Test matching the CFHT catalogue (as generated using LSST code) to the SDSS catalogue"""

        band = 2  # SDSS r

        #
        # Read SDSS catalogue
        #
        ifd = open(
            os.path.join(lsst.utils.getPackageDir("afwdata"), "CFHT", "D2",
                         "sdss.dat"), "r")

        sdss = afwTable.SourceCatalog(self.table)
        sdssSecondary = afwTable.SourceCatalog(self.table)

        PRIMARY, SECONDARY = 1, 2  # values of mode

        id = 0
        for line in ifd.readlines():
            if re.search(r"^\s*#", line):
                continue

            fields = line.split()
            objId = int(fields[0])
            fields[1]  # object name (unused)
            mode = int(fields[2])
            ra, dec = [float(f) for f in fields[3:5]]
            psfMags = [float(f) for f in fields[5:]]

            if mode == PRIMARY:
                s = sdss.addNew()
            elif mode == SECONDARY:
                s = sdssSecondary.addNew()

            s.setId(objId)
            s.setRa(ra * afwGeom.degrees)
            s.setDec(dec * afwGeom.degrees)
            s.set(self.table.getPsfFluxKey(), psfMags[band])

        del ifd
        #
        # Read the catalogue built from the template image
        #
        ifd = open(
            os.path.join(lsst.utils.getPackageDir("afwdata"), "CFHT", "D2",
                         "template.dat"), "r")

        template = afwTable.SourceCatalog(self.table)

        id = 0
        for line in ifd.readlines():
            if re.search(r"^\s*#", line):
                continue

            fields = line.split()
            id, flags = [int(f) for f in fields[0:2]]
            ra, dec = [float(f) for f in fields[2:4]]
            flux = [float(f) for f in fields[4:]]

            if flags & 0x1:  # EDGE
                continue

            s = template.addNew()
            s.setId(id)
            id += 1
            s.set(afwTable.SourceTable.getCoordKey().getRa(),
                  ra * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(),
                  dec * afwGeom.degrees)
            s.set(self.table.getPsfFluxKey(), flux[0])

        del ifd
        #
        # Actually do the match
        #
        matches = afwTable.matchRaDec(sdss, template, 1.0 * afwGeom.arcseconds,
                                      False)

        self.assertEqual(len(matches), 901)
        self.checkPickle(matches)

        if False:
            for mat in matches:
                s0 = mat[0]
                s1 = mat[1]
                d = mat[2]
                print(s0.getRa(), s0.getDec(), s1.getRa(), s1.getDec(),
                      s0.getPsfFlux(), s1.getPsfFlux())
        #
        # Actually do the match
        #
        for s in sdssSecondary:
            sdss.append(s)

        matches = afwTable.matchRaDec(sdss, 1.0 * afwGeom.arcseconds, False)
        nmiss = 1  # one object doesn't match
        self.assertEqual(len(matches), len(sdssSecondary) - nmiss)
        self.checkPickle(matches)
        #
        # Find the one that didn't match
        #
        if False:
            matchIds = set()
            for s0, s1, d in matches:
                matchIds.add(s0.getId())
                matchIds.add(s1.getId())

            for s in sdssSecondary:
                if s.getId() not in matchIds:
                    print "RHL", s.getId()

        matches = afwTable.matchRaDec(sdss, 1.0 * afwGeom.arcseconds, True)
        self.assertEqual(len(matches), 2 * (len(sdssSecondary) - nmiss))
        self.checkPickle(matches)

        if False:
            for mat in matches:
                s0 = mat[0]
                s1 = mat[1]
                mat[2]
                print(s0.getId(), s1.getId(), s0.getRa(), s0.getDec(),
                      s1.getRa(), s1.getDec(), s0.getPsfFlux(), s1.getPsfFlux())
Code example #40
0
def loadAndMatchData(repo, visits, fields, ref, ref_field, camcol, filter):
    """Load data from specific visit+field pairs.  Match with reference.

    @param repo  The repository.  This is generally the directory on disk
                    that contains the repository and mapper.
    @param visits  'runs' in SDSS nomenclature.  List.
    @param fields  Field within a camcol.  List.
    @param ref        The run of the reference image set.  Scalar.
    @param ref_field  The field of the reference image set.  Scalar.
    @param camcol   Camera column to use.  List.
    @param filter   Name of the filter.  Scalar

    Return a pipeBase.Struct with mag, dist, and number of matches.

    Notes: {visit, filter, field, camcol} are sufficient to uniquely specify
      a data ID for the Butler in the obs_sdss camera mapping.
    """

    flags = ["base_PixelFlags_flag_saturated", "base_PixelFlags_flag_cr", "base_PixelFlags_flag_interpolated",
             "base_PsfFlux_flag_edge"]

    # setup butler
    butler = dafPersist.Butler(repo)

    for indx, c in enumerate(camcol):
        dataid = {'run': ref, 'filter': filter, 'field': ref_field, 'camcol': c}
        oldSrc = butler.get('src', dataid, immediate=True)
        print(len(oldSrc), "sources in camcol :", c)
        if indx == 0:
            # retrieve the schema of the source catalog and extend it in order
            # to add a field to record the camcol number
            oldSchema = oldSrc.getSchema()
            mapper = afwTable.SchemaMapper(oldSchema)
            mapper.addMinimalSchema(oldSchema)
            mapper.addOutputField(afwTable.Field[np.int32]("camcol", "camcol number"))
            newSchema = mapper.getOutputSchema()

            # create the new extended source catalog
            srcRef = afwTable.SourceCatalog(newSchema)

        # create temporary catalog
        tmpCat = afwTable.SourceCatalog(srcRef.table)
        tmpCat.extend(oldSrc, mapper=mapper)
        # fill in the camcol information in numpy mode in order to be efficient
        tmpCat['camcol'][:] = c
        # append the temporary catalog to the extended source catalog
        srcRef.extend(tmpCat, deep=False)

    print(len(srcRef), "Sources in reference visit :", ref)

    mag = []
    dist = []
    for v, f in zip(visits, fields):
        if v == ref:
            continue
        for indx, c in enumerate(camcol):
            dataid = {'run': v, 'filter': filter, 'field': f, 'camcol': c}
            if indx == 0:
                srcVis = butler.get('src', dataid, immediate=True)
            else:
                srcVis.extend(butler.get('src', dataid, immediate=True), False)
            print(len(srcVis), "sources in camcol : ", c)

        match = afwTable.matchRaDec(srcRef, srcVis, afwGeom.Angle(1, afwGeom.arcseconds))
        matchNum = len(match)
        print("Visit :", v, matchNum, "matches found")

        schemaRef = srcRef.getSchema()
        schemaVis = srcVis.getSchema()
        extRefKey = schemaRef["base_ClassificationExtendedness_value"].asKey()
        extVisKey = schemaVis["base_ClassificationExtendedness_value"].asKey()
        flagKeysRef = []
        flagKeysVis = []
        for fl in flags:
            keyRef = schemaRef[fl].asKey()
            flagKeysRef.append(keyRef)
            keyVis = schemaVis[fl].asKey()
            flagKeysVis.append(keyVis)

        for m in match:
            mRef = m.first
            mVis = m.second

            # skip matches where any of the quality flags is set on either source
            if any(mRef.get(fl) for fl in flagKeysRef):
                continue
            if any(mVis.get(fl) for fl in flagKeysVis):
                continue

            # cleanup the reference sources in order to keep only decent star-like objects
            if mRef.get(extRefKey) >= 1.0 or mVis.get(extVisKey) >= 1.0:
                continue

            ang = afwGeom.radToMas(m.distance)

            # retrieve the camcol corresponding to the reference source
            camcolRef = mRef.get('camcol')
            # retrieve the calibration object associated to the camcol
            did = {'run': ref, 'filter': filter, 'field': ref_field, 'camcol': camcolRef}
            photoCalib = butler.get("calexp_photoCalib", did)
            # compute magnitude
            refMag = photoCalib.instFluxToMagnitude(mRef.get('base_PsfFlux_instFlux'))

            mag.append(refMag)
            dist.append(ang)

    return pipeBase.Struct(
        mag=mag,
        dist=dist,
        match=matchNum
    )
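
A hedged example of calling loadAndMatchData; the repository path, run numbers, fields, and camcols below are placeholders for illustration only, not values taken from the original analysis:

# Hypothetical invocation; 'REPO_PATH' and all run/field/camcol numbers
# are placeholders.
result = loadAndMatchData(repo='REPO_PATH',
                          visits=[1234, 1235], fields=[11, 12],
                          ref=1000, ref_field=11,
                          camcol=[1, 2, 3], filter='r')
print(len(result.mag), "matched star magnitudes;",
      result.match, "matches in the last visit processed")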
Code example #41
0
def point_source_matches(dataref,
                         ref_cat0,
                         max_offset=0.1,
                         src_columns=(),
                         ref_columns=(),
                         flux_type='base_PsfFlux'):
    """
    Match point sources between a reference catalog and the dataref
    pointing to a src catalog.

    Parameters
    ----------
    dataref: lsst.daf.persistence.butlerSubset.ButlerDataref
        Dataref pointing to the desired sensor-visit.
    ref_cat0: lsst.afw.table.SimpleCatalog
        The reference catalog.
    max_offset: float [0.1]
        Maximum offset for positional matching in arcseconds.
    src_columns: list-like [()]
        Columns from the src catalog to save in the output dataframe.
    ref_columns: list-like [()]
        Columns from the reference catalog to save in the output dataframe.
        The column names will have 'ref_' prepended.
    flux_type: str ['base_PsfFlux']
        Flux type for point sources.

    Returns
    -------
    pandas.DataFrame
    """
    flux_col = f'{flux_type}_instFlux'
    src0 = dataref.get('src')
    band = dataref.dataId['filter']

    # Apply point source selections to the source catalog.
    ext = src0.get('base_ClassificationExtendedness_value')
    model_flag = src0.get(f'{flux_type}_flag')
    model_flux = src0.get(flux_col)
    num_children = src0.get('deblend_nChild')
    src = src0.subset((ext == 0) & (model_flag == False) & (model_flux > 0)
                      & (num_children == 0))

    # Match RA, Dec with the reference catalog stars.
    ref_cat = ref_cat0.subset((ref_cat0.get('resolved') == 0))
    radius = lsst_geom.Angle(max_offset, lsst_geom.arcseconds)
    matches = afw_table.matchRaDec(ref_cat, src, radius)
    num_matches = len(matches)

    offsets = np.zeros(num_matches, dtype=float)
    ref_ras = np.zeros(num_matches, dtype=float)
    ref_decs = np.zeros(num_matches, dtype=float)
    ref_mags = np.zeros(num_matches, dtype=float)
    src_mags = np.zeros(num_matches, dtype=float)
    ref_data = defaultdict(list)
    src_data = defaultdict(list)
    calib = dataref.get('calexp_photoCalib')
    for i, match in enumerate(matches):
        offsets[i] = np.degrees(match.distance) * 3600 * 1000.
        ref_mags[i] = match.first[f'lsst_{band}']
        ref_ras[i] = match.first['coord_ra']
        ref_decs[i] = match.first['coord_dec']
        src_mags[i] = calib.instFluxToMagnitude(match.second[flux_col])
        for ref_col in ref_columns:
            ref_data[f'ref_{ref_col}'].append(match.first[ref_col])
        for src_col in src_columns:
            src_data[src_col].append(match.second[src_col])
    data = dict(offset=offsets,
                ref_mag=ref_mags,
                src_mag=src_mags,
                ref_ra=ref_ras,
                ref_dec=ref_decs)
    data.update(src_data)
    data.update(ref_data)
    return pd.DataFrame(data=data)
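
A hedged usage sketch for point_source_matches; the dataref and reference catalog are assumed to be set up elsewhere (the construction of the reference catalog is not shown in the example above), and the column names passed in are examples only:

# Hypothetical usage; `dataref` is a Gen2 butler dataRef for one sensor-visit
# and `ref_cat` is a reference SimpleCatalog with 'resolved' and
# 'lsst_<band>' columns, both prepared elsewhere.
df = point_source_matches(dataref, ref_cat,
                          max_offset=0.1,
                          src_columns=('base_SdssShape_xx',),
                          ref_columns=('coord_ra',),
                          flux_type='base_PsfFlux')
print(len(df), "matched point sources")
print("median offset (mas):", df['offset'].median())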
Code example #42
0
File: sourceMatch.py Project: dr-guangtou/hs_hsc
    def testPhotometricCalib(self):
        """Test matching the CFHT catalogue (as generated using LSST code) to the SDSS catalogue"""

        if not eups.productDir("afwdata"):
            print >> sys.stderr, "Failed to open sdss catalogue"
            return

        band = 2                        # SDSS r
        
        #
        # Read SDSS catalogue
        #
        ifd = open(os.path.join(eups.productDir("afwdata"), "CFHT", "D2", "sdss.dat"), "r")

        sdss = afwTable.SourceCatalog(self.table)
        sdssSecondary = afwTable.SourceCatalog(self.table)

        PRIMARY, SECONDARY = 1, 2       # values of mode

        id = 0
        for line in ifd.readlines():
            if re.search(r"^\s*#", line):
                continue

            fields = line.split()
            objId = int(fields[0])
            name = fields[1]
            mode = int(fields[2])
            ra, dec = [float(f) for f in fields[3:5]]
            psfMags = [float(f) for f in fields[5:]]

            if mode == PRIMARY:
                s = sdss.addNew()
            elif mode == SECONDARY:
                s = sdssSecondary.addNew()

            s.setId(objId)
            s.setRa(ra * afwGeom.degrees)
            s.setDec(dec * afwGeom.degrees)
            s.set(self.table.getPsfFluxKey(), psfMags[band])

        del ifd
        #
        # Read the catalogue built from the template image
        #
        ifd = open(os.path.join(eups.productDir("afwdata"), "CFHT", "D2", "template.dat"), "r")

        template = afwTable.SourceCatalog(self.table)

        id = 0
        for line in ifd.readlines():
            if re.search(r"^\s*#", line):
                continue

            fields = line.split()
            id, flags = [int(f) for f in  fields[0:2]]
            ra, dec = [float(f) for f in fields[2:4]]
            flux = [float(f) for f in fields[4:]]

            if flags & 0x1:             # EDGE
                continue

            s = template.addNew()
            s.setId(id)
            id += 1
            s.set(afwTable.SourceTable.getCoordKey().getRa(), ra * afwGeom.degrees)
            s.set(afwTable.SourceTable.getCoordKey().getDec(), dec * afwGeom.degrees)
            s.set(self.table.getPsfFluxKey(), flux[0])

        del ifd
        #
        # Actually do the match
        #
        mc = afwTable.MatchControl()
        mc.findOnlyClosest = False

        matches = afwTable.matchRaDec(sdss, template, 1.0*afwGeom.arcseconds, mc)

        self.assertEqual(len(matches), 901)
        self.checkPickle(matches)

        if False:
            for mat in matches:
                s0 = mat[0]
                s1 = mat[1]
                d = mat[2]
                print s0.getRa(), s0.getDec(), s1.getRa(), s1.getDec(), s0.getPsfFlux(), s1.getPsfFlux()
        #
        # Actually do the match
        #
        for s in sdssSecondary:
            sdss.append(s)

        mc = afwTable.MatchControl()
        mc.symmetricMatch = False
        matches = afwTable.matchRaDec(sdss, 1.0*afwGeom.arcseconds, mc)
        nmiss = 1                                              # one object doesn't match
        self.assertEqual(len(matches), len(sdssSecondary) - nmiss)
        self.checkPickle(matches)
        #
        # Find the one that didn't match
        #
        if False:
            matchIds = set()
            for s0, s1, d in matches:
                matchIds.add(s0.getId())
                matchIds.add(s1.getId())

            for s in sdssSecondary:
                if s.getId() not in matchIds:
                    print "RHL", s.getId()

        matches = afwTable.matchRaDec(sdss, 1.0*afwGeom.arcseconds)
        self.assertEqual(len(matches), 2*(len(sdssSecondary) - nmiss))
        self.checkPickle(matches)

        if False:
            for mat in matches:
                s0 = mat[0]
                s1 = mat[1]
                d = mat[2]
                print s0.getId(), s1.getId(), s0.getRa(), s0.getDec(),
                print s1.getRa(), s1.getDec(), s0.getPsfFlux(), s1.getPsfFlux()
Code example #43
0
    def run(self, sensorRef, templateIdList=None):
        """Subtract an image from a template coadd and measure the result

        Steps include:
        - warp template coadd to match WCS of image
        - PSF match image to warped template
        - subtract image from PSF-matched, warped template
        - persist difference image
        - detect sources
        - measure sources

        @param sensorRef: sensor-level butler data reference, used for the following data products:
        Input only:
        - calexp
        - psf
        - ccdExposureId
        - ccdExposureId_bits
        - self.config.coaddName + "Coadd_skyMap"
        - self.config.coaddName + "Coadd"
        Input or output, depending on config:
        - self.config.coaddName + "Diff_subtractedExp"
        Output, depending on config:
        - self.config.coaddName + "Diff_matchedExp"
        - self.config.coaddName + "Diff_src"

        @return pipe_base Struct containing these fields:
        - subtractedExposure: exposure after subtracting template;
            the unpersisted version if subtraction not run but detection run
            None if neither subtraction nor detection run (i.e. nothing useful done)
        - subtractRes: results of subtraction task; None if subtraction not run
        - sources: detected and possibly measured sources; None if detection not run
        """
        self.log.info("Processing %s" % (sensorRef.dataId))

        # initialize outputs and some intermediate products
        subtractedExposure = None
        subtractRes = None
        selectSources = None
        kernelSources = None
        controlSources = None
        diaSources = None

        # We make one IdFactory that will be used by both icSrc and src datasets;
        # I don't know if this is the way we ultimately want to do things, but at least
        # this ensures the source IDs are fully unique.
        expBits = sensorRef.get("ccdExposureId_bits")
        expId = long(sensorRef.get("ccdExposureId"))
        idFactory = afwTable.IdFactory.makeSource(expId, 64 - expBits)

        # Retrieve the science image we wish to analyze
        exposure = sensorRef.get("calexp", immediate=True)
        if self.config.doAddCalexpBackground:
            mi = exposure.getMaskedImage()
            mi += sensorRef.get("calexpBackground").getImage()
        if not exposure.hasPsf():
            raise pipeBase.TaskError("Exposure has no psf")
        sciencePsf = exposure.getPsf()

        subtractedExposureName = self.config.coaddName + "Diff_differenceExp"
        templateExposure = None  # Stitched coadd exposure
        templateSources = None   # Sources on the template image
        if self.config.doSubtract:
            print templateIdList
            template = self.getTemplate.run(exposure, sensorRef, templateIdList=templateIdList)
            templateExposure = template.exposure
            templateSources = template.sources

            # compute scienceSigmaOrig: sigma of PSF of science image before pre-convolution
            ctr = afwGeom.Box2D(exposure.getBBox()).getCenter()
            psfAttr = PsfAttributes(sciencePsf, afwGeom.Point2I(ctr))
            scienceSigmaOrig = psfAttr.computeGaussianWidth(psfAttr.ADAPTIVE_MOMENT)

            # sigma of PSF of template image before warping
            ctr = afwGeom.Box2D(templateExposure.getBBox()).getCenter()
            psfAttr = PsfAttributes(templateExposure.getPsf(), afwGeom.Point2I(ctr))
            templateSigma = psfAttr.computeGaussianWidth(psfAttr.ADAPTIVE_MOMENT)

            # if requested, convolve the science exposure with its PSF
            # (properly, this should be a cross-correlation, but our code does not yet support that)
            # compute scienceSigmaPost: sigma of science exposure with pre-convolution, if done,
            # else sigma of original science exposure
            if self.config.doPreConvolve:
                convControl = afwMath.ConvolutionControl()
                # cannot convolve in place, so make a new MI to receive convolved image
                srcMI = exposure.getMaskedImage()
                destMI = srcMI.Factory(srcMI.getDimensions())
                srcPsf = sciencePsf
                if self.config.useGaussianForPreConvolution:
                    # convolve with a simplified PSF model: a single Gaussian
                    kWidth, kHeight = sciencePsf.getLocalKernel().getDimensions()
                    preConvPsf = SingleGaussianPsf(kWidth, kHeight, scienceSigmaOrig)
                else:
                    # convolve with science exposure's PSF model
                    preConvPsf = srcPsf
                afwMath.convolve(destMI, srcMI, preConvPsf.getLocalKernel(), convControl)
                exposure.setMaskedImage(destMI)
                scienceSigmaPost = scienceSigmaOrig * math.sqrt(2)
            else:
                scienceSigmaPost = scienceSigmaOrig

            # If requested, find sources in the image
            if self.config.doSelectSources:
                if not sensorRef.datasetExists("src"):
                    self.log.warn("Src product does not exist; running detection, measurement, selection")
                    # Run own detection and measurement; necessary in nightly processing
                    selectSources = self.subtract.getSelectSources(
                        exposure,
                        sigma = scienceSigmaPost,
                        doSmooth = not self.config.doPreConvolve,
                        idFactory = idFactory,
                    )
                else:
                    self.log.info("Source selection via src product")
                    # Sources already exist; for data release processing
                    selectSources = sensorRef.get("src")

                # Number of basis functions
                nparam = len(makeKernelBasisList(self.subtract.config.kernel.active,
                                                 referenceFwhmPix=scienceSigmaPost * FwhmPerSigma,
                                                 targetFwhmPix=templateSigma * FwhmPerSigma))

                if self.config.doAddMetrics:
                    # Modify the schema of all Sources
                    kcQa = KernelCandidateQa(nparam)
                    selectSources = kcQa.addToSchema(selectSources)

                if self.config.kernelSourcesFromRef:
                    # match exposure sources to reference catalog
                    astromRet = self.astrometer.loadAndMatch(exposure=exposure, sourceCat=selectSources)
                    matches = astromRet.matches
                elif templateSources:
                    # match exposure sources to template sources
                    matches = afwTable.matchRaDec(templateSources, selectSources, 1.0*afwGeom.arcseconds,
                                                  False)
                else:
                    raise RuntimeError("doSelectSources=True and kernelSourcesFromRef=False," +
                                       "but template sources not available. Cannot match science " +
                                       "sources with template sources. Run process* on data from " +
                                       "which templates are built.")

                kernelSources = self.sourceSelector.selectStars(exposure, selectSources,
                    matches=matches).starCat

                random.shuffle(kernelSources, random.random)
                controlSources = kernelSources[::self.config.controlStepSize]
                kernelSources = [k for i,k in enumerate(kernelSources) if i % self.config.controlStepSize]

                if self.config.doSelectDcrCatalog:
                    redSelector  = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(grMin=self.sourceSelector.config.grMax, grMax=99.999))
                    redSources   = redSelector.selectStars(exposure, selectSources, matches=matches).starCat
                    controlSources.extend(redSources)

                    blueSelector = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(grMin=-99.999, grMax=self.sourceSelector.config.grMin))
                    blueSources  = blueSelector.selectStars(exposure, selectSources, matches=matches).starCat
                    controlSources.extend(blueSources)

                if self.config.doSelectVariableCatalog:
                    varSelector = DiaCatalogSourceSelectorTask(
                        DiaCatalogSourceSelectorConfig(includeVariable=True))
                    varSources  = varSelector.selectStars(exposure, selectSources, matches=matches).starCat
                    controlSources.extend(varSources)

                self.log.info("Selected %d / %d sources for Psf matching (%d for control sample)" 
                              % (len(kernelSources), len(selectSources), len(controlSources)))
            allresids = {}
            if self.config.doUseRegister:
                self.log.info("Registering images")

                if templateSources is None:
                    # Run detection on the template, which is
                    # temporarily background-subtracted
                    templateSources = self.subtract.getSelectSources(
                        templateExposure,
                        sigma=templateSigma,
                        doSmooth=True,
                        idFactory=idFactory
                    )

                # Third step: we need to fit the relative astrometry.
                #
                wcsResults = self.fitAstrometry(templateSources, templateExposure, selectSources)
                warpedExp = self.register.warpExposure(templateExposure, wcsResults.wcs,
                                            exposure.getWcs(), exposure.getBBox())
                templateExposure = warpedExp

                # Create debugging outputs on the astrometric
                # residuals as a function of position.  Persistence
                # not yet implemented; expected on (I believe) #2636.
                if self.config.doDebugRegister:
                    # Grab matches to reference catalog
                    srcToMatch = {x.second.getId() : x.first for x in matches}

                    refCoordKey = wcsResults.matches[0].first.getTable().getCoordKey()
                    inCentroidKey = wcsResults.matches[0].second.getTable().getCentroidKey()
                    sids      = [m.first.getId() for m in wcsResults.matches]
                    positions = [m.first.get(refCoordKey) for m in wcsResults.matches]
                    residuals = [m.first.get(refCoordKey).getOffsetFrom(wcsResults.wcs.pixelToSky(
                                m.second.get(inCentroidKey))) for m in wcsResults.matches]
                    allresids = dict(zip(sids, zip(positions, residuals)))

                    cresiduals = [m.first.get(refCoordKey).getTangentPlaneOffset(
                            wcsResults.wcs.pixelToSky(
                                m.second.get(inCentroidKey))) for m in wcsResults.matches]
                    colors    = numpy.array([-2.5*numpy.log10(srcToMatch[x].get("g"))
                                              + 2.5*numpy.log10(srcToMatch[x].get("r")) 
                                              for x in sids if x in srcToMatch.keys()])
                    dlong     = numpy.array([r[0].asArcseconds() for s,r in zip(sids, cresiduals) 
                                             if s in srcToMatch.keys()])
                    dlat      = numpy.array([r[1].asArcseconds() for s,r in zip(sids, cresiduals) 
                                             if s in srcToMatch.keys()])
                    idx1      = numpy.where(colors<self.sourceSelector.config.grMin)
                    idx2      = numpy.where((colors>=self.sourceSelector.config.grMin)&
                                            (colors<=self.sourceSelector.config.grMax))
                    idx3      = numpy.where(colors>self.sourceSelector.config.grMax)
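                    # Robust per-bin scatter: convert the interquartile range of the
                    # residuals to an equivalent Gaussian sigma for each color bin.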
                    rms1Long  = IqrToSigma*(numpy.percentile(dlong[idx1],75)-numpy.percentile(dlong[idx1],25))
                    rms1Lat   = IqrToSigma*(numpy.percentile(dlat[idx1],75)-numpy.percentile(dlat[idx1],25))
                    rms2Long  = IqrToSigma*(numpy.percentile(dlong[idx2],75)-numpy.percentile(dlong[idx2],25))
                    rms2Lat   = IqrToSigma*(numpy.percentile(dlat[idx2],75)-numpy.percentile(dlat[idx2],25))
                    rms3Long  = IqrToSigma*(numpy.percentile(dlong[idx3],75)-numpy.percentile(dlong[idx3],25))
                    rms3Lat   = IqrToSigma*(numpy.percentile(dlat[idx3],75)-numpy.percentile(dlat[idx3],25))
                    self.log.info("Blue star offsets'': %.3f %.3f, %.3f %.3f"  % (numpy.median(dlong[idx1]), 
                                                                                  rms1Long,
                                                                                  numpy.median(dlat[idx1]), 
                                                                                  rms1Lat))
                    self.log.info("Green star offsets'': %.3f %.3f, %.3f %.3f"  % (numpy.median(dlong[idx2]), 
                                                                                   rms2Long,
                                                                                   numpy.median(dlat[idx2]), 
                                                                                   rms2Lat))
                    self.log.info("Red star offsets'': %.3f %.3f, %.3f %.3f"  % (numpy.median(dlong[idx3]), 
                                                                                 rms3Long,
                                                                                 numpy.median(dlat[idx3]), 
                                                                                 rms3Lat))

                    self.metadata.add("RegisterBlueLongOffsetMedian", numpy.median(dlong[idx1]))
                    self.metadata.add("RegisterGreenLongOffsetMedian", numpy.median(dlong[idx2]))
                    self.metadata.add("RegisterRedLongOffsetMedian", numpy.median(dlong[idx3]))
                    self.metadata.add("RegisterBlueLongOffsetStd", rms1Long)
                    self.metadata.add("RegisterGreenLongOffsetStd", rms2Long)
                    self.metadata.add("RegisterRedLongOffsetStd", rms3Long)

                    self.metadata.add("RegisterBlueLatOffsetMedian", numpy.median(dlat[idx1]))
                    self.metadata.add("RegisterGreenLatOffsetMedian", numpy.median(dlat[idx2]))
                    self.metadata.add("RegisterRedLatOffsetMedian", numpy.median(dlat[idx3]))
                    self.metadata.add("RegisterBlueLatOffsetStd", rms1Lat)
                    self.metadata.add("RegisterGreenLatOffsetStd", rms2Lat)
                    self.metadata.add("RegisterRedLatOffsetStd", rms3Lat)

            # warp template exposure to match exposure,
            # PSF match template exposure to exposure,
            # then return the difference

            # Return warped template...  Construct sourceKernelCand list after subtract
            self.log.info("Subtracting images")
            subtractRes = self.subtract.subtractExposures(
                templateExposure=templateExposure,
                scienceExposure=exposure,
                candidateList=kernelSources,
                convolveTemplate=self.config.convolveTemplate,
                doWarping=not self.config.doUseRegister
            )
            subtractedExposure = subtractRes.subtractedExposure

            if self.config.doWriteMatchedExp:
                sensorRef.put(subtractRes.matchedExposure, self.config.coaddName + "Diff_matchedExp")

        if self.config.doDetection:
            self.log.info("Running diaSource detection")
            if subtractedExposure is None:
                subtractedExposure = sensorRef.get(subtractedExposureName)

            # Get Psf from the appropriate input image if it doesn't exist
            if not subtractedExposure.hasPsf():
                if self.config.convolveTemplate:
                    subtractedExposure.setPsf(exposure.getPsf())
                else:
                    if templateExposure is None:
                        template = self.getTemplate.run(exposure, sensorRef, templateIdList=templateIdList)
                        templateExposure = template.exposure
                    subtractedExposure.setPsf(templateExposure.getPsf())

            # Erase existing detection mask planes
            mask  = subtractedExposure.getMaskedImage().getMask()
            mask &= ~(mask.getPlaneBitMask("DETECTED") | mask.getPlaneBitMask("DETECTED_NEGATIVE"))

            table = afwTable.SourceTable.make(self.schema, idFactory)
            table.setMetadata(self.algMetadata)
            results = self.detection.makeSourceCatalog(
                table=table,
                exposure=subtractedExposure,
                doSmooth=not self.config.doPreConvolve
                )

            if self.config.doMerge:
                fpSet = results.fpSets.positive
                fpSet.merge(results.fpSets.negative, self.config.growFootprint,
                            self.config.growFootprint, False)
                diaSources = afwTable.SourceCatalog(table)
                fpSet.makeSources(diaSources)
                self.log.info("Merging detections into %d sources" % (len(diaSources)))
            else:
                diaSources = results.sources

            if self.config.doMeasurement:
                self.log.info("Running diaSource measurement")
                self.measurement.run(diaSources, subtractedExposure)

            # Match with the calexp sources if possible
            if self.config.doMatchSources:
                if sensorRef.datasetExists("src"):
                    # Create key,val pair where key=diaSourceId and val=sourceId
                    matchRadAsec = self.config.diaSourceMatchRadius
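                    # Convert the match radius from arcseconds to pixels via the exposure's WCS pixel scale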
                    matchRadPixel = matchRadAsec / exposure.getWcs().pixelScale().asArcseconds()

                    srcMatches = afwTable.matchXy(sensorRef.get("src"), diaSources, matchRadPixel, True)
                    srcMatchDict = dict([(srcMatch.second.getId(), srcMatch.first.getId()) for 
                                         srcMatch in srcMatches])
                    self.log.info("Matched %d / %d diaSources to sources" % (len(srcMatchDict),
                                                                             len(diaSources)))
                else:
                    self.log.warn("Src product does not exist; cannot match with diaSources")
                    srcMatchDict = {}

                # Create key,val pair where key=diaSourceId and val=refId
                refAstromConfig = measAstrom.AstrometryConfig()
                refAstromConfig.matcher.maxMatchDistArcSec = matchRadAsec
                refAstrometer = measAstrom.AstrometryTask(refAstromConfig)
                astromRet = refAstrometer.run(exposure=exposure, sourceCat=diaSources)
                refMatches = astromRet.matches
                if refMatches is None:
                    self.log.warn("No diaSource matches with reference catalog")
                    refMatchDict = {}
                else:
                    self.log.info("Matched %d / %d diaSources to reference catalog" % (len(refMatches),
                                                                                       len(diaSources)))
                    refMatchDict = dict([(refMatch.second.getId(), refMatch.first.getId()) for \
                                             refMatch in refMatches])

                # Assign source Ids
                for diaSource in diaSources:
                    sid = diaSource.getId()
                    if sid in srcMatchDict:
                        diaSource.set("srcMatchId", srcMatchDict[sid])
                    if sid in refMatchDict:
                        diaSource.set("refMatchId", refMatchDict[sid])

            if diaSources is not None and self.config.doWriteSources:
                sensorRef.put(diaSources, self.config.coaddName + "Diff_diaSrc")

            if self.config.doAddMetrics and self.config.doSelectSources:
                self.log.info("Evaluating metrics and control sample")

                kernelCandList = []
                for cell in subtractRes.kernelCellSet.getCellList():
                    for cand in cell.begin(False): # include bad candidates
                        kernelCandList.append(cast_KernelCandidateF(cand))

                # Get basis list to build control sample kernels
                basisList = afwMath.cast_LinearCombinationKernel(
                    kernelCandList[0].getKernel(KernelCandidateF.ORIG)).getKernelList()

                controlCandList = \
                    diffimTools.sourceTableToCandidateList(controlSources, 
                                                           subtractRes.warpedExposure, exposure,
                                                           self.config.subtract.kernel.active,
                                                           self.config.subtract.kernel.active.detectionConfig,
                                                           self.log, doBuild=True, basisList=basisList)

                kcQa.apply(kernelCandList, subtractRes.psfMatchingKernel, subtractRes.backgroundModel,
                                dof=nparam)
                kcQa.apply(controlCandList, subtractRes.psfMatchingKernel, subtractRes.backgroundModel)

                if self.config.doDetection:
                    kcQa.aggregate(selectSources, self.metadata, allresids, diaSources)
                else:
                    kcQa.aggregate(selectSources, self.metadata, allresids)

                sensorRef.put(selectSources, self.config.coaddName + "Diff_kernelSrc")

        if self.config.doWriteSubtractedExp:
            sensorRef.put(subtractedExposure, subtractedExposureName)

        self.runDebug(exposure, subtractRes, selectSources, kernelSources, diaSources)
        return pipeBase.Struct(
            subtractedExposure=subtractedExposure,
            subtractRes=subtractRes,
            sources=diaSources,
        )
コード例 #44
0
ファイル: matchingCode.py プロジェクト: NabeelSarwar/Thesis
def matchCats(cat1, cat2, matchRadius=1*afwGeom.arcseconds, includeMismatches=True, multiMeas=False):
    """
    Match two catalogs and return a catalog containing the fields of both.
    """

    mc = afwTable.MatchControl()
    mc.includeMismatches = includeMismatches
    mc.findOnlyClosest = True

    matched = afwTable.matchRaDec(cat1, cat2, matchRadius, mc)

    bestMatches = {}
    if includeMismatches:
        noMatch = []
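    # Records with m2 == None are cat1 sources that had no counterpart (only produced
    # when includeMismatches is set).  For real matches, keep the closest one per cat2
    # id, or one per cat1 id when multiMeas is set.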
    for m1, m2, d in matched:
        if m2 is None:
            noMatch.append(m1)
        else:
            if not multiMeas:
                id = m2.getId()
                if id not in bestMatches:
                    bestMatches[id] = (m1, m2, d)
                else:
                    if d < bestMatches[id][2]:
                        bestMatches[id] = (m1, m2, d)
            else:
                id = m1.getId()
                bestMatches[id] = (m1, m2, d)

    if includeMismatches:
        print "{0} objects from {1} in the first catalog had no match in the second catalog.".format(len(noMatch), len(cat1))
        print "{0} objects from the first catalog with a match in the second catalog were not the closest match.".format(len(matched) - len(noMatch) - len(bestMatches))

    nMatches = len(bestMatches)
    print "I found {0} matches".format(nMatches)

    schema1 = cat1.getSchema()
    schema2 = cat2.getSchema()
    names1 = schema1.getNames()
    names2 = schema2.getNames()

    schema = afwTable.SimpleTable.makeMinimalSchema()

    catKeys = []; cat1Keys = []; cat2Keys = []
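    # Build the merged schema: take every cat1 field as-is, then add cat2 fields,
    # renaming any duplicate name with a ".2" (or higher) suffix to avoid collisions.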
    for name in names1:
        cat1Keys.append(schema1.find(name).getKey())
        if name not in ['id', 'coord']:
            catKeys.append(schema.addField(schema1.find(name).getField()))
        else:
            catKeys.append(schema.find(name).getKey())
    for name in names2:
        cat2Keys.append(schema2.find(name).getKey())
        if name not in schema1.getNames():
            catKeys.append(schema.addField(schema2.find(name).getField()))
        elif name+".2" not in schema1.getNames():
            catKeys.append(schema.addField(schema2.find(name).getField().copyRenamed(name+".2")))
        else:
            text = 3
            if not name +'.'+str(text) in schema.getNames():
                key = schema.addField(schema2.find(name).getField().copyRenamed(name+"." +str(text)))
                catKeys.append(key)
            else:
                while name+'.'+str(text) in schema.getNames():
                    print('insane looping 2: ' + name + '.' + str(text))
                    text = text + 1
                    if not name+'.'+str(text) in schema.getNames():
                        catKeys.append(schema.addField(schema2.find(name).getField().copyRenamed(name+"."+str(text))))
                        # break here; otherwise the name just added makes the while
                        # condition true again and the loop keeps adding suffixes forever
                        break

    print('Done matching')
    print('Now merging')
    cat = afwTable.SimpleCatalog(schema)
    cat.reserve(nMatches)

    for id in bestMatches:
        m1, m2, d = bestMatches[id]
        record = cat.addNew()
        for i in range(len(cat1Keys)):
            record.set(catKeys[i], m1.get(cat1Keys[i]))
        for i in range(len(cat1Keys), len(catKeys)):
            record.set(catKeys[i], m2.get(cat2Keys[i-len(cat1Keys)]))

    return cat
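
A minimal usage sketch for matchCats, assuming cat1 and cat2 are already-populated afwTable.SimpleCatalog objects with valid coord fields; the 0.5 arcsec radius and the catalog names are illustrative assumptions, not part of the original example.

# Hypothetical inputs: cat1 and cat2 must already exist and carry valid coordinates.
merged = matchCats(cat1, cat2,
                   matchRadius=0.5*afwGeom.arcseconds,
                   includeMismatches=True,
                   multiMeas=False)
print("Merged catalog: {0} rows, {1} fields".format(
    len(merged), len(merged.getSchema().getNames())))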