Example #1
    def setUp(self):
        # Set up a Coadd with CoaddInputs tables that have blank filter
        # columns to be filled in by later test code.
        self.coadd = ExposureF(30, 90)
        # WCS is arbitrary, since it'll be the same for all images
        wcs = makeSkyWcs(crpix=Point2D(0, 0),
                         crval=SpherePoint(45.0, 45.0, degrees),
                         cdMatrix=makeCdMatrix(scale=0.17 * degrees))
        self.coadd.setWcs(wcs)
        schema = ExposureCatalog.Table.makeMinimalSchema()
        self.filterKey = schema.addField("filter", type=str, doc="", size=16)
        weightKey = schema.addField("weight", type=float, doc="")
        # First input image covers the first 2/3, second covers the last 2/3,
        # so they overlap in the middle 1/3.
        inputs = ExposureCatalog(schema)
        self.input1 = inputs.addNew()
        self.input1.setId(1)
        self.input1.setBBox(Box2I(Point2I(0, 0), Point2I(29, 59)))
        self.input1.setWcs(wcs)
        self.input1.set(weightKey, 2.0)
        self.input2 = inputs.addNew()
        self.input2.setId(2)
        self.input2.setBBox(Box2I(Point2I(0, 30), Point2I(29, 89)))
        self.input2.setWcs(wcs)
        self.input2.set(weightKey, 3.0)
        # Use the same catalog for visits and CCDs since the algorithm we're
        # testing only cares about CCDs.
        self.coadd.getInfo().setCoaddInputs(CoaddInputs(inputs, inputs))

        # Set up a catalog with centroids and a FilterFraction plugin.
        # We have one record in each region (first input only, both inputs,
        # second input only)
        schema = SourceCatalog.Table.makeMinimalSchema()
        centroidKey = Point2DKey.addFields(schema,
                                           "centroid",
                                           doc="position",
                                           unit="pixel")
        schema.getAliasMap().set("slot_Centroid", "centroid")
        self.plugin = FilterFractionPlugin(
            config=FilterFractionPlugin.ConfigClass(),
            schema=schema,
            name="subaru_FilterFraction",
            metadata=PropertyList())
        catalog = SourceCatalog(schema)
        self.record1 = catalog.addNew()
        self.record1.set(centroidKey, Point2D(14.0, 14.0))
        self.record12 = catalog.addNew()
        self.record12.set(centroidKey, Point2D(14.0, 44.0))
        self.record2 = catalog.addNew()
        self.record2.set(centroidKey, Point2D(14.0, 74.0))
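A hedged sketch of a test method that could follow this setUp. The filter name and the output field name are assumptions, not taken from the plugin's documented schema; the measure(record, exposure) call follows the standard lsst.meas.base single-frame plugin API.

    def testOverlapSketch(self):
        # Hypothetical test: label both inputs with the same filter, then
        # measure the record that sits in the overlap region.
        for ccd in self.coadd.getInfo().getCoaddInputs().ccds:
            ccd.set(self.filterKey, "HSC-R")
        self.plugin.measure(self.record12, self.coadd)
        # Both inputs share the filter, so the fraction should be 1.0; the
        # field name below is an assumption, so the check is left commented.
        # self.assertEqual(self.record12["subaru_FilterFraction_unweighted"], 1.0)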
Example #2
 def testPut(self):
     with lsst.utils.tests.temporaryDirectory() as root:
         Butler3.makeRepo(root)
         butler3 = Butler3(root, run="three")
         butler3.registry.registerDatasetType(
             DatasetType("cat", ["htm7"], "SourceCatalog",
                         universe=butler3.registry.dimensions)
         )
         butlerShim = ShimButler(butler3)
         catIn = SourceCatalog(SourceCatalog.Table.makeMinimalSchema())
         catIn.addNew().set("id", 42)
         butlerShim.put(catIn, "cat", htm7=131072)
         catOut = butlerShim.get("cat", htm7=131072)
         self.assertEqual(list(catIn["id"]), list(catOut["id"]))
Example #3
def _makeDummyCatalog(size, skyFlag=False):
    """Create a trivial catalog for testing source counts.

    Parameters
    ----------
    size : `int`
        The number of entries in the catalog.
    skyFlag : `bool`
        If set, the schema is guaranteed to have the ``sky_source`` flag, and
        one row has it set to `True`. If not set, the ``sky_source`` flag is
        not present.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new catalog with ``size`` rows.
    """
    schema = SourceCatalog.Table.makeMinimalSchema()
    if skyFlag:
        schema.addField("sky_source", type="Flag", doc="Sky objects.")
    catalog = SourceCatalog(schema)
    for i in range(size):
        record = catalog.addNew()
    if skyFlag and size > 0:
        # Set the flag on the most recently added record.
        record["sky_source"] = True
    return catalog
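For illustration, a brief usage sketch (variable names hypothetical) of how this helper might back a sky-source count test:

# Usage sketch (hypothetical): exactly one of the five rows carries the flag.
cat = _makeDummyCatalog(5, skyFlag=True)
nSky = sum(rec["sky_source"] for rec in cat)  # expected: 1
nOther = len(cat) - nSky                      # expected: 4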
Example #4
 def testPut(self):
     with TemporaryDirectory(dir=TESTDIR) as root:
         Butler3.makeRepo(root)
         butler3 = Butler3(root, run="three")
         butler3.registry.registerDatasetType(
             DatasetType("cat", ["label"], "SourceCatalog",
                         universe=butler3.registry.dimensions)
         )
         butlerShim = ShimButler(butler3)
         catIn = SourceCatalog(SourceCatalog.Table.makeMinimalSchema())
         catIn.addNew().set("id", 42)
         butlerShim.put(catIn, "cat", label="four")
         catOut = butlerShim.get("cat", label="four")
         self.assertEqual(list(catIn["id"]), list(catOut["id"]))
         # Without this, the temporary directory cannot be removed on NFS
         # because these objects hold open SQLite registries.
         del butler3
         del butlerShim
Example #5
def _makeDummyCatalog(nParents,
                      *,
                      skyFlags=False,
                      deblendFlags=False,
                      nChildren=0,
                      nGrandchildren=0):
    """Create a trivial catalog for testing deblending counts.

    Parameters
    ----------
    nParents : `int`
        The number of entries in the catalog prior to deblending.
    skyFlags : `bool`
        If set, the schema includes flags associated with sky sources,
        and one top-level source (the deblended one, if it exists) and any
        descendents are sky sources.
    deblendFlags : `bool`
        If set, the schema includes flags associated with the deblender.
    nChildren : `int`
        If positive, one source is deblended into ``nChildren`` children. This
        parameter is ignored if ``deblendFlags`` is `False`.
    nGrandchildren : `int`
        If positive, one source produced by ``nChildren`` is deblended into
        ``nGrandchildren`` children. This parameter is ignored if ``nChildren``
        is 0 or not applicable.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new catalog with ``nParents + nChildren + nGrandchildren`` rows.
    """
    schema = SourceCatalog.Table.makeMinimalSchema()
    if skyFlags:
        schema.addField("sky_source", type="Flag", doc="Sky source.")
    if deblendFlags:
        # See https://community.lsst.org/t/4957 for flag definitions.
        # Do not use detect_ flags, as they are defined by a postprocessing
        # task and some post-deblend catalogs may not have them.
        schema.addField('deblend_nChild', type=np.int32, doc='')
        schema.addField('deblend_nPeaks', type=np.int32, doc='')
        schema.addField('deblend_parentNPeaks', type=np.int32, doc='')
        schema.addField('deblend_parentNChild', type=np.int32, doc='')
    catalog = SourceCatalog(schema)
    if nParents > 0:  # normally anti-pattern, but simplifies nested ifs
        for i in range(nParents):
            record = catalog.addNew()
            if deblendFlags:
                record["deblend_nPeaks"] = 1
        if skyFlags:
            # Flag the last parent added (the one deblended below, if any)
            # as a sky source.
            record["sky_source"] = True
        if deblendFlags and nChildren > 0:
            children = _addChildren(catalog, record, nChildren)
            if nGrandchildren > 0:
                _addChildren(catalog, children[0], nGrandchildren)
    return catalog
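A hedged usage sketch, assuming _addChildren (defined elsewhere, not shown here) fills in the deblend bookkeeping fields for the records it creates:

# Usage sketch (hypothetical): 3 parents, one deblended into 2 children,
# and one of those children deblended into 2 grandchildren.
cat = _makeDummyCatalog(3, deblendFlags=True, nChildren=2, nGrandchildren=2)
assert len(cat) == 3 + 2 + 2  # 7 rows, matching the documented return value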
Example #6
def makeRerunCatalog(schema, oldCatalog, idList, fields=None):
    """ Creates a catalog prepopulated with ids

    This function is used to generate a SourceCatalog containing blank records
    with Ids specified in the idList parameter

    This function is primarily used when rerunning measurements on a footprint.
    Specifying ids in a new measurement catalog which correspond to ids in an
    old catalog makes comparing results much easier.

    Parameters
    ----------
    schema : lsst.afw.table.Schema
        Schema used to describe the fields in the resulting SourceCatalog

    oldCatalog : lsst.afw.table.SourceCatalog
        Catalog containing previous measurements.

    idList : iterable
        Python iterable whose values should be numbers corresponding to
        measurement ids, ids must exist in the oldCatalog

    fields : iterable
        Python iterable whose entries should be strings corresponding to schema
        keys that exist in both the old catalog and input schema. Fields listed
        will be copied from the old catalog into the new catalog.

    Returns
    -------
    measCat : lsst.afw.table.SourceCatalog
        SourceCatalog prepopulated with entries corresponding to the ids
        specified
    """

    if fields is None:
        fields = []
    if not isinstance(fields, Iterable):
        raise RuntimeError("fields list must be an iterable with string"
                           "elements")
    for entry in fields:
        if entry not in schema:
            schema.addField(oldCatalog.schema.find(entry).field)

    measCat = SourceCatalog(schema)
    for srcId in idList:
        oldSrc = oldCatalog.find(srcId)
        src = measCat.addNew()
        src.setId(srcId)
        src.setFootprint(oldSrc.getFootprint())
        src.setParent(oldSrc.getParent())
        src.setCoord(oldSrc.getCoord())
        for entry in fields:
            src[entry] = oldSrc[entry]
    return measCat
Example #7
def create_source_catalog_from_text_and_butler(repo_dir, info, dataset='src'):
    butler = dafPersistence.Butler(repo_dir)
    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    newSchema = mapper.getOutputSchema()

    src_cat = SourceCatalog(newSchema)
    for row in info:
        record = src_cat.addNew()
        record.set('coord_ra', Angle(row['RA']*degrees))
        record.set('coord_dec', Angle(row['Dec']*degrees))

    print(src_cat['coord_ra'], src_cat['coord_dec'])
    return src_cat
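A usage sketch with a hypothetical repository path; info just needs to be iterable rows with 'RA' and 'Dec' values in degrees:

# Usage sketch (hypothetical path and rows).
info = [{'RA': 150.10, 'Dec': 2.20}, {'RA': 150.11, 'Dec': 2.21}]
src_cat = create_source_catalog_from_text_and_butler('/path/to/repo', info)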
Example #8
def makeRerunCatalog(schema, oldCatalog, idList, fields=None,
                     resetParents=True, addParents=False):
    """Create a catalog prepopulated with IDs.

    This function is used to generate a `~lsst.afw.table.SourceCatalog`
    containing blank records with IDs as specified in the ``idList``
    parameter.

    This function is primarily used when re-running measurements on a
    particular footprint. Specifying IDs in the new measurement catalog which
    correspond to IDs in the old catalog makes comparing results much easier.

    The ``resetParents`` and ``addParents`` options are needed because
    `SingleFrameMeasurementTask.runPlugins` will skip child
    objects whose parents are not in the catalog.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`
        Schema used to describe the fields in the resulting catalog.

    oldCatalog : `lsst.afw.table.SourceCatalog`
        Catalog containing previous measurements.

    idList : iterable of `int`
        Iterable whose values should be numbers corresponding to measurement
        IDs which exist in ``oldCatalog``.

    fields : iterable of `str`
        Iterable whose entries should be strings corresponding to schema keys
        that exist in both the old catalog and input schema. Fields listed
        will be copied from the old catalog into the new catalog.

    resetParents : `bool`
        If `True`, child objects whose parents are not in the ``idList``
        will have their parents reset to zero.

    addParents : `bool`
        If `True`, parents of child objects will be added to ``idList`` if
        they are not already present.

    Returns
    -------
    measCat : `lsst.afw.table.SourceCatalog`
        Catalog prepopulated with entries with the IDs specified.
    """

    if not isinstance(schema, Schema):
        raise RuntimeError("schema must be an instance of "
                           "lsst.afw.table.Schema")

    if not isinstance(oldCatalog, SourceCatalog):
        raise RuntimeError("oldCatalog must be an instance of "
                           "lsst.afw.table.SourceCatalogiterable")

    if fields is None:
        fields = []
    if not isinstance(fields, Iterable):
        raise RuntimeError("fields list must be an iterable with string"
                           "elements")
    for entry in fields:
        if entry not in schema:
            schema.addField(oldCatalog.schema.find(entry).field)

    if addParents:
        newIdList = set()
        for srcId in idList:
            newIdList.add(srcId)
            parentId = oldCatalog.find(srcId).getParent()
            if parentId:
                newIdList.add(parentId)
        idList = newIdList
    idList = sorted(idList)

    measCat = SourceCatalog(schema)
    for srcId in idList:
        oldSrc = oldCatalog.find(srcId)
        src = measCat.addNew()
        src.setId(srcId)
        src.setFootprint(oldSrc.getFootprint())
        parent = oldSrc.getParent()
        if resetParents and parent and parent not in idList:
            parent = 0
        src.setParent(parent)
        src.setCoord(oldSrc.getCoord())
        for entry in fields:
            src[entry] = oldSrc[entry]
    return measCat
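A hedged usage sketch for this variant; the IDs and the copied field name are hypothetical placeholders:

# Usage sketch: rebuild a measurement catalog for two sources, copying one
# flux column and pulling in missing parents so runPlugins will not skip
# the children. "base_PsfFlux_instFlux" is a placeholder field name.
measCat = makeRerunCatalog(schema, oldCatalog, [10, 11],
                           fields=["base_PsfFlux_instFlux"],
                           resetParents=True, addParents=True)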
Example #9
def _makeDummyCatalog(size):
    catalog = SourceCatalog(SourceCatalog.Table.makeMinimalSchema())
    for i in range(size):
        catalog.addNew()
    return catalog
Example #11
def makeRerunCatalog(schema,
                     oldCatalog,
                     idList,
                     fields=None,
                     resetParents=True,
                     addParents=False):
    """ Creates a catalog prepopulated with ids

    This function is used to generate a SourceCatalog containing blank records
    with Ids specified in the idList parameter

    This function is primarily used when rerunning measurements on a footprint.
    Specifying ids in a new measurement catalog which correspond to ids in an
    old catalog makes comparing results much easier.

    The resetParents and addParents options are needed because
    lsst.meas.base.SingleFrameMeasurementTask.runPlugins() will skip child
    objects whose parents are not in the catalog.

    Parameters
    ----------
    schema : lsst.afw.table.Schema
        Schema used to describe the fields in the resulting SourceCatalog

    oldCatalog : lsst.afw.table.SourceCatalog
        Catalog containing previous measurements.

    idList : iterable
        Python iterable whose values should be numbers corresponding to
        measurement ids, ids must exist in the oldCatalog

    fields : iterable
        Python iterable whose entries should be strings corresponding to schema
        keys that exist in both the old catalog and input schema. Fields listed
        will be copied from the old catalog into the new catalog.

    resetParents: boolean
        Flag to toggle whether child objects whose parents are not in the
        idList should have their parents reset to zero.

    addParents: boolean
        Flag to toggle whether parents of child objects will be added to the
        idList (if not already present).

    Returns
    -------
    measCat : lsst.afw.table.SourceCatalog
        SourceCatalog prepopulated with entries corresponding to the ids
        specified
    """

    if not isinstance(schema, Schema):
        raise RuntimeError("schema must be an instance of "
                           "lsst.afw.table.Schema")

    if not isinstance(oldCatalog, SourceCatalog):
        raise RuntimeError("oldCatalog must be an instance of "
                           "lsst.afw.table.SourceCatalogiterable")

    if fields is None:
        fields = []
    if not isinstance(fields, Iterable):
        raise RuntimeError("fields list must be an iterable with string"
                           "elements")
    for entry in fields:
        if entry not in schema:
            schema.addField(oldCatalog.schema.find(entry).field)

    if addParents:
        newIdList = set()
        for srcId in idList:
            newIdList.add(srcId)
            parentId = oldCatalog.find(srcId).getParent()
            if parentId:
                newIdList.add(parentId)
        idList = newIdList
    idList = sorted(idList)

    measCat = SourceCatalog(schema)
    for srcId in idList:
        oldSrc = oldCatalog.find(srcId)
        src = measCat.addNew()
        src.setId(srcId)
        src.setFootprint(oldSrc.getFootprint())
        parent = oldSrc.getParent()
        if resetParents and parent and parent not in idList:
            parent = 0
        src.setParent(parent)
        src.setCoord(oldSrc.getCoord())
        for entry in fields:
            src[entry] = oldSrc[entry]
    return measCat
Example #12
def makeRerunCatalog(schema,
                     oldCatalog,
                     idList,
                     fields=None,
                     resetParents=True,
                     addParents=False):
    """Create a catalog prepopulated with IDs.

    This function is used to generate a `~lsst.afw.table.SourceCatalog`
    containing blank records with IDs as specified in the ``idList``
    parameter.

    This function is primarily used when re-running measurements on a
    particular footprint. Specifying IDs in the new measurement catalog which
    correspond to IDs in the old catalog makes comparing results much easier.

    The ``resetParents`` and ``addParents`` options are needed because
    `SingleFrameMeasurementTask.runPlugins` will skip child
    objects whose parents are not in the catalog.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`
        Schema used to describe the fields in the resulting catalog.

    oldCatalog : `lsst.afw.table.SourceCatalog`
        Catalog containing previous measurements.

    idList : iterable of `int`
        Iterable whose values should be numbers corresponding to measurement
        IDs which exist in ``oldCatalog``.

    fields : iterable of `str`
        Iterable whose entries should be strings corresponding to schema keys
        that exist in both the old catalog and input schema. Fields listed
        will be copied from the old catalog into the new catalog.

    resetParents : `bool`
        If `True`, child objects whose parents are not in the ``idList``
        will have their parents reset to zero.

    addParents : `bool`
        If `True`, parents of child objects will be added to ``idList`` if
        they are not already present.

    Returns
    -------
    measCat : `lsst.afw.table.SourceCatalog`
        Catalog prepopulated with entries with the IDs specified.
    """

    if not isinstance(schema, Schema):
        raise RuntimeError("schema must be an instance of "
                           "lsst.afw.table.Schema")

    if not isinstance(oldCatalog, SourceCatalog):
        raise RuntimeError("oldCatalog must be an instance of "
                           "lsst.afw.table.SourceCatalogiterable")

    if fields is None:
        fields = []
    if not isinstance(fields, Iterable):
        raise RuntimeError("fields list must be an iterable with string"
                           "elements")
    for entry in fields:
        if entry not in schema:
            schema.addField(oldCatalog.schema.find(entry).field)

    if addParents:
        newIdList = set()
        for srcId in idList:
            newIdList.add(srcId)
            parentId = oldCatalog.find(srcId).getParent()
            if parentId:
                newIdList.add(parentId)
        idList = newIdList
    idList = sorted(idList)

    measCat = SourceCatalog(schema)
    for srcId in idList:
        oldSrc = oldCatalog.find(srcId)
        src = measCat.addNew()
        src.setId(srcId)
        src.setFootprint(oldSrc.getFootprint())
        parent = oldSrc.getParent()
        if resetParents and parent and parent not in idList:
            parent = 0
        src.setParent(parent)
        src.setCoord(oldSrc.getCoord())
        for entry in fields:
            src[entry] = oldSrc[entry]
    return measCat
Example #13
def getFakeSources(butler, dataId, tol=1.0, extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False, footprints=False, radecMatch=None):
    """Get list of sources which agree in pixel position with fake ones with tol
    
    this returns a sourceCatalog of all the matched fake objects,
    note, there will be duplicates in this list, since I haven't checked deblend.nchild,
    and I'm only doing a tolerance match, which could include extra sources
    
    the outputs can include extraCols as long as they are one of:
      zeropoint, visit, ccd, thetaNorth, pixelScale

    if includeMissing is true, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources without
    any measurements, specifically the 'id' column will be 0

    radecMatch is the fakes table. if it's not None(default), then do an ra/dec 
    match with the input catalog instead of looking in the header for where the 
    sources where added
    """
    
    availExtras = {'zeropoint':{'type':float, 'doc':'zeropoint'}, 
                   'visit':{'type':int, 'doc':'visit id'}, 
                   'ccd':{'type':int, 'doc':'ccd id'},
                   'thetaNorth':{'type':lsst.afw.geom.Angle, 'doc':'angle to north'},
                   'pixelScale':{'type':float, 'doc':'pixelscale in arcsec/pixel'}}
    
    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src', dataId, 
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            sources = butler.get('deepCoadd_src', dataId, 
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('deepCoadd', dataId, immediate=True)
            cal_md = butler.get('deepCoadd_md', dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print("skipping", dataId)
        return None
        
    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.pixelScale().asArcseconds()
        availExtras['thetaNorth']['value'] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()
                              (lsst.afw.geom.Point2D(1.0,0.0)))))
    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        availExtras['zeropoint']['value'] = 2.5*np.log10(cal_md.get('FLUXMAG0'))

        
    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        fakeXY, srcIndex = getFakeMatchesRaDec(sources, radecMatch, 
                                               lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT)),
                                               cal.getWcs(), 
                                               tol=tol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int, doc='id of fake source matched to position')
    newSchema.addField('fakeOffset', type=lsst.afw.geom.Point2D,
                       doc='offset from input fake position (pixels)')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum(len(s) for s in srcIndex.values()) +
                    (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.schema.find('centroid.sdss').getKey()
    for ident, sindlist in srcIndex.items():
        if includeMissing and (len(sindlist) == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set('fakeId', ident)
            newRec.set('fakeOffset', 
                       lsst.afw.geom.Point2D(sources[ss].get(centroidKey).getX() - 
                                             fakeXY[ident][0],
                                             sources[ss].get(centroidKey).getY() - 
                                             fakeXY[ident][1]))

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
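A usage sketch with hypothetical Gen2 dataId values; with radecMatch=None (the default) the fake positions are read from the calexp header:

# Usage sketch (hypothetical dataId).
matched = getFakeSources(butler, {'visit': 1234, 'ccd': 42},
                         tol=1.0, includeMissing=True)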
Example #14
    def run(self, ccdExposure):
        """Mask negative pixels"""
        ccd = ccdExposure.getDetector()
        ccdExposure = self.convertIntToFloat(ccdExposure)

        self.updateVariance(ccdExposure, ccd[0])  # Treating as having only a single amplifier

        image = ccdExposure.getMaskedImage().getImage()
        mask = ccdExposure.getMaskedImage().getMask()
        bad = mask.getPlaneBitMask("BAD")
        if False:
            mask.getArray()[:] = numpy.where(image <= 0, bad, 0)  # XXX this causes bad things to happen

        #from lsst.afw.image.utils import clipImage
        #clipImage(image,0,10)
        #exit()


        """ transfer wcs system to TAN """
        matches = ReferenceMatchVector()
        md  = ccdExposure.getMetadata()
        wcs = ccdExposure.getWcs()

        refSchema = SimpleTable.makeMinimalSchema()
        Point2DKey.addFields(refSchema, "centroid", "centroid position", "pixel")
        refCatalog = SimpleCatalog(refSchema)
        schema = SourceTable.makeMinimalSchema()
        centroidKey = Point2DKey.addFields(schema, "centroid", "centroid position", "pixel")
        imgCatalog = SourceCatalog(schema)
        imgCatalog.defineCentroid("centroid")

#        for i in numpy.linspace(10, md.get("ZNAXIS1")-10, 20):
#            for j in numpy.linspace(10, md.get("ZNAXIS2")-10, 20):
        for i in numpy.linspace(10, 4000, 10):
            for j in numpy.linspace(10, 4000, 10):
                imgcrd = Point2D(i, j)
                skycrd = wcs.pixelToSky(afwGeom.Point2D(i, j))
                # Create the reference catalog (with coordinates on the sky)
                refSrc = refCatalog.addNew()
                refSrc.setCoord(skycrd)
                # Create the position catalog (with positions on the image)
                imgSrc = imgCatalog.addNew()
                imgSrc.set(centroidKey, imgcrd)
                # matches
                matches.push_back(ReferenceMatch(refSrc, imgSrc, float("NaN")))

        # make initial wcs
        refPix = afwGeom.Point2D(0.5*ccdExposure.getWidth(), 0.5*ccdExposure.getHeight())
        refSky = wcs.pixelToSky(refPix)
        xPixelScale = yPixelScale = (0.2*afwGeom.arcseconds).asDegrees()
        initTanWcs = afwImage.makeWcs(refSky, refPix, xPixelScale, 0.0, 0.0, yPixelScale)
        # Obtain a refined WCS by fitting the matched catalogs.
        fitter = FitTanSipWcsTask()
        fitRes = fitter.fitWcs(
            matches=matches,
            initWcs=initTanWcs,
            refCat=refCatalog,
            sourceCat=imgCatalog,
        )
        ccdExposure.setWcs(fitRes.wcs)


        """ Set zero point, ZP error, exptime """
    	zp = md.get("MAGZPT")
    	ccdExposure.getCalib().setFluxMag0(10.0**(0.4*zp))

        return lsst.pipe.base.Struct(exposure=ccdExposure)
Example #15
def getFakeSources(
    butler,
    dataId,
    tol=1.0,
    extraCols=("zeropoint", "visit", "ccd"),
    includeMissing=False,
    footprints=False,
    radecMatch=None,
    multiband=False,
    reffMatch=False,
    pix=0.168,
    minRad=None,
    raCol="RA",
    decCol="Dec",
):
    """
    Get a list of sources that agree in pixel position with fake ones,
    within tol.

    This returns a SourceCatalog of all the matched fake objects. Note that
    there will be duplicates in this list, since I haven't checked
    deblend.nchild, and I'm only doing a tolerance match, which could
    include extra sources.

    The outputs can include extraCols as long as they are one of:
        zeropoint, visit, ccd, thetaNorth, pixelScale

    If includeMissing is True, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table. If it is not None (the default is None),
    do an RA/Dec match with the input catalog instead of looking in the
    header for where the sources were added.
    """
    pipeVersion = dafPersist.eupsVersions.EupsVersions().versions["hscPipe"]
    if StrictVersion(pipeVersion) >= StrictVersion("3.9.0"):
        coaddData = "deepCoadd_calexp"
        coaddMeta = "deepCoadd_calexp_md"
    else:
        coaddData = "deepCoadd"
        coaddMeta = "deepCoadd_md"

    availExtras = {
        "zeropoint": {"type": float, "doc": "zeropoint"},
        "visit": {"type": int, "doc": "visit id"},
        "ccd": {"type": int, "doc": "ccd id"},
        "thetaNorth": {"type": lsst.afw.geom.Angle, "doc": "angle to north"},
        "pixelScale": {"type": float, "doc": "pixelscale in arcsec/pixel"},
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if "filter" not in dataId:
            sources = butler.get("src", dataId, flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS, immediate=True)
            cal = butler.get("calexp", dataId, immediate=True)
            cal_md = butler.get("calexp_md", dataId, immediate=True)
        else:
            meas = butler.get("deepCoadd_meas", dataId, flags=NO_FOOTPRINT, immediate=True)
            force = butler.get("deepCoadd_forced_src", dataId, flags=NO_FOOTPRINT, immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print "skipping", dataId
        return None

    if ("pixelScale" in extraCols) or ("thetaNorth" in extraCols):
        wcs = cal.getWcs()
        availExtras["pixelScale"]["value"] = wcs.pixelScale().asArcseconds()
        availExtras["thetaNorth"]["value"] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()(lsst.afw.geom.Point2D(1.0, 0.0))))
        )
    if "visit" in extraCols:
        availExtras["visit"]["value"] = dataId["visit"]
    if "ccd" in extraCols:
        availExtras["ccd"]["value"] = dataId["ccd"]
    if "zeropoint" in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.get("FLUXMAG0"))
        availExtras["zeropoint"]["value"] = zeropoint

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        if minRad is not None:
            print "# The min matching radius is %4.1f pixel" % minRad
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(
            sources,
            radecMatch,
            bbox,
            cal.getWcs(),
            tol=tol,
            reffMatch=reffMatch,
            pix=pix,
            minRad=minRad,
            raCol=raCol,
            decCol=decCol,
        )

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField("fakeId", type=int, doc="id of fake source matched to position")
    newSchema.addField("nMatched", type=int, doc="Number of matched objects")
    newSchema.addField("nPrimary", type=int, doc="Number of unique matched objects")
    newSchema.addField("nNoChild", type=int, doc="Number of matched objects with nchild==0")
    newSchema.addField("rMatched", type=float, doc="Radius used form atching obects, in pixel")
    newSchema.addField("fakeOffX", type=float, doc="offset from input fake position in X (pixels)")
    newSchema.addField("fakeOffY", type=float, doc="offset from input fake position in Y (pixels)")
    newSchema.addField("fakeOffR", type=float, doc="offset from input fake position in radius")
    newSchema.addField("fakeClosest", type="Flag", doc="Is this match the closest one?")

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]["type"], doc=availExtras[extraName]["doc"])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum(len(s) for s in srcIndex.values()) + (0 if not includeMissing else list(srcIndex.values()).count([]))
    )

    centroidKey = sources.schema.find("centroid.sdss").getKey()
    isPrimary = sources.schema.find("detect.is-primary").getKey()
    nChild = sources.schema.find("force.deblend.nchild").getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum([sources[obj].get(isPrimary) for obj in sindlist])
        nNoChild = np.sum([(sources[obj].get(nChild) == 0) for obj in sindlist])
        if includeMissing and (nMatched == 0):
            newRec = srcList.addNew()
            newRec.set("fakeId", ident)
            newRec.set("id", 0)
            newRec.set("nMatched", 0)
            newRec.set("rMatched", rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set("fakeId", ident)
            newRec.set("nMatched", nMatched)
            newRec.set("nPrimary", nPrimary)
            newRec.set("nNoChild", nNoChild)
            newRec.set("rMatched", rMatched)
            offsetX = sources[ss].get(centroidKey).getX() - fakeXY[ident][0]
            newRec.set("fakeOffX", offsetX)
            offsetY = sources[ss].get(centroidKey).getY() - fakeXY[ident][1]
            newRec.set("fakeOffY", offsetY)
            newRec.set("fakeOffR", np.sqrt(offsetX ** 2.0 + offsetY ** 2.0))
            if radecMatch:
                if ss == srcClose[ident]:
                    newRec.set("fakeClosest", True)
                else:
                    newRec.set("fakeClosest", False)

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]["value"])

    return srcList
Example #16
def getFakeSources(butler,
                   dataId,
                   tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False,
                   footprints=False,
                   radecMatch=None,
                   multiband=False,
                   reffMatch=False,
                   pix=0.168,
                   minRad=None,
                   raCol='RA',
                   decCol='Dec'):
    """
    Get a list of sources that agree in pixel position with fake ones,
    within tol.

    This returns a SourceCatalog of all the matched fake objects. Note that
    there will be duplicates in this list, since I haven't checked
    deblend.nchild, and I'm only doing a tolerance match, which could
    include extra sources.

    The outputs can include extraCols as long as they are one of:
        zeropoint, visit, ccd, thetaNorth, pixelScale

    If includeMissing is True, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table. If it is not None (the default is None),
    do an RA/Dec match with the input catalog instead of looking in the
    header for where the sources were added.
    """
    coaddData = "deepCoadd_calexp"
    coaddMeta = "deepCoadd_calexp_md"

    availExtras = {
        'zeropoint': {
            'type': float,
            'doc': 'zeropoint'
        },
        'visit': {
            'type': int,
            'doc': 'visit id'
        },
        'ccd': {
            'type': int,
            'doc': 'ccd id'
        },
        'thetaNorth': {
            'type': lsst.afw.geom.Angle,
            'doc': 'angle to north'
        },
        'pixelScale': {
            'type': float,
            'doc': 'pixelscale in arcsec/pixel'
        }
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            meas = butler.get('deepCoadd_meas',
                              dataId,
                              flags=NO_FOOTPRINT,
                              immediate=True)
            force = butler.get('deepCoadd_forced_src',
                               dataId,
                               flags=NO_FOOTPRINT,
                               immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except RuntimeError:
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.getPixelScale().asArcseconds()
        # The code below finds the angle to north: first the central pixel of
        # the calexp is found, then its sky coordinate; that coordinate is
        # used to linearize the sky-to-pixel transform at that point, and the
        # angle follows from the resulting linear matrix.
        xMid = cal.getWidth() // 2
        yMid = cal.getHeight() // 2
        midPoint = lsst.afw.geom.Point2D(xMid, yMid)
        midCoord = wcs.pixelToSky(midPoint)
        northSkyToPixelMatrix = wcs.linearizeSkyToPixel(
            midCoord, lsst.afw.geom.degrees)
        northSkyToPixelMatrix = northSkyToPixelMatrix.getLinear()
        availExtras['thetaNorth']['value'] = (np.arctan2(
            *tuple(northSkyToPixelMatrix(lsst.afw.geom.Point2D(1.0, 0.0))))
                                              ) * lsst.afw.geom.radians

    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.getScalar('FLUXMAG0'))
        availExtras['zeropoint']['value'] = zeropoint

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        if minRad is not None:
            print("# The min matching radius is %4.1f pixel" % minRad)
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(sources,
                                                         radecMatch,
                                                         bbox,
                                                         cal.getWcs(),
                                                         tol=tol,
                                                         reffMatch=reffMatch,
                                                         pix=pix,
                                                         minRad=minRad,
                                                         raCol=raCol,
                                                         decCol=decCol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId',
                       type=np.int32,
                       doc='id of fake source matched to position')
    newSchema.addField('nMatched',
                       type=np.int32,
                       doc='Number of matched objects')
    newSchema.addField('nPrimary',
                       type=np.int32,
                       doc='Number of unique matched objects')
    newSchema.addField('nNoChild',
                       type=np.int32,
                       doc='Number of matched objects with nchild==0')
    newSchema.addField('rMatched',
                       type=float,
                       doc='Radius used for matching objects, in pixels')
    newSchema.addField('fakeOffX',
                       type=float,
                       doc='offset from input fake position in X (pixels)')
    newSchema.addField('fakeOffY',
                       type=float,
                       doc='offset from input fake position in Y (pixels)')
    newSchema.addField('fakeOffR',
                       type=float,
                       doc='offset from input fake position in radius')
    newSchema.addField('fakeClosest',
                       type="Flag",
                       doc='Is this match the closest one?')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName,
                           type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum([len(s) for s in srcIndex.values()]) +
        (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.getCentroidKey()
    isPrimary = sources.schema.find('detect_isPrimary').getKey()
    nChild = sources.schema.find('force_deblend_nChild').getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum(
            [sources[int(obj)].get(isPrimary) for obj in sindlist])
        nNoChild = np.sum([(sources[int(obj)].get(nChild) == 0)
                           for obj in sindlist])
        if includeMissing and (nMatched == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
            newRec.set('nMatched', 0)
            newRec.set('rMatched', rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[int(ss)], mapper)
            newRec.set('fakeId', ident)
            newRec.set('nMatched', nMatched)
            newRec.set('nPrimary', nPrimary)
            newRec.set('nNoChild', nNoChild)
            newRec.set('rMatched', rMatched)
            offsetX = (sources[int(ss)].get(centroidKey).getX() -
                       fakeXY[ident][0])
            newRec.set('fakeOffX', offsetX)
            offsetY = (sources[int(ss)].get(centroidKey).getY() -
                       fakeXY[ident][1])
            newRec.set('fakeOffY', offsetY)
            newRec.set('fakeOffR', np.sqrt(offsetX**2.0 + offsetY**2.0))
            if radecMatch:
                if int(ss) == int(srcClose[ident]):
                    newRec.set('fakeClosest', True)
                else:
                    newRec.set('fakeClosest', False)

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
Example #17
def getFakeSources(butler,
                   dataId,
                   tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False,
                   footprints=False,
                   radecMatch=None):
    """Get list of sources which agree in pixel position with fake ones with tol
    
    this returns a sourceCatalog of all the matched fake objects,
    note, there will be duplicates in this list, since I haven't checked deblend.nchild,
    and I'm only doing a tolerance match, which could include extra sources
    
    the outputs can include extraCols as long as they are one of:
      zeropoint, visit, ccd, thetaNorth, pixelScale

    if includeMissing is true, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources without
    any measurements, specifically the 'id' column will be 0

    radecMatch is the fakes table. if it's not None(default), then do an ra/dec 
    match with the input catalog instead of looking in the header for where the 
    sources where added
    """

    availExtras = {
        'zeropoint': {
            'type': float,
            'doc': 'zeropoint'
        },
        'visit': {
            'type': int,
            'doc': 'visit id'
        },
        'ccd': {
            'type': int,
            'doc': 'ccd id'
        },
        'thetaNorth': {
            'type': lsst.afw.geom.Angle,
            'doc': 'angle to north'
        },
        'pixelScale': {
            'type': float,
            'doc': 'pixelscale in arcsec/pixel'
        }
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            sources = butler.get('deepCoadd_src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('deepCoadd', dataId, immediate=True)
            cal_md = butler.get('deepCoadd_md', dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.pixelScale().asArcseconds()
        availExtras['thetaNorth']['value'] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()(
                lsst.afw.geom.Point2D(1.0, 0.0)))))
    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        availExtras['zeropoint']['value'] = 2.5 * np.log10(
            cal_md.get('FLUXMAG0'))

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        fakeXY, srcIndex = getFakeMatchesRaDec(
            sources,
            radecMatch,
            lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT)),
            cal.getWcs(),
            tol=tol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId',
                       type=int,
                       doc='id of fake source matched to position')
    newSchema.addField('fakeOffset',
                       type=lsst.afw.geom.Point2D,
                       doc='offset from input fake position (pixels)')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName,
                           type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum(len(s) for s in srcIndex.values()) +
        (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.schema.find('centroid.sdss').getKey()
    for ident, sindlist in srcIndex.items():
        if includeMissing and (len(sindlist) == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set('fakeId', ident)
            newRec.set(
                'fakeOffset',
                lsst.afw.geom.Point2D(
                    sources[ss].get(centroidKey).getX() - fakeXY[ident][0],
                    sources[ss].get(centroidKey).getY() - fakeXY[ident][1]))

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList